Diffstat (limited to 'net')
-rw-r--r--net/6lowpan/ndisc.c16
-rw-r--r--net/8021q/vlan.c47
-rw-r--r--net/8021q/vlan.h1
-rw-r--r--net/9p/trans_fd.c17
-rw-r--r--net/9p/trans_usbg.c16
-rw-r--r--net/Kconfig8
-rw-r--r--net/Makefile1
-rw-r--r--net/appletalk/aarp.c24
-rw-r--r--net/appletalk/atalk_proc.c2
-rw-r--r--net/appletalk/ddp.c1
-rw-r--r--net/atm/clip.c75
-rw-r--r--net/atm/common.c14
-rw-r--r--net/atm/lec.c12
-rw-r--r--net/atm/raw.c2
-rw-r--r--net/atm/resources.c9
-rw-r--r--net/ax25/ax25_in.c4
-rw-r--r--net/batman-adv/Kconfig13
-rw-r--r--net/batman-adv/Makefile1
-rw-r--r--net/batman-adv/bat_algo.c1
-rw-r--r--net/batman-adv/bat_algo.h2
-rw-r--r--net/batman-adv/bat_iv_ogm.c30
-rw-r--r--net/batman-adv/bat_v.c6
-rw-r--r--net/batman-adv/bat_v_elp.c8
-rw-r--r--net/batman-adv/bat_v_ogm.c14
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c34
-rw-r--r--net/batman-adv/hard-interface.c40
-rw-r--r--net/batman-adv/hard-interface.h1
-rw-r--r--net/batman-adv/log.h3
-rw-r--r--net/batman-adv/main.c57
-rw-r--r--net/batman-adv/main.h5
-rw-r--r--net/batman-adv/mesh-interface.c21
-rw-r--r--net/batman-adv/mesh-interface.h1
-rw-r--r--net/batman-adv/multicast.c6
-rw-r--r--net/batman-adv/netlink.c24
-rw-r--r--net/batman-adv/netlink.h1
-rw-r--r--net/batman-adv/network-coding.c1873
-rw-r--r--net/batman-adv/network-coding.h106
-rw-r--r--net/batman-adv/originator.c13
-rw-r--r--net/batman-adv/routing.c9
-rw-r--r--net/batman-adv/send.c23
-rw-r--r--net/batman-adv/translation-table.c4
-rw-r--r--net/batman-adv/types.h216
-rw-r--r--net/bluetooth/af_bluetooth.c9
-rw-r--r--net/bluetooth/aosp.c2
-rw-r--r--net/bluetooth/coredump.c6
-rw-r--r--net/bluetooth/hci_conn.c113
-rw-r--r--net/bluetooth/hci_core.c121
-rw-r--r--net/bluetooth/hci_debugfs.c8
-rw-r--r--net/bluetooth/hci_event.c220
-rw-r--r--net/bluetooth/hci_sock.c2
-rw-r--r--net/bluetooth/hci_sync.c354
-rw-r--r--net/bluetooth/iso.c102
-rw-r--r--net/bluetooth/l2cap_core.c25
-rw-r--r--net/bluetooth/l2cap_sock.c10
-rw-r--r--net/bluetooth/lib.c2
-rw-r--r--net/bluetooth/mgmt.c346
-rw-r--r--net/bluetooth/mgmt_config.c4
-rw-r--r--net/bluetooth/mgmt_util.c46
-rw-r--r--net/bluetooth/mgmt_util.h3
-rw-r--r--net/bluetooth/msft.c2
-rw-r--r--net/bluetooth/rfcomm/core.c3
-rw-r--r--net/bluetooth/rfcomm/tty.c9
-rw-r--r--net/bluetooth/sco.c11
-rw-r--r--net/bluetooth/smp.c23
-rw-r--r--net/bluetooth/smp.h1
-rw-r--r--net/bpf/bpf_dummy_struct_ops.c3
-rw-r--r--net/bpf/test_run.c82
-rw-r--r--net/bridge/br.c41
-rw-r--r--net/bridge/br_cfm.c6
-rw-r--r--net/bridge/br_fdb.c114
-rw-r--r--net/bridge/br_forward.c3
-rw-r--r--net/bridge/br_if.c3
-rw-r--r--net/bridge/br_input.c8
-rw-r--r--net/bridge/br_mrp.c8
-rw-r--r--net/bridge/br_multicast.c34
-rw-r--r--net/bridge/br_netfilter_hooks.c3
-rw-r--r--net/bridge/br_netlink.c2
-rw-r--r--net/bridge/br_private.h5
-rw-r--r--net/bridge/br_switchdev.c5
-rw-r--r--net/bridge/br_sysfs_br.c2
-rw-r--r--net/bridge/br_vlan.c12
-rw-r--r--net/bridge/netfilter/Kconfig11
-rw-r--r--net/bridge/netfilter/ebtables.c14
-rw-r--r--net/bridge/netfilter/nft_meta_bridge.c11
-rw-r--r--net/caif/cfctrl.c298
-rw-r--r--net/can/af_can.c8
-rw-r--r--net/can/bcm.c5
-rw-r--r--net/can/isotp.c7
-rw-r--r--net/can/j1939/bus.c5
-rw-r--r--net/can/j1939/j1939-priv.h1
-rw-r--r--net/can/j1939/main.c5
-rw-r--r--net/can/j1939/socket.c57
-rw-r--r--net/can/raw.c72
-rw-r--r--net/ceph/Kconfig3
-rw-r--r--net/ceph/messenger.c22
-rw-r--r--net/ceph/messenger_v1.c56
-rw-r--r--net/ceph/messenger_v2.c246
-rw-r--r--net/ceph/mon_client.c2
-rw-r--r--net/core/Makefile1
-rw-r--r--net/core/datagram.c60
-rw-r--r--net/core/dev.c507
-rw-r--r--net/core/dev.h24
-rw-r--r--net/core/dev_addr_lists.c2
-rw-r--r--net/core/dev_api.c13
-rw-r--r--net/core/dev_ioctl.c27
-rw-r--r--net/core/devmem.c14
-rw-r--r--net/core/devmem.h9
-rw-r--r--net/core/dst.c10
-rw-r--r--net/core/dst_cache.c2
-rw-r--r--net/core/filter.c259
-rw-r--r--net/core/gen_estimator.c2
-rw-r--r--net/core/gro.c12
-rw-r--r--net/core/gro_cells.c9
-rw-r--r--net/core/hotdata.c5
-rw-r--r--net/core/ieee8021q_helpers.c44
-rw-r--r--net/core/link_watch.c4
-rw-r--r--net/core/lwt_bpf.c4
-rw-r--r--net/core/neighbour.c558
-rw-r--r--net/core/net-procfs.c3
-rw-r--r--net/core/net-sysfs.c90
-rw-r--r--net/core/net-sysfs.h2
-rw-r--r--net/core/net_namespace.c104
-rw-r--r--net/core/netclassid_cgroup.c4
-rw-r--r--net/core/netdev-genl-gen.c5
-rw-r--r--net/core/netdev-genl.c136
-rw-r--r--net/core/netdev_queues.c27
-rw-r--r--net/core/netdev_rx_queue.c15
-rw-r--r--net/core/netpoll.c490
-rw-r--r--net/core/page_pool.c159
-rw-r--r--net/core/pktgen.c7
-rw-r--r--net/core/request_sock.c4
-rw-r--r--net/core/rtnetlink.c25
-rw-r--r--net/core/scm.c36
-rw-r--r--net/core/selftests.c72
-rw-r--r--net/core/skbuff.c77
-rw-r--r--net/core/skmsg.c9
-rw-r--r--net/core/sock.c181
-rw-r--r--net/core/sock_diag.c2
-rw-r--r--net/core/sock_map.c13
-rw-r--r--net/core/stream.c8
-rw-r--r--net/core/sysctl_net_core.c37
-rw-r--r--net/core/xdp.c21
-rw-r--r--net/devlink/core.c2
-rw-r--r--net/devlink/health.c109
-rw-r--r--net/devlink/netlink_gen.c20
-rw-r--r--net/devlink/netlink_gen.h1
-rw-r--r--net/devlink/param.c30
-rw-r--r--net/devlink/port.c35
-rw-r--r--net/devlink/rate.c131
-rw-r--r--net/dsa/Kconfig16
-rw-r--r--net/dsa/dsa.c3
-rw-r--r--net/dsa/tag_brcm.c119
-rw-r--r--net/dsa/user.c2
-rw-r--r--net/ethernet/eth.c5
-rw-r--r--net/ethtool/Makefile2
-rw-r--r--net/ethtool/common.c82
-rw-r--r--net/ethtool/common.h15
-rw-r--r--net/ethtool/fec.c75
-rw-r--r--net/ethtool/ioctl.c421
-rw-r--r--net/ethtool/netlink.c95
-rw-r--r--net/ethtool/netlink.h12
-rw-r--r--net/ethtool/pause.c1
-rw-r--r--net/ethtool/pse-pd.c65
-rw-r--r--net/ethtool/rss.c942
-rw-r--r--net/ethtool/tsconfig.c12
-rw-r--r--net/handshake/tlshd.c6
-rw-r--r--net/hsr/hsr_device.c28
-rw-r--r--net/hsr/hsr_main.c4
-rw-r--r--net/hsr/hsr_main.h3
-rw-r--r--net/hsr/hsr_netlink.c8
-rw-r--r--net/hsr/hsr_slave.c13
-rw-r--r--net/ipv4/af_inet.c12
-rw-r--r--net/ipv4/arp.c18
-rw-r--r--net/ipv4/cipso_ipv4.c13
-rw-r--r--net/ipv4/datagram.c2
-rw-r--r--net/ipv4/devinet.c7
-rw-r--r--net/ipv4/esp4.c4
-rw-r--r--net/ipv4/fib_frontend.c9
-rw-r--r--net/ipv4/fib_rules.c4
-rw-r--r--net/ipv4/fib_semantics.c10
-rw-r--r--net/ipv4/fou_core.c32
-rw-r--r--net/ipv4/fou_nl.c4
-rw-r--r--net/ipv4/icmp.c61
-rw-r--r--net/ipv4/igmp.c2
-rw-r--r--net/ipv4/inet_connection_sock.c84
-rw-r--r--net/ipv4/inet_diag.c572
-rw-r--r--net/ipv4/inet_fragment.c2
-rw-r--r--net/ipv4/inet_hashtables.c112
-rw-r--r--net/ipv4/inet_timewait_sock.c15
-rw-r--r--net/ipv4/ip_fragment.c6
-rw-r--r--net/ipv4/ip_gre.c4
-rw-r--r--net/ipv4/ip_input.c51
-rw-r--r--net/ipv4/ip_options.c5
-rw-r--r--net/ipv4/ip_output.c30
-rw-r--r--net/ipv4/ip_tunnel.c18
-rw-r--r--net/ipv4/ip_tunnel_core.c10
-rw-r--r--net/ipv4/ip_vti.c4
-rw-r--r--net/ipv4/ipcomp.c2
-rw-r--r--net/ipv4/ipconfig.c6
-rw-r--r--net/ipv4/ipmr.c174
-rw-r--r--net/ipv4/netfilter.c13
-rw-r--r--net/ipv4/netfilter/Kconfig23
-rw-r--r--net/ipv4/netfilter/ipt_rpfilter.c4
-rw-r--r--net/ipv4/netfilter/nf_dup_ipv4.c4
-rw-r--r--net/ipv4/netfilter/nf_reject_ipv4.c58
-rw-r--r--net/ipv4/netfilter/nf_socket_ipv4.c3
-rw-r--r--net/ipv4/netfilter/nf_tproxy_ipv4.c5
-rw-r--r--net/ipv4/netfilter/nft_fib_ipv4.c4
-rw-r--r--net/ipv4/nexthop.c54
-rw-r--r--net/ipv4/ping.c72
-rw-r--r--net/ipv4/proc.c66
-rw-r--r--net/ipv4/raw.c11
-rw-r--r--net/ipv4/raw_diag.c10
-rw-r--r--net/ipv4/route.c75
-rw-r--r--net/ipv4/syncookies.c7
-rw-r--r--net/ipv4/sysctl_net_ipv4.c19
-rw-r--r--net/ipv4/tcp.c139
-rw-r--r--net/ipv4/tcp_ao.c9
-rw-r--r--net/ipv4/tcp_bpf.c5
-rw-r--r--net/ipv4/tcp_cdg.c2
-rw-r--r--net/ipv4/tcp_diag.c461
-rw-r--r--net/ipv4/tcp_fastopen.c12
-rw-r--r--net/ipv4/tcp_input.c658
-rw-r--r--net/ipv4/tcp_ipv4.c398
-rw-r--r--net/ipv4/tcp_metrics.c10
-rw-r--r--net/ipv4/tcp_minisocks.c82
-rw-r--r--net/ipv4/tcp_offload.c5
-rw-r--r--net/ipv4/tcp_output.c438
-rw-r--r--net/ipv4/tcp_recovery.c2
-rw-r--r--net/ipv4/tcp_timer.c8
-rw-r--r--net/ipv4/udp.c198
-rw-r--r--net/ipv4/udp_diag.c10
-rw-r--r--net/ipv4/udp_impl.h1
-rw-r--r--net/ipv4/udp_offload.c15
-rw-r--r--net/ipv4/udp_tunnel_core.c24
-rw-r--r--net/ipv4/udp_tunnel_nic.c80
-rw-r--r--net/ipv4/udplite.c2
-rw-r--r--net/ipv4/xfrm4_input.c3
-rw-r--r--net/ipv4/xfrm4_output.c2
-rw-r--r--net/ipv4/xfrm4_policy.c4
-rw-r--r--net/ipv6/Kconfig7
-rw-r--r--net/ipv6/addrconf.c121
-rw-r--r--net/ipv6/addrlabel.c32
-rw-r--r--net/ipv6/af_inet6.c4
-rw-r--r--net/ipv6/ah6.c50
-rw-r--r--net/ipv6/anycast.c101
-rw-r--r--net/ipv6/calipso.c14
-rw-r--r--net/ipv6/datagram.c8
-rw-r--r--net/ipv6/esp6.c4
-rw-r--r--net/ipv6/exthdrs.c16
-rw-r--r--net/ipv6/icmp.c11
-rw-r--r--net/ipv6/ila/ila_lwt.c2
-rw-r--r--net/ipv6/inet6_connection_sock.c6
-rw-r--r--net/ipv6/inet6_hashtables.c62
-rw-r--r--net/ipv6/ioam6.c17
-rw-r--r--net/ipv6/ioam6_iptunnel.c4
-rw-r--r--net/ipv6/ip6_fib.c50
-rw-r--r--net/ipv6/ip6_gre.c118
-rw-r--r--net/ipv6/ip6_icmp.c6
-rw-r--r--net/ipv6/ip6_input.c40
-rw-r--r--net/ipv6/ip6_offload.c4
-rw-r--r--net/ipv6/ip6_output.c94
-rw-r--r--net/ipv6/ip6_tunnel.c52
-rw-r--r--net/ipv6/ip6_udp_tunnel.c20
-rw-r--r--net/ipv6/ip6_vti.c4
-rw-r--r--net/ipv6/ip6mr.c157
-rw-r--r--net/ipv6/ipcomp6.c2
-rw-r--r--net/ipv6/ipv6_sockglue.c34
-rw-r--r--net/ipv6/mcast.c422
-rw-r--r--net/ipv6/ndisc.c186
-rw-r--r--net/ipv6/netfilter.c9
-rw-r--r--net/ipv6/netfilter/Kconfig20
-rw-r--r--net/ipv6/netfilter/nf_dup_ipv6.c2
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c74
-rw-r--r--net/ipv6/netfilter/nf_socket_ipv6.c3
-rw-r--r--net/ipv6/netfilter/nf_tproxy_ipv6.c5
-rw-r--r--net/ipv6/output_core.c10
-rw-r--r--net/ipv6/ping.c3
-rw-r--r--net/ipv6/proc.c91
-rw-r--r--net/ipv6/raw.c13
-rw-r--r--net/ipv6/reassembly.c10
-rw-r--r--net/ipv6/route.c139
-rw-r--r--net/ipv6/rpl_iptunnel.c12
-rw-r--r--net/ipv6/seg6.c7
-rw-r--r--net/ipv6/seg6_hmac.c215
-rw-r--r--net/ipv6/seg6_iptunnel.c26
-rw-r--r--net/ipv6/seg6_local.c26
-rw-r--r--net/ipv6/sit.c106
-rw-r--r--net/ipv6/syncookies.c4
-rw-r--r--net/ipv6/tcp_ipv6.c111
-rw-r--r--net/ipv6/tcpv6_offload.c3
-rw-r--r--net/ipv6/udp.c30
-rw-r--r--net/ipv6/udp_impl.h1
-rw-r--r--net/ipv6/udp_offload.c2
-rw-r--r--net/ipv6/udplite.c2
-rw-r--r--net/ipv6/xfrm6_input.c3
-rw-r--r--net/ipv6/xfrm6_output.c2
-rw-r--r--net/ipv6/xfrm6_tunnel.c2
-rw-r--r--net/iucv/af_iucv.c4
-rw-r--r--net/iucv/iucv.c1
-rw-r--r--net/kcm/kcmsock.c19
-rw-r--r--net/key/af_key.c4
-rw-r--r--net/l2tp/l2tp_ip6.c2
-rw-r--r--net/l2tp/l2tp_ppp.c25
-rw-r--r--net/llc/af_llc.c6
-rw-r--r--net/llc/llc_proc.c2
-rw-r--r--net/mac80211/agg-rx.c6
-rw-r--r--net/mac80211/agg-tx.c3
-rw-r--r--net/mac80211/cfg.c381
-rw-r--r--net/mac80211/chan.c40
-rw-r--r--net/mac80211/debug.h5
-rw-r--r--net/mac80211/debugfs.c6
-rw-r--r--net/mac80211/debugfs_netdev.c4
-rw-r--r--net/mac80211/debugfs_sta.c2
-rw-r--r--net/mac80211/driver-ops.c5
-rw-r--r--net/mac80211/driver-ops.h61
-rw-r--r--net/mac80211/ethtool.c6
-rw-r--r--net/mac80211/ht.c40
-rw-r--r--net/mac80211/ibss.c4
-rw-r--r--net/mac80211/ieee80211_i.h90
-rw-r--r--net/mac80211/iface.c64
-rw-r--r--net/mac80211/key.c66
-rw-r--r--net/mac80211/link.c15
-rw-r--r--net/mac80211/main.c121
-rw-r--r--net/mac80211/mesh.c5
-rw-r--r--net/mac80211/mesh_ps.c2
-rw-r--r--net/mac80211/mlme.c429
-rw-r--r--net/mac80211/offchannel.c12
-rw-r--r--net/mac80211/parse.c6
-rw-r--r--net/mac80211/pm.c2
-rw-r--r--net/mac80211/rate.c11
-rw-r--r--net/mac80211/rx.c157
-rw-r--r--net/mac80211/s1g.c26
-rw-r--r--net/mac80211/scan.c36
-rw-r--r--net/mac80211/sta_info.c423
-rw-r--r--net/mac80211/sta_info.h59
-rw-r--r--net/mac80211/status.c21
-rw-r--r--net/mac80211/tdls.c2
-rw-r--r--net/mac80211/tests/Makefile2
-rw-r--r--net/mac80211/tests/chan-mode.c30
-rw-r--r--net/mac80211/tests/s1g_tim.c356
-rw-r--r--net/mac80211/trace.h105
-rw-r--r--net/mac80211/tx.c332
-rw-r--r--net/mac80211/util.c169
-rw-r--r--net/mac80211/vht.c5
-rw-r--r--net/mctp/af_mctp.c218
-rw-r--r--net/mctp/route.c688
-rw-r--r--net/mctp/test/route-test.c797
-rw-r--r--net/mctp/test/sock-test.c396
-rw-r--r--net/mctp/test/utils.c232
-rw-r--r--net/mctp/test/utils.h61
-rw-r--r--net/mpls/af_mpls.c10
-rw-r--r--net/mptcp/crypto.c35
-rw-r--r--net/mptcp/ctrl.c13
-rw-r--r--net/mptcp/mib.c17
-rw-r--r--net/mptcp/mib.h7
-rw-r--r--net/mptcp/mptcp_diag.c15
-rw-r--r--net/mptcp/options.c18
-rw-r--r--net/mptcp/pm.c80
-rw-r--r--net/mptcp/pm_kernel.c576
-rw-r--r--net/mptcp/pm_netlink.c16
-rw-r--r--net/mptcp/pm_userspace.c2
-rw-r--r--net/mptcp/protocol.c337
-rw-r--r--net/mptcp/protocol.h65
-rw-r--r--net/mptcp/sockopt.c66
-rw-r--r--net/mptcp/subflow.c55
-rw-r--r--net/ncsi/internal.h2
-rw-r--r--net/ncsi/ncsi-rsp.c1
-rw-r--r--net/netfilter/Kconfig30
-rw-r--r--net/netfilter/Makefile1
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h8
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c11
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_est.c19
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c2
-rw-r--r--net/netfilter/nf_bpf_link.c8
-rw-r--r--net/netfilter/nf_conntrack_core.c50
-rw-r--r--net/netfilter/nf_conntrack_ecache.c2
-rw-r--r--net/netfilter/nf_conntrack_helper.c4
-rw-r--r--net/netfilter/nf_conntrack_netlink.c105
-rw-r--r--net/netfilter/nf_conntrack_proto.c6
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c826
-rw-r--r--net/netfilter/nf_conntrack_standalone.c121
-rw-r--r--net/netfilter/nf_log.c26
-rw-r--r--net/netfilter/nf_nat_core.c6
-rw-r--r--net/netfilter/nf_nat_proto.c43
-rw-r--r--net/netfilter/nf_tables_api.c314
-rw-r--r--net/netfilter/nf_tables_trace.c3
-rw-r--r--net/netfilter/nfnetlink.c3
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c5
-rw-r--r--net/netfilter/nfnetlink_hook.c76
-rw-r--r--net/netfilter/nft_chain_filter.c2
-rw-r--r--net/netfilter/nft_dynset.c10
-rw-r--r--net/netfilter/nft_exthdr.c8
-rw-r--r--net/netfilter/nft_flow_offload.c4
-rw-r--r--net/netfilter/nft_lookup.c67
-rw-r--r--net/netfilter/nft_objref.c44
-rw-r--r--net/netfilter/nft_payload.c20
-rw-r--r--net/netfilter/nft_set_bitmap.c14
-rw-r--r--net/netfilter/nft_set_hash.c154
-rw-r--r--net/netfilter/nft_set_pipapo.c285
-rw-r--r--net/netfilter/nft_set_pipapo.h8
-rw-r--r--net/netfilter/nft_set_pipapo_avx2.c146
-rw-r--r--net/netfilter/nft_set_pipapo_avx2.h4
-rw-r--r--net/netfilter/nft_set_rbtree.c81
-rw-r--r--net/netfilter/nft_socket.c2
-rw-r--r--net/netfilter/x_tables.c16
-rw-r--r--net/netfilter/xt_nfacct.c4
-rw-r--r--net/netlabel/netlabel_user.c8
-rw-r--r--net/netlink/af_netlink.c91
-rw-r--r--net/netlink/diag.c2
-rw-r--r--net/netlink/genetlink.c3
-rw-r--r--net/nfc/nci/ntf.c135
-rw-r--r--net/nfc/nci/uart.c8
-rw-r--r--net/nfc/netlink.c6
-rw-r--r--net/openvswitch/actions.c29
-rw-r--r--net/openvswitch/datapath.c50
-rw-r--r--net/openvswitch/datapath.h6
-rw-r--r--net/openvswitch/dp_notify.c2
-rw-r--r--net/openvswitch/flow.c12
-rw-r--r--net/openvswitch/flow_table.c7
-rw-r--r--net/openvswitch/vport.c1
-rw-r--r--net/packet/af_packet.c175
-rw-r--r--net/packet/diag.c4
-rw-r--r--net/packet/internal.h14
-rw-r--r--net/phonet/af_phonet.c4
-rw-r--r--net/phonet/pep.c8
-rw-r--r--net/phonet/socket.c29
-rw-r--r--net/psp/Kconfig15
-rw-r--r--net/psp/Makefile5
-rw-r--r--net/psp/psp-nl-gen.c119
-rw-r--r--net/psp/psp-nl-gen.h39
-rw-r--r--net/psp/psp.h54
-rw-r--r--net/psp/psp_main.c322
-rw-r--r--net/psp/psp_nl.c505
-rw-r--r--net/psp/psp_sock.c292
-rw-r--r--net/rds/af_rds.c4
-rw-r--r--net/rds/connection.c9
-rw-r--r--net/rds/ib_frmr.c20
-rw-r--r--net/rds/ib_mr.h1
-rw-r--r--net/rds/ib_rdma.c3
-rw-r--r--net/rds/ib_recv.c2
-rw-r--r--net/rds/message.c4
-rw-r--r--net/rds/rds.h2
-rw-r--r--net/rds/recv.c4
-rw-r--r--net/rds/send.c6
-rw-r--r--net/rds/tcp_listen.c30
-rw-r--r--net/rfkill/input.c2
-rw-r--r--net/rfkill/rfkill-gpio.c4
-rw-r--r--net/rose/af_rose.c13
-rw-r--r--net/rose/rose_in.c15
-rw-r--r--net/rose/rose_route.c77
-rw-r--r--net/rose/rose_timer.c2
-rw-r--r--net/rxrpc/ar-internal.h19
-rw-r--r--net/rxrpc/call_accept.c18
-rw-r--r--net/rxrpc/call_object.c28
-rw-r--r--net/rxrpc/io_thread.c14
-rw-r--r--net/rxrpc/output.c27
-rw-r--r--net/rxrpc/peer_object.c6
-rw-r--r--net/rxrpc/recvmsg.c23
-rw-r--r--net/rxrpc/rxgk.c18
-rw-r--r--net/rxrpc/rxgk_app.c29
-rw-r--r--net/rxrpc/rxgk_common.h14
-rw-r--r--net/rxrpc/rxperf.c2
-rw-r--r--net/rxrpc/security.c8
-rw-r--r--net/sched/Kconfig12
-rw-r--r--net/sched/Makefile1
-rw-r--r--net/sched/act_api.c21
-rw-r--r--net/sched/act_connmark.c18
-rw-r--r--net/sched/act_csum.c18
-rw-r--r--net/sched/act_ct.c30
-rw-r--r--net/sched/act_ctinfo.c42
-rw-r--r--net/sched/act_mpls.c21
-rw-r--r--net/sched/act_nat.c25
-rw-r--r--net/sched/act_pedit.c20
-rw-r--r--net/sched/act_police.c18
-rw-r--r--net/sched/act_simple.c1
-rw-r--r--net/sched/act_skbedit.c20
-rw-r--r--net/sched/act_skbmod.c22
-rw-r--r--net/sched/act_tunnel_key.c16
-rw-r--r--net/sched/act_vlan.c16
-rw-r--r--net/sched/bpf_qdisc.c9
-rw-r--r--net/sched/em_text.c2
-rw-r--r--net/sched/sch_api.c56
-rw-r--r--net/sched/sch_cake.c19
-rw-r--r--net/sched/sch_codel.c12
-rw-r--r--net/sched/sch_dualpi2.c1176
-rw-r--r--net/sched/sch_ets.c11
-rw-r--r--net/sched/sch_fq.c12
-rw-r--r--net/sched/sch_fq_codel.c12
-rw-r--r--net/sched/sch_fq_pie.c12
-rw-r--r--net/sched/sch_generic.c2
-rw-r--r--net/sched/sch_hfsc.c16
-rw-r--r--net/sched/sch_hhf.c12
-rw-r--r--net/sched/sch_htb.c6
-rw-r--r--net/sched/sch_mqprio.c2
-rw-r--r--net/sched/sch_netem.c40
-rw-r--r--net/sched/sch_pie.c12
-rw-r--r--net/sched/sch_qfq.c35
-rw-r--r--net/sched/sch_taprio.c39
-rw-r--r--net/sctp/Kconfig47
-rw-r--r--net/sctp/auth.c166
-rw-r--r--net/sctp/chunk.c3
-rw-r--r--net/sctp/diag.c2
-rw-r--r--net/sctp/endpointola.c23
-rw-r--r--net/sctp/input.c4
-rw-r--r--net/sctp/inqueue.c13
-rw-r--r--net/sctp/ipv6.c9
-rw-r--r--net/sctp/proc.c16
-rw-r--r--net/sctp/protocol.c17
-rw-r--r--net/sctp/sm_make_chunk.c60
-rw-r--r--net/sctp/sm_statefuns.c8
-rw-r--r--net/sctp/socket.c49
-rw-r--r--net/sctp/sysctl.c49
-rw-r--r--net/sctp/transport.c2
-rw-r--r--net/smc/Kconfig16
-rw-r--r--net/smc/Makefile1
-rw-r--r--net/smc/af_smc.c56
-rw-r--r--net/smc/smc.h8
-rw-r--r--net/smc/smc_clc.c81
-rw-r--r--net/smc/smc_core.c42
-rw-r--r--net/smc/smc_core.h5
-rw-r--r--net/smc/smc_diag.c4
-rw-r--r--net/smc/smc_ib.c21
-rw-r--r--net/smc/smc_inet.c13
-rw-r--r--net/smc/smc_ism.c233
-rw-r--r--net/smc/smc_ism.h36
-rw-r--r--net/smc/smc_loopback.c427
-rw-r--r--net/smc/smc_loopback.h60
-rw-r--r--net/smc/smc_pnet.c72
-rw-r--r--net/smc/smc_tx.c3
-rw-r--r--net/socket.c92
-rw-r--r--net/strparser/strparser.c2
-rw-r--r--net/sunrpc/Kconfig17
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c15
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c4
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_xdr.c8
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c5
-rw-r--r--net/sunrpc/clnt.c36
-rw-r--r--net/sunrpc/rpc_pipe.c532
-rw-r--r--net/sunrpc/sched.c4
-rw-r--r--net/sunrpc/socklib.c166
-rw-r--r--net/sunrpc/svc.c65
-rw-r--r--net/sunrpc/svc_xprt.c20
-rw-r--r--net/sunrpc/svcsock.c69
-rw-r--r--net/sunrpc/sysfs.c2
-rw-r--r--net/sunrpc/xdr.c121
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c2
-rw-r--r--net/sunrpc/xprtsock.c40
-rw-r--r--net/tipc/addr.c6
-rw-r--r--net/tipc/addr.h2
-rw-r--r--net/tipc/crypto.c2
-rw-r--r--net/tipc/link.c9
-rw-r--r--net/tipc/socket.c8
-rw-r--r--net/tipc/topsrv.c6
-rw-r--r--net/tipc/udp_media.c16
-rw-r--r--net/tls/tls.h6
-rw-r--r--net/tls/tls_device.c20
-rw-r--r--net/tls/tls_main.c7
-rw-r--r--net/tls/tls_proc.c10
-rw-r--r--net/tls/tls_strp.c28
-rw-r--r--net/tls/tls_sw.c57
-rw-r--r--net/unix/af_unix.c300
-rw-r--r--net/unix/diag.c2
-rw-r--r--net/unix/garbage.c2
-rw-r--r--net/vmw_vsock/Kconfig2
-rw-r--r--net/vmw_vsock/af_vsock.c106
-rw-r--r--net/vmw_vsock/hyperv_transport.c17
-rw-r--r--net/vmw_vsock/virtio_transport.c22
-rw-r--r--net/vmw_vsock/virtio_transport_common.c9
-rw-r--r--net/vmw_vsock/vmci_transport.c4
-rw-r--r--net/vmw_vsock/vsock_loopback.c2
-rw-r--r--net/wireless/chan.c103
-rw-r--r--net/wireless/core.c32
-rw-r--r--net/wireless/core.h11
-rw-r--r--net/wireless/ethtool.c2
-rw-r--r--net/wireless/mlme.c34
-rw-r--r--net/wireless/nl80211.c1649
-rw-r--r--net/wireless/rdev-ops.h45
-rw-r--r--net/wireless/reg.c106
-rw-r--r--net/wireless/scan.c216
-rw-r--r--net/wireless/sme.c44
-rw-r--r--net/wireless/trace.h220
-rw-r--r--net/wireless/util.c117
-rw-r--r--net/wireless/wext-compat.c10
-rw-r--r--net/wireless/wext-core.c2
-rw-r--r--net/x25/af_x25.c2
-rw-r--r--net/x25/x25_dev.c22
-rw-r--r--net/xdp/xsk.c256
-rw-r--r--net/xdp/xsk_diag.c2
-rw-r--r--net/xdp/xsk_queue.h57
-rw-r--r--net/xfrm/espintcp.c6
-rw-r--r--net/xfrm/xfrm_device.c15
-rw-r--r--net/xfrm/xfrm_input.c17
-rw-r--r--net/xfrm/xfrm_interface_core.c7
-rw-r--r--net/xfrm/xfrm_ipcomp.c3
-rw-r--r--net/xfrm/xfrm_policy.c20
-rw-r--r--net/xfrm/xfrm_proc.c12
-rw-r--r--net/xfrm/xfrm_state.c151
-rw-r--r--net/xfrm/xfrm_user.c13
602 files changed, 23020 insertions, 14646 deletions
diff --git a/net/6lowpan/ndisc.c b/net/6lowpan/ndisc.c
index c40b98f7743c..868d28583c0a 100644
--- a/net/6lowpan/ndisc.c
+++ b/net/6lowpan/ndisc.c
@@ -20,9 +20,8 @@ static int lowpan_ndisc_parse_802154_options(const struct net_device *dev,
switch (nd_opt->nd_opt_len) {
case NDISC_802154_SHORT_ADDR_LENGTH:
if (ndopts->nd_802154_opt_array[nd_opt->nd_opt_type])
- ND_PRINTK(2, warn,
- "%s: duplicated short addr ND6 option found: type=%d\n",
- __func__, nd_opt->nd_opt_type);
+ net_dbg_ratelimited("%s: duplicated short addr ND6 option found: type=%d\n",
+ __func__, nd_opt->nd_opt_type);
else
ndopts->nd_802154_opt_array[nd_opt->nd_opt_type] = nd_opt;
return 1;
@@ -63,8 +62,7 @@ static void lowpan_ndisc_802154_update(struct neighbour *n, u32 flags,
lladdr_short = __ndisc_opt_addr_data(ndopts->nd_802154_opts_src_lladdr,
IEEE802154_SHORT_ADDR_LEN, 0);
if (!lladdr_short) {
- ND_PRINTK(2, warn,
- "NA: invalid short link-layer address length\n");
+ net_dbg_ratelimited("NA: invalid short link-layer address length\n");
return;
}
}
@@ -75,8 +73,7 @@ static void lowpan_ndisc_802154_update(struct neighbour *n, u32 flags,
lladdr_short = __ndisc_opt_addr_data(ndopts->nd_802154_opts_tgt_lladdr,
IEEE802154_SHORT_ADDR_LEN, 0);
if (!lladdr_short) {
- ND_PRINTK(2, warn,
- "NA: invalid short link-layer address length\n");
+ net_dbg_ratelimited("NA: invalid short link-layer address length\n");
return;
}
}
@@ -209,9 +206,8 @@ static void lowpan_ndisc_prefix_rcv_add_addr(struct net *net,
sllao, tokenized, valid_lft,
prefered_lft);
if (err)
- ND_PRINTK(2, warn,
- "RA: could not add a short address based address for prefix: %pI6c\n",
- &pinfo->prefix);
+ net_dbg_ratelimited("RA: could not add a short address based address for prefix: %pI6c\n",
+ &pinfo->prefix);
}
}
#endif
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 06908e37c3d9..fda3a80e9340 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -357,6 +357,35 @@ static int __vlan_device_event(struct net_device *dev, unsigned long event)
return err;
}
+static void vlan_vid0_add(struct net_device *dev)
+{
+ struct vlan_info *vlan_info;
+ int err;
+
+ if (!(dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ return;
+
+ pr_info("adding VLAN 0 to HW filter on device %s\n", dev->name);
+
+ err = vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
+ if (err)
+ return;
+
+ vlan_info = rtnl_dereference(dev->vlan_info);
+ vlan_info->auto_vid0 = true;
+}
+
+static void vlan_vid0_del(struct net_device *dev)
+{
+ struct vlan_info *vlan_info = rtnl_dereference(dev->vlan_info);
+
+ if (!vlan_info || !vlan_info->auto_vid0)
+ return;
+
+ vlan_info->auto_vid0 = false;
+ vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
+}
+
static int vlan_device_event(struct notifier_block *unused, unsigned long event,
void *ptr)
{
@@ -378,15 +407,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
return notifier_from_errno(err);
}
- if ((event == NETDEV_UP) &&
- (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
- pr_info("adding VLAN 0 to HW filter on device %s\n",
- dev->name);
- vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
- }
- if (event == NETDEV_DOWN &&
- (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
- vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
+ if (event == NETDEV_UP)
+ vlan_vid0_add(dev);
+ else if (event == NETDEV_DOWN)
+ vlan_vid0_del(dev);
vlan_info = rtnl_dereference(dev->vlan_info);
if (!vlan_info)
@@ -446,7 +470,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
list_add(&vlandev->close_list, &close_list);
}
- dev_close_many(&close_list, false);
+ netif_close_many(&close_list, false);
list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) {
vlan_stacked_transfer_operstate(dev, vlandev,
@@ -459,7 +483,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
case NETDEV_UP:
/* Put all VLANs for this dev in the up state too. */
vlan_group_for_each_dev(grp, i, vlandev) {
- flgs = dev_get_flags(vlandev);
+ flgs = netif_get_flags(vlandev);
if (flgs & IFF_UP)
continue;
@@ -741,3 +765,4 @@ module_exit(vlan_cleanup_module);
MODULE_DESCRIPTION("802.1Q/802.1ad VLAN Protocol");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 5eaf38875554..c7ffe591d593 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -33,6 +33,7 @@ struct vlan_info {
struct vlan_group grp;
struct list_head vid_list;
unsigned int nr_vids;
+ bool auto_vid0;
struct rcu_head rcu;
};
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 339ec4e54778..a516745f732f 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -666,7 +666,6 @@ static void p9_poll_mux(struct p9_conn *m)
static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
{
- __poll_t n;
int err;
struct p9_trans_fd *ts = client->trans;
struct p9_conn *m = &ts->conn;
@@ -686,13 +685,7 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
list_add_tail(&req->req_list, &m->unsent_req_list);
spin_unlock(&m->req_lock);
- if (test_and_clear_bit(Wpending, &m->wsched))
- n = EPOLLOUT;
- else
- n = p9_fd_poll(m->client, NULL, NULL);
-
- if (n & EPOLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
- schedule_work(&m->wq);
+ p9_poll_mux(m);
return 0;
}
@@ -726,10 +719,10 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
spin_lock(&m->req_lock);
- /* Ignore cancelled request if message has been received
- * before lock.
- */
- if (req->status == REQ_STATUS_RCVD) {
+ /* Ignore cancelled request if status changed since the request was
+ * processed in p9_client_flush()
+ */
+ if (req->status != REQ_STATUS_SENT) {
spin_unlock(&m->req_lock);
return 0;
}
diff --git a/net/9p/trans_usbg.c b/net/9p/trans_usbg.c
index 6b694f117aef..468f7e8f0277 100644
--- a/net/9p/trans_usbg.c
+++ b/net/9p/trans_usbg.c
@@ -231,6 +231,8 @@ static void usb9pfs_rx_complete(struct usb_ep *ep, struct usb_request *req)
struct f_usb9pfs *usb9pfs = ep->driver_data;
struct usb_composite_dev *cdev = usb9pfs->function.config->cdev;
struct p9_req_t *p9_rx_req;
+ unsigned int req_size = req->actual;
+ int status = REQ_STATUS_RCVD;
if (req->status) {
dev_err(&cdev->gadget->dev, "%s usb9pfs complete --> %d, %d/%d\n",
@@ -242,11 +244,19 @@ static void usb9pfs_rx_complete(struct usb_ep *ep, struct usb_request *req)
if (!p9_rx_req)
return;
- memcpy(p9_rx_req->rc.sdata, req->buf, req->actual);
+ if (req_size > p9_rx_req->rc.capacity) {
+ dev_err(&cdev->gadget->dev,
+ "%s received data size %u exceeds buffer capacity %zu\n",
+ ep->name, req_size, p9_rx_req->rc.capacity);
+ req_size = 0;
+ status = REQ_STATUS_ERROR;
+ }
+
+ memcpy(p9_rx_req->rc.sdata, req->buf, req_size);
- p9_rx_req->rc.size = req->actual;
+ p9_rx_req->rc.size = req_size;
- p9_client_cb(usb9pfs->client, p9_rx_req, REQ_STATUS_RCVD);
+ p9_client_cb(usb9pfs->client, p9_rx_req, status);
p9_req_put(usb9pfs->client, p9_rx_req);
complete(&usb9pfs->received);
diff --git a/net/Kconfig b/net/Kconfig
index ebc80a98fc91..1d3f757d4b07 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -82,11 +82,13 @@ config NET_CRC32C
menu "Networking options"
source "net/packet/Kconfig"
+source "net/psp/Kconfig"
source "net/unix/Kconfig"
source "net/tls/Kconfig"
source "net/xfrm/Kconfig"
source "net/iucv/Kconfig"
source "net/smc/Kconfig"
+source "drivers/dibs/Kconfig"
source "net/xdp/Kconfig"
config NET_HANDSHAKE
@@ -247,7 +249,7 @@ source "net/ipv4/netfilter/Kconfig"
source "net/ipv6/netfilter/Kconfig"
source "net/bridge/netfilter/Kconfig"
-endif
+endif # if NETFILTER
source "net/sctp/Kconfig"
source "net/rds/Kconfig"
@@ -408,9 +410,9 @@ config NET_DROP_MONITOR
just checking the various proc files and other utilities for
drop statistics, say N here.
-endmenu
+endmenu # Network testing
-endmenu
+endmenu # Networking options
source "net/ax25/Kconfig"
source "net/can/Kconfig"
diff --git a/net/Makefile b/net/Makefile
index aac960c41db6..90e3d72bf58b 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_INET) += ipv4/
obj-$(CONFIG_TLS) += tls/
obj-$(CONFIG_XFRM) += xfrm/
obj-$(CONFIG_UNIX) += unix/
+obj-$(CONFIG_INET_PSP) += psp/
obj-y += ipv6/
obj-$(CONFIG_PACKET) += packet/
obj-$(CONFIG_NET_KEY) += key/
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 9c787e2e4b17..4744e3fd4544 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -35,6 +35,7 @@
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/etherdevice.h>
+#include <linux/refcount.h>
int sysctl_aarp_expiry_time = AARP_EXPIRY_TIME;
int sysctl_aarp_tick_time = AARP_TICK_TIME;
@@ -44,6 +45,7 @@ int sysctl_aarp_resolve_time = AARP_RESOLVE_TIME;
/* Lists of aarp entries */
/**
* struct aarp_entry - AARP entry
+ * @refcnt: Reference count
* @last_sent: Last time we xmitted the aarp request
* @packet_queue: Queue of frames wait for resolution
* @status: Used for proxy AARP
@@ -55,6 +57,7 @@ int sysctl_aarp_resolve_time = AARP_RESOLVE_TIME;
* @next: Next entry in chain
*/
struct aarp_entry {
+ refcount_t refcnt;
/* These first two are only used for unresolved entries */
unsigned long last_sent;
struct sk_buff_head packet_queue;
@@ -79,6 +82,17 @@ static DEFINE_RWLOCK(aarp_lock);
/* Used to walk the list and purge/kick entries. */
static struct timer_list aarp_timer;
+static inline void aarp_entry_get(struct aarp_entry *a)
+{
+ refcount_inc(&a->refcnt);
+}
+
+static inline void aarp_entry_put(struct aarp_entry *a)
+{
+ if (refcount_dec_and_test(&a->refcnt))
+ kfree(a);
+}
+
/*
* Delete an aarp queue
*
@@ -87,7 +101,7 @@ static struct timer_list aarp_timer;
static void __aarp_expire(struct aarp_entry *a)
{
skb_queue_purge(&a->packet_queue);
- kfree(a);
+ aarp_entry_put(a);
}
/*
@@ -380,9 +394,11 @@ static void aarp_purge(void)
static struct aarp_entry *aarp_alloc(void)
{
struct aarp_entry *a = kmalloc(sizeof(*a), GFP_ATOMIC);
+ if (!a)
+ return NULL;
- if (a)
- skb_queue_head_init(&a->packet_queue);
+ refcount_set(&a->refcnt, 1);
+ skb_queue_head_init(&a->packet_queue);
return a;
}
@@ -477,6 +493,7 @@ int aarp_proxy_probe_network(struct atalk_iface *atif, struct atalk_addr *sa)
entry->dev = atif->dev;
write_lock_bh(&aarp_lock);
+ aarp_entry_get(entry);
hash = sa->s_node % (AARP_HASH_SIZE - 1);
entry->next = proxies[hash];
@@ -502,6 +519,7 @@ int aarp_proxy_probe_network(struct atalk_iface *atif, struct atalk_addr *sa)
retval = 1;
}
+ aarp_entry_put(entry);
write_unlock_bh(&aarp_lock);
out:
return retval;
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index 9c1241292d1d..01787fb6a7bc 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -181,7 +181,7 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v)
sk_wmem_alloc_get(s),
sk_rmem_alloc_get(s),
s->sk_state,
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)));
+ from_kuid_munged(seq_user_ns(seq), sk_uid(s)));
out:
return 0;
}
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 73ea7e67f05a..30242fe10341 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -576,6 +576,7 @@ static int atrtr_create(struct rtentry *r, struct net_device *devhint)
/* Fill in the routing entry */
rt->target = ta->sat_addr;
+ dev_put(rt->dev); /* Release old device */
dev_hold(devhint);
rt->dev = devhint;
rt->flags = r->rt_flags;
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 61b5b700817d..f7a5565e794e 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -45,7 +45,8 @@
#include <net/atmclip.h>
static struct net_device *clip_devs;
-static struct atm_vcc *atmarpd;
+static struct atm_vcc __rcu *atmarpd;
+static DEFINE_MUTEX(atmarpd_lock);
static struct timer_list idle_timer;
static const struct neigh_ops clip_neigh_ops;
@@ -53,24 +54,35 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
{
struct sock *sk;
struct atmarp_ctrl *ctrl;
+ struct atm_vcc *vcc;
struct sk_buff *skb;
+ int err = 0;
pr_debug("(%d)\n", type);
- if (!atmarpd)
- return -EUNATCH;
+
+ rcu_read_lock();
+ vcc = rcu_dereference(atmarpd);
+ if (!vcc) {
+ err = -EUNATCH;
+ goto unlock;
+ }
skb = alloc_skb(sizeof(struct atmarp_ctrl), GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ err = -ENOMEM;
+ goto unlock;
+ }
ctrl = skb_put(skb, sizeof(struct atmarp_ctrl));
ctrl->type = type;
ctrl->itf_num = itf;
ctrl->ip = ip;
- atm_force_charge(atmarpd, skb->truesize);
+ atm_force_charge(vcc, skb->truesize);
- sk = sk_atm(atmarpd);
+ sk = sk_atm(vcc);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk);
- return 0;
+unlock:
+ rcu_read_unlock();
+ return err;
}
static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry)
@@ -193,12 +205,6 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
pr_debug("\n");
- if (!clip_devs) {
- atm_return(vcc, skb->truesize);
- kfree_skb(skb);
- return;
- }
-
if (!skb) {
pr_debug("removing VCC %p\n", clip_vcc);
if (clip_vcc->entry)
@@ -208,6 +214,11 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
return;
}
atm_return(vcc, skb->truesize);
+ if (!clip_devs) {
+ kfree_skb(skb);
+ return;
+ }
+
skb->dev = clip_vcc->entry ? clip_vcc->entry->neigh->dev : clip_devs;
/* clip_vcc->entry == NULL if we don't have an IP address yet */
if (!skb->dev) {
@@ -418,6 +429,8 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
if (!vcc->push)
return -EBADFD;
+ if (vcc->user_back)
+ return -EINVAL;
clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL);
if (!clip_vcc)
return -ENOMEM;
@@ -608,17 +621,27 @@ static void atmarpd_close(struct atm_vcc *vcc)
{
pr_debug("\n");
- rtnl_lock();
- atmarpd = NULL;
+ mutex_lock(&atmarpd_lock);
+ RCU_INIT_POINTER(atmarpd, NULL);
+ mutex_unlock(&atmarpd_lock);
+
+ synchronize_rcu();
skb_queue_purge(&sk_atm(vcc)->sk_receive_queue);
- rtnl_unlock();
pr_debug("(done)\n");
module_put(THIS_MODULE);
}
+static int atmarpd_send(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ atm_return_tx(vcc, skb);
+ dev_kfree_skb_any(skb);
+ return 0;
+}
+
static const struct atmdev_ops atmarpd_dev_ops = {
- .close = atmarpd_close
+ .close = atmarpd_close,
+ .send = atmarpd_send
};
@@ -632,15 +655,18 @@ static struct atm_dev atmarpd_dev = {
static int atm_init_atmarp(struct atm_vcc *vcc)
{
- rtnl_lock();
+ if (vcc->push == clip_push)
+ return -EINVAL;
+
+ mutex_lock(&atmarpd_lock);
if (atmarpd) {
- rtnl_unlock();
+ mutex_unlock(&atmarpd_lock);
return -EADDRINUSE;
}
mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ);
- atmarpd = vcc;
+ rcu_assign_pointer(atmarpd, vcc);
set_bit(ATM_VF_META, &vcc->flags);
set_bit(ATM_VF_READY, &vcc->flags);
/* allow replies and avoid getting closed if signaling dies */
@@ -649,13 +675,14 @@ static int atm_init_atmarp(struct atm_vcc *vcc)
vcc->push = NULL;
vcc->pop = NULL; /* crash */
vcc->push_oam = NULL; /* crash */
- rtnl_unlock();
+ mutex_unlock(&atmarpd_lock);
return 0;
}
static int clip_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct atm_vcc *vcc = ATM_SD(sock);
+ struct sock *sk = sock->sk;
int err = 0;
switch (cmd) {
@@ -676,14 +703,18 @@ static int clip_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
err = clip_create(arg);
break;
case ATMARPD_CTRL:
+ lock_sock(sk);
err = atm_init_atmarp(vcc);
if (!err) {
sock->state = SS_CONNECTED;
__module_get(THIS_MODULE);
}
+ release_sock(sk);
break;
case ATMARP_MKIP:
+ lock_sock(sk);
err = clip_mkip(vcc, arg);
+ release_sock(sk);
break;
case ATMARP_SETENTRY:
err = clip_setentry(vcc, (__force __be32)arg);
diff --git a/net/atm/common.c b/net/atm/common.c
index 9b75699992ff..881c7f259dbd 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -635,17 +635,27 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
skb->dev = NULL; /* for paths shared with net_device interfaces */
if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
- kfree_skb(skb);
error = -EFAULT;
- goto out;
+ goto free_skb;
}
if (eff != size)
memset(skb->data + size, 0, eff-size);
+
+ if (vcc->dev->ops->pre_send) {
+ error = vcc->dev->ops->pre_send(vcc, skb);
+ if (error)
+ goto free_skb;
+ }
+
error = vcc->dev->ops->send(vcc, skb);
error = error ? error : size;
out:
release_sock(sk);
return error;
+free_skb:
+ atm_return_tx(vcc, skb);
+ kfree_skb(skb);
+ goto out;
}
__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
diff --git a/net/atm/lec.c b/net/atm/lec.c
index acef984f3367..afb8d3eb2185 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -124,6 +124,7 @@ static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
/* Device structures */
static struct net_device *dev_lec[MAX_LEC_ITF];
+static DEFINE_MUTEX(lec_mutex);
#if IS_ENABLED(CONFIG_BRIDGE)
static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
@@ -685,6 +686,7 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
int bytes_left;
struct atmlec_ioc ioc_data;
+ lockdep_assert_held(&lec_mutex);
/* Lecd must be up in this case */
bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
if (bytes_left != 0)
@@ -710,6 +712,7 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
{
+ lockdep_assert_held(&lec_mutex);
if (arg < 0 || arg >= MAX_LEC_ITF)
return -EINVAL;
arg = array_index_nospec(arg, MAX_LEC_ITF);
@@ -725,6 +728,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
int i;
struct lec_priv *priv;
+ lockdep_assert_held(&lec_mutex);
if (arg < 0)
arg = 0;
if (arg >= MAX_LEC_ITF)
@@ -742,6 +746,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
snprintf(dev_lec[i]->name, IFNAMSIZ, "lec%d", i);
if (register_netdev(dev_lec[i])) {
free_netdev(dev_lec[i]);
+ dev_lec[i] = NULL;
return -EINVAL;
}
@@ -904,7 +909,6 @@ static void *lec_itf_walk(struct lec_state *state, loff_t *l)
v = (dev && netdev_priv(dev)) ?
lec_priv_walk(state, l, netdev_priv(dev)) : NULL;
if (!v && dev) {
- dev_put(dev);
/* Partial state reset for the next time we get called */
dev = NULL;
}
@@ -928,6 +932,7 @@ static void *lec_seq_start(struct seq_file *seq, loff_t *pos)
{
struct lec_state *state = seq->private;
+ mutex_lock(&lec_mutex);
state->itf = 0;
state->dev = NULL;
state->locked = NULL;
@@ -945,8 +950,9 @@ static void lec_seq_stop(struct seq_file *seq, void *v)
if (state->dev) {
spin_unlock_irqrestore(&state->locked->lec_arp_lock,
state->flags);
- dev_put(state->dev);
+ state->dev = NULL;
}
+ mutex_unlock(&lec_mutex);
}
static void *lec_seq_next(struct seq_file *seq, void *v, loff_t *pos)
@@ -1003,6 +1009,7 @@ static int lane_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
return -ENOIOCTLCMD;
}
+ mutex_lock(&lec_mutex);
switch (cmd) {
case ATMLEC_CTRL:
err = lecd_attach(vcc, (int)arg);
@@ -1017,6 +1024,7 @@ static int lane_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
break;
}
+ mutex_unlock(&lec_mutex);
return err;
}
diff --git a/net/atm/raw.c b/net/atm/raw.c
index 2b5f78a7ec3e..1e6511ec842c 100644
--- a/net/atm/raw.c
+++ b/net/atm/raw.c
@@ -36,7 +36,7 @@ static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)
pr_debug("(%d) %d -= %d\n",
vcc->vci, sk_wmem_alloc_get(sk), ATM_SKB(skb)->acct_truesize);
- WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, &sk->sk_wmem_alloc));
+ atm_return_tx(vcc, skb);
dev_kfree_skb_any(skb);
sk->sk_write_space(sk);
}
diff --git a/net/atm/resources.c b/net/atm/resources.c
index 995d29e7fb13..7c6fdedbcf4e 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -112,7 +112,9 @@ struct atm_dev *atm_dev_register(const char *type, struct device *parent,
if (atm_proc_dev_register(dev) < 0) {
pr_err("atm_proc_dev_register failed for dev %s\n", type);
- goto out_fail;
+ mutex_unlock(&atm_dev_mutex);
+ kfree(dev);
+ return NULL;
}
if (atm_register_sysfs(dev, parent) < 0) {
@@ -128,7 +130,7 @@ out:
return dev;
out_fail:
- kfree(dev);
+ put_device(&dev->class_dev);
dev = NULL;
goto out;
}
@@ -146,11 +148,10 @@ void atm_dev_deregister(struct atm_dev *dev)
*/
mutex_lock(&atm_dev_mutex);
list_del(&dev->dev_list);
- mutex_unlock(&atm_dev_mutex);
-
atm_dev_release_vccs(dev);
atm_unregister_sysfs(dev);
atm_proc_dev_deregister(dev);
+ mutex_unlock(&atm_dev_mutex);
atm_dev_put(dev);
}
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
index 1cac25aca637..f2d66af86359 100644
--- a/net/ax25/ax25_in.c
+++ b/net/ax25/ax25_in.c
@@ -433,6 +433,10 @@ free:
int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype, struct net_device *orig_dev)
{
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_RX_DROP;
+
skb_orphan(skb);
if (!net_eq(dev_net(dev), &init_net)) {
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 20b316207f9a..c299e2bc87ed 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -53,19 +53,6 @@ config BATMAN_ADV_DAT
mesh networks. If you think that your network does not need
this option you can safely remove it and save some space.
-config BATMAN_ADV_NC
- bool "Network Coding"
- depends on BATMAN_ADV
- help
- This option enables network coding, a mechanism that aims to
- increase the overall network throughput by fusing multiple
- packets in one transmission.
- Note that interfaces controlled by batman-adv must be manually
- configured to have promiscuous mode enabled in order to make
- network coding work.
- If you think that your network does not need this feature you
- can safely disable it and save some space.
-
config BATMAN_ADV_MCAST
bool "Multicast optimisation"
depends on BATMAN_ADV && INET && !(BRIDGE=m && BATMAN_ADV=y)
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 1cc9be6de456..d3c4d4143c14 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -23,7 +23,6 @@ batman-adv-y += mesh-interface.o
batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast.o
batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast_forw.o
batman-adv-y += netlink.o
-batman-adv-$(CONFIG_BATMAN_ADV_NC) += network-coding.o
batman-adv-y += originator.o
batman-adv-y += routing.o
batman-adv-y += send.o
diff --git a/net/batman-adv/bat_algo.c b/net/batman-adv/bat_algo.c
index c0c982b6f029..49e5861b58ec 100644
--- a/net/batman-adv/bat_algo.c
+++ b/net/batman-adv/bat_algo.c
@@ -14,6 +14,7 @@
#include <linux/skbuff.h>
#include <linux/stddef.h>
#include <linux/string.h>
+#include <linux/types.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <uapi/linux/batman_adv.h>
diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h
index 2c486374af58..7ce9abbdb4b4 100644
--- a/net/batman-adv/bat_algo.h
+++ b/net/batman-adv/bat_algo.h
@@ -11,10 +11,8 @@
#include <linux/netlink.h>
#include <linux/skbuff.h>
-#include <linux/types.h>
extern char batadv_routing_algo[];
-extern struct list_head batadv_hardif_list;
void batadv_algo_init(void);
struct batadv_algo_ops *batadv_algo_get(const char *name);
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 458879d21d66..b75c2228e69a 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -52,7 +52,6 @@
#include "hash.h"
#include "log.h"
#include "netlink.h"
-#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "send.h"
@@ -791,6 +790,7 @@ static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
struct batadv_ogm_packet *batadv_ogm_packet;
struct batadv_hard_iface *primary_if, *tmp_hard_iface;
int *ogm_buff_len = &hard_iface->bat_iv.ogm_buff_len;
+ struct list_head *iter;
u32 seqno;
u16 tvlv_len = 0;
unsigned long send_time;
@@ -847,10 +847,7 @@ static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
* interfaces.
*/
rcu_read_lock();
- list_for_each_entry_rcu(tmp_hard_iface, &batadv_hardif_list, list) {
- if (tmp_hard_iface->mesh_iface != hard_iface->mesh_iface)
- continue;
-
+ netdev_for_each_lower_private_rcu(hard_iface->mesh_iface, tmp_hard_iface, iter) {
if (!kref_get_unless_zero(&tmp_hard_iface->refcount))
continue;
@@ -1408,10 +1405,6 @@ batadv_iv_ogm_process_per_outif(const struct sk_buff *skb, int ogm_offset,
if (!orig_neigh_node)
goto out;
- /* Update nc_nodes of the originator */
- batadv_nc_update_nc_node(bat_priv, orig_node, orig_neigh_node,
- ogm_packet, is_single_hop_neigh);
-
orig_neigh_router = batadv_orig_router_get(orig_neigh_node,
if_outgoing);
@@ -1567,6 +1560,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
bool is_my_oldorig = false;
bool is_my_addr = false;
bool is_my_orig = false;
+ struct list_head *iter;
ogm_packet = (struct batadv_ogm_packet *)(skb->data + ogm_offset);
ethhdr = eth_hdr(skb);
@@ -1603,11 +1597,9 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
ogm_packet->version, has_directlink_flag);
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
- if (hard_iface->if_status != BATADV_IF_ACTIVE)
- continue;
- if (hard_iface->mesh_iface != if_incoming->mesh_iface)
+ netdev_for_each_lower_private_rcu(if_incoming->mesh_iface, hard_iface, iter) {
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
continue;
if (batadv_compare_eth(ethhdr->h_source,
@@ -1668,13 +1660,10 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
if_incoming, BATADV_IF_DEFAULT);
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) {
if (hard_iface->if_status != BATADV_IF_ACTIVE)
continue;
- if (hard_iface->mesh_iface != bat_priv->mesh_iface)
- continue;
-
if (!kref_get_unless_zero(&hard_iface->refcount))
continue;
@@ -2142,6 +2131,7 @@ batadv_iv_ogm_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
struct batadv_hard_iface *single_hardif)
{
struct batadv_hard_iface *hard_iface;
+ struct list_head *iter;
int i_hardif = 0;
int i_hardif_s = cb->args[0];
int idx = cb->args[1];
@@ -2158,11 +2148,7 @@ batadv_iv_ogm_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
i_hardif++;
}
} else {
- list_for_each_entry_rcu(hard_iface, &batadv_hardif_list,
- list) {
- if (hard_iface->mesh_iface != bat_priv->mesh_iface)
- continue;
-
+ netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) {
if (i_hardif++ < i_hardif_s)
continue;
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index c16c2e60889d..de9444714264 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -212,6 +212,7 @@ batadv_v_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
struct batadv_hard_iface *single_hardif)
{
struct batadv_hard_iface *hard_iface;
+ struct list_head *iter;
int i_hardif = 0;
int i_hardif_s = cb->args[0];
int idx = cb->args[1];
@@ -227,10 +228,7 @@ batadv_v_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
i_hardif++;
}
} else {
- list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
- if (hard_iface->mesh_iface != bat_priv->mesh_iface)
- continue;
-
+ netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) {
if (i_hardif++ < i_hardif_s)
continue;
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 70d6778da0d7..cb16c1ed2a58 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -35,7 +35,6 @@
#include <net/cfg80211.h>
#include <uapi/linux/batadv_packet.h>
-#include "bat_algo.h"
#include "bat_v_ogm.h"
#include "hard-interface.h"
#include "log.h"
@@ -472,15 +471,12 @@ void batadv_v_elp_iface_activate(struct batadv_hard_iface *primary_iface,
void batadv_v_elp_primary_iface_set(struct batadv_hard_iface *primary_iface)
{
struct batadv_hard_iface *hard_iface;
+ struct list_head *iter;
/* update orig field of every elp iface belonging to this mesh */
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
- if (primary_iface->mesh_iface != hard_iface->mesh_iface)
- continue;
-
+ netdev_for_each_lower_private_rcu(primary_iface->mesh_iface, hard_iface, iter)
batadv_v_elp_iface_activate(primary_iface, hard_iface);
- }
rcu_read_unlock();
}
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index b86bb647da5b..e3870492dab7 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -22,7 +22,6 @@
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/random.h>
-#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
@@ -33,7 +32,6 @@
#include <linux/workqueue.h>
#include <uapi/linux/batadv_packet.h>
-#include "bat_algo.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
@@ -265,6 +263,7 @@ static void batadv_v_ogm_send_meshif(struct batadv_priv *bat_priv)
struct batadv_ogm2_packet *ogm_packet;
struct sk_buff *skb, *skb_tmp;
unsigned char *ogm_buff;
+ struct list_head *iter;
int ogm_buff_len;
u16 tvlv_len = 0;
int ret;
@@ -301,10 +300,7 @@ static void batadv_v_ogm_send_meshif(struct batadv_priv *bat_priv)
/* broadcast on every interface */
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
- if (hard_iface->mesh_iface != bat_priv->mesh_iface)
- continue;
-
+ netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) {
if (!kref_get_unless_zero(&hard_iface->refcount))
continue;
@@ -859,6 +855,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
struct batadv_hard_iface *hard_iface;
struct batadv_ogm2_packet *ogm_packet;
u32 ogm_throughput, link_throughput, path_throughput;
+ struct list_head *iter;
int ret;
ethhdr = eth_hdr(skb);
@@ -921,13 +918,10 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
BATADV_IF_DEFAULT);
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) {
if (hard_iface->if_status != BATADV_IF_ACTIVE)
continue;
- if (hard_iface->mesh_iface != bat_priv->mesh_iface)
- continue;
-
if (!kref_get_unless_zero(&hard_iface->refcount))
continue;
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 747755647c6a..b992ba12aa24 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -12,6 +12,7 @@
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/crc16.h>
+#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
@@ -1585,6 +1586,39 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
}
/**
+ * batadv_skb_crc32() - calculate CRC32 of the whole packet and skip bytes in
+ * the header
+ * @skb: skb pointing to fragmented socket buffers
+ * @payload_ptr: Pointer to position inside the head buffer of the skb
+ * marking the start of the data to be CRC'ed
+ *
+ * payload_ptr must always point to an address in the skb head buffer and not to
+ * a fragment.
+ *
+ * Return: big endian crc32c of the checksummed data
+ */
+static __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
+{
+ unsigned int to = skb->len;
+ unsigned int consumed = 0;
+ struct skb_seq_state st;
+ unsigned int from;
+ unsigned int len;
+ const u8 *data;
+ u32 crc = 0;
+
+ from = (unsigned int)(payload_ptr - skb->data);
+
+ skb_prepare_seq_read(skb, from, to, &st);
+ while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
+ crc = crc32c(crc, data, len);
+ consumed += len;
+ }
+
+ return htonl(crc);
+}
+
+/**
* batadv_bla_check_duplist() - Check if a frame is in the broadcast dup.
* @bat_priv: the bat priv with all the mesh interface information
* @skb: contains the multicast packet to be checked
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 558d39dffc23..5113f879736b 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -22,6 +22,7 @@
#include <linux/minmax.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
+#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rtnetlink.h>
@@ -438,15 +439,13 @@ out:
}
static struct batadv_hard_iface *
-batadv_hardif_get_active(const struct net_device *mesh_iface)
+batadv_hardif_get_active(struct net_device *mesh_iface)
{
struct batadv_hard_iface *hard_iface;
+ struct list_head *iter;
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
- if (hard_iface->mesh_iface != mesh_iface)
- continue;
-
+ netdev_for_each_lower_private_rcu(mesh_iface, hard_iface, iter) {
if (hard_iface->if_status == BATADV_IF_ACTIVE &&
kref_get_unless_zero(&hard_iface->refcount))
goto out;
@@ -508,19 +507,17 @@ batadv_hardif_is_iface_up(const struct batadv_hard_iface *hard_iface)
static void batadv_check_known_mac_addr(const struct batadv_hard_iface *hard_iface)
{
- const struct net_device *mesh_iface = hard_iface->mesh_iface;
+ struct net_device *mesh_iface = hard_iface->mesh_iface;
const struct batadv_hard_iface *tmp_hard_iface;
+ struct list_head *iter;
if (!mesh_iface)
return;
- list_for_each_entry(tmp_hard_iface, &batadv_hardif_list, list) {
+ netdev_for_each_lower_private(mesh_iface, tmp_hard_iface, iter) {
if (tmp_hard_iface == hard_iface)
continue;
- if (tmp_hard_iface->mesh_iface != mesh_iface)
- continue;
-
if (tmp_hard_iface->if_status == BATADV_IF_NOT_IN_USE)
continue;
@@ -545,15 +542,13 @@ static void batadv_hardif_recalc_extra_skbroom(struct net_device *mesh_iface)
unsigned short lower_headroom = 0;
unsigned short lower_tailroom = 0;
unsigned short needed_headroom;
+ struct list_head *iter;
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ netdev_for_each_lower_private_rcu(mesh_iface, hard_iface, iter) {
if (hard_iface->if_status == BATADV_IF_NOT_IN_USE)
continue;
- if (hard_iface->mesh_iface != mesh_iface)
- continue;
-
lower_header_len = max_t(unsigned short, lower_header_len,
hard_iface->net_dev->hard_header_len);
@@ -586,17 +581,15 @@ int batadv_hardif_min_mtu(struct net_device *mesh_iface)
{
struct batadv_priv *bat_priv = netdev_priv(mesh_iface);
const struct batadv_hard_iface *hard_iface;
+ struct list_head *iter;
int min_mtu = INT_MAX;
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ netdev_for_each_lower_private_rcu(mesh_iface, hard_iface, iter) {
if (hard_iface->if_status != BATADV_IF_ACTIVE &&
hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED)
continue;
- if (hard_iface->mesh_iface != mesh_iface)
- continue;
-
min_mtu = min_t(int, hard_iface->net_dev->mtu, min_mtu);
}
rcu_read_unlock();
@@ -734,7 +727,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
bat_priv = netdev_priv(hard_iface->mesh_iface);
ret = netdev_master_upper_dev_link(hard_iface->net_dev,
- mesh_iface, NULL, NULL, NULL);
+ mesh_iface, hard_iface, NULL, NULL);
if (ret)
goto err_dev;
@@ -803,18 +796,15 @@ err_dev:
*
* Return: number of connected/enslaved hard interfaces
*/
-static size_t batadv_hardif_cnt(const struct net_device *mesh_iface)
+static size_t batadv_hardif_cnt(struct net_device *mesh_iface)
{
struct batadv_hard_iface *hard_iface;
+ struct list_head *iter;
size_t count = 0;
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
- if (hard_iface->mesh_iface != mesh_iface)
- continue;
-
+ netdev_for_each_lower_private_rcu(mesh_iface, hard_iface, iter)
count++;
- }
rcu_read_unlock();
return count;
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 262a78364742..9db8a310961e 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -12,7 +12,6 @@
#include <linux/compiler.h>
#include <linux/kref.h>
#include <linux/netdevice.h>
-#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/stddef.h>
#include <linux/types.h>
diff --git a/net/batman-adv/log.h b/net/batman-adv/log.h
index 567afaa8df99..225b747a2048 100644
--- a/net/batman-adv/log.h
+++ b/net/batman-adv/log.h
@@ -51,9 +51,6 @@ enum batadv_dbg_level {
/** @BATADV_DBG_DAT: ARP snooping and DAT related messages */
BATADV_DBG_DAT = BIT(4),
- /** @BATADV_DBG_NC: network coding related messages */
- BATADV_DBG_NC = BIT(5),
-
/** @BATADV_DBG_MCAST: multicast related messages */
BATADV_DBG_MCAST = BIT(6),
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index c0bc75513355..3a35aadd8b41 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -11,7 +11,6 @@
#include <linux/build_bug.h>
#include <linux/byteorder/generic.h>
#include <linux/container_of.h>
-#include <linux/crc32.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gfp.h>
@@ -27,7 +26,6 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
-#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
@@ -54,7 +52,6 @@
#include "mesh-interface.h"
#include "multicast.h"
#include "netlink.h"
-#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "send.h"
@@ -104,7 +101,6 @@ static int __init batadv_init(void)
batadv_v_init();
batadv_iv_init();
- batadv_nc_init();
batadv_tp_meter_init();
batadv_event_workqueue = create_singlethread_workqueue("bat_events");
@@ -219,12 +215,6 @@ int batadv_mesh_init(struct net_device *mesh_iface)
goto err_dat;
}
- ret = batadv_nc_mesh_init(bat_priv);
- if (ret < 0) {
- atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
- goto err_nc;
- }
-
batadv_gw_init(bat_priv);
batadv_mcast_init(bat_priv);
@@ -233,8 +223,6 @@ int batadv_mesh_init(struct net_device *mesh_iface)
return 0;
-err_nc:
- batadv_dat_free(bat_priv);
err_dat:
batadv_bla_free(bat_priv);
err_bla:
@@ -265,7 +253,6 @@ void batadv_mesh_free(struct net_device *mesh_iface)
batadv_gw_node_free(bat_priv);
batadv_v_mesh_free(bat_priv);
- batadv_nc_mesh_free(bat_priv);
batadv_dat_free(bat_priv);
batadv_bla_free(bat_priv);
@@ -303,16 +290,14 @@ void batadv_mesh_free(struct net_device *mesh_iface)
bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
{
const struct batadv_hard_iface *hard_iface;
+ struct list_head *iter;
bool is_my_mac = false;
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) {
if (hard_iface->if_status != BATADV_IF_ACTIVE)
continue;
- if (hard_iface->mesh_iface != bat_priv->mesh_iface)
- continue;
-
if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
is_my_mac = true;
break;
@@ -339,11 +324,6 @@ int batadv_max_header_len(void)
header_len = max_t(int, header_len,
sizeof(struct batadv_bcast_packet));
-#ifdef CONFIG_BATMAN_ADV_NC
- header_len = max_t(int, header_len,
- sizeof(struct batadv_coded_packet));
-#endif
-
return header_len + ETH_HLEN;
}
@@ -581,39 +561,6 @@ void batadv_recv_handler_unregister(u8 packet_type)
}
/**
- * batadv_skb_crc32() - calculate CRC32 of the whole packet and skip bytes in
- * the header
- * @skb: skb pointing to fragmented socket buffers
- * @payload_ptr: Pointer to position inside the head buffer of the skb
- * marking the start of the data to be CRC'ed
- *
- * payload_ptr must always point to an address in the skb head buffer and not to
- * a fragment.
- *
- * Return: big endian crc32c of the checksummed data
- */
-__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
-{
- u32 crc = 0;
- unsigned int from;
- unsigned int to = skb->len;
- struct skb_seq_state st;
- const u8 *data;
- unsigned int len;
- unsigned int consumed = 0;
-
- from = (unsigned int)(payload_ptr - skb->data);
-
- skb_prepare_seq_read(skb, from, to, &st);
- while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
- crc = crc32c(crc, data, len);
- consumed += len;
- }
-
- return htonl(crc);
-}
-
-/**
* batadv_get_vid() - extract the VLAN identifier from skb if any
* @skb: the buffer containing the packet
* @header_len: length of the batman header preceding the ethernet header
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 692109be2210..2be1ac17acaa 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -13,7 +13,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2025.2"
+#define BATADV_SOURCE_VERSION "2025.4"
#endif
/* B.A.T.M.A.N. parameters */
@@ -121,8 +121,6 @@
#define BATADV_RESET_PROTECTION_MS 30000
#define BATADV_EXPECTED_SEQNO_RANGE 65536
-#define BATADV_NC_NODE_TIMEOUT 10000 /* Milliseconds */
-
/**
* BATADV_TP_MAX_NUM - maximum number of simultaneously active tp sessions
*/
@@ -250,7 +248,6 @@ batadv_recv_handler_register(u8 packet_type,
int (*recv_handler)(struct sk_buff *,
struct batadv_hard_iface *));
void batadv_recv_handler_unregister(u8 packet_type);
-__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr);
/**
 * batadv_compare_eth() - Compare two Ethernet addresses that are not u16-aligned
diff --git a/net/batman-adv/mesh-interface.c b/net/batman-adv/mesh-interface.c
index 5bbc366f974d..df7e95811ef5 100644
--- a/net/batman-adv/mesh-interface.c
+++ b/net/batman-adv/mesh-interface.c
@@ -37,6 +37,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <net/netlink.h>
+#include <net/rtnetlink.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
@@ -46,7 +47,6 @@
#include "gateway_client.h"
#include "hard-interface.h"
#include "multicast.h"
-#include "network-coding.h"
#include "send.h"
#include "translation-table.h"
@@ -802,8 +802,6 @@ static int batadv_meshif_init_late(struct net_device *dev)
bat_priv->primary_if = NULL;
- batadv_nc_init_bat_priv(bat_priv);
-
if (!bat_priv->algo_ops) {
ret = batadv_algo_select(bat_priv, batadv_routing_algo);
if (ret < 0)
@@ -947,17 +945,6 @@ static const struct {
{ "dat_put_rx" },
{ "dat_cached_reply_tx" },
#endif
-#ifdef CONFIG_BATMAN_ADV_NC
- { "nc_code" },
- { "nc_code_bytes" },
- { "nc_recode" },
- { "nc_recode_bytes" },
- { "nc_buffer" },
- { "nc_decode" },
- { "nc_decode_bytes" },
- { "nc_decode_failed" },
- { "nc_sniffed" },
-#endif
};
static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -1101,9 +1088,9 @@ static void batadv_meshif_destroy_netlink(struct net_device *mesh_iface,
struct batadv_hard_iface *hard_iface;
struct batadv_meshif_vlan *vlan;
- list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
- if (hard_iface->mesh_iface == mesh_iface)
- batadv_hardif_disable_interface(hard_iface);
+ while (!list_empty(&mesh_iface->adj_list.lower)) {
+ hard_iface = netdev_adjacent_get_private(mesh_iface->adj_list.lower.next);
+ batadv_hardif_disable_interface(hard_iface);
}
/* destroy the "untagged" VLAN */
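
The destroy path above can walk mesh_iface->adj_list.lower directly because batadv_hardif_enable_interface() now registers the hard_iface as the upper_priv argument of netdev_master_upper_dev_link(). A hedged sketch of that pairing, with a hypothetical my_port struct and error handling trimmed:

#include <linux/list.h>
#include <linux/netdevice.h>

struct my_port {
	struct net_device *slave;	/* hypothetical; batadv keeps net_dev here */
};

/* Link @slave under @master and attach @port as adjacency private data;
 * the caller holds rtnl. netdev_adjacent_get_private() later returns @port.
 */
static int my_enslave(struct net_device *master, struct net_device *slave,
		      struct my_port *port)
{
	return netdev_master_upper_dev_link(slave, master, port, NULL, NULL);
}

/* Tear down every lower device; unlinking removes the adjacency entry, so
 * the list shrinks until it is empty.
 */
static void my_release_all(struct net_device *master)
{
	struct my_port *port;

	while (!list_empty(&master->adj_list.lower)) {
		port = netdev_adjacent_get_private(master->adj_list.lower.next);
		netdev_upper_dev_unlink(port->slave, master);
	}
}
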
diff --git a/net/batman-adv/mesh-interface.h b/net/batman-adv/mesh-interface.h
index 7ba055b2bc26..53756c5a45e0 100644
--- a/net/batman-adv/mesh-interface.h
+++ b/net/batman-adv/mesh-interface.h
@@ -13,7 +13,6 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>
-#include <net/rtnetlink.h>
int batadv_skb_head_push(struct sk_buff *skb, unsigned int len);
void batadv_interface_rx(struct net_device *mesh_iface,
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 5786680aff30..e8c6b0bf670f 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -246,15 +246,13 @@ static u8 batadv_mcast_mla_rtr_flags_get(struct batadv_priv *bat_priv,
static u8 batadv_mcast_mla_forw_flags_get(struct batadv_priv *bat_priv)
{
const struct batadv_hard_iface *hard_iface;
+ struct list_head *iter;
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) {
if (hard_iface->if_status != BATADV_IF_ACTIVE)
continue;
- if (hard_iface->mesh_iface != bat_priv->mesh_iface)
- continue;
-
if (hard_iface->net_dev->mtu < IPV6_MIN_MTU) {
rcu_read_unlock();
return BATADV_NO_FLAGS;
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index e7c8f9f2bb1f..78c651f634cd 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -20,7 +20,6 @@
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/limits.h>
-#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
@@ -45,7 +44,6 @@
#include "log.h"
#include "mesh-interface.h"
#include "multicast.h"
-#include "network-coding.h"
#include "originator.h"
#include "tp_meter.h"
#include "translation-table.h"
@@ -145,7 +143,6 @@ static const struct nla_policy batadv_netlink_policy[NUM_BATADV_ATTR] = {
[BATADV_ATTR_LOG_LEVEL] = { .type = NLA_U32 },
[BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED] = { .type = NLA_U8 },
[BATADV_ATTR_MULTICAST_FANOUT] = { .type = NLA_U32 },
- [BATADV_ATTR_NETWORK_CODING_ENABLED] = { .type = NLA_U8 },
[BATADV_ATTR_ORIG_INTERVAL] = { .type = NLA_U32 },
[BATADV_ATTR_ELP_INTERVAL] = { .type = NLA_U32 },
[BATADV_ATTR_THROUGHPUT_OVERRIDE] = { .type = NLA_U32 },
@@ -346,12 +343,6 @@ static int batadv_netlink_mesh_fill(struct sk_buff *msg,
goto nla_put_failure;
#endif /* CONFIG_BATMAN_ADV_MCAST */
-#ifdef CONFIG_BATMAN_ADV_NC
- if (nla_put_u8(msg, BATADV_ATTR_NETWORK_CODING_ENABLED,
- !!atomic_read(&bat_priv->network_coding)))
- goto nla_put_failure;
-#endif /* CONFIG_BATMAN_ADV_NC */
-
if (nla_put_u32(msg, BATADV_ATTR_ORIG_INTERVAL,
atomic_read(&bat_priv->orig_interval)))
goto nla_put_failure;
@@ -589,15 +580,6 @@ static int batadv_netlink_set_mesh(struct sk_buff *skb, struct genl_info *info)
}
#endif /* CONFIG_BATMAN_ADV_MCAST */
-#ifdef CONFIG_BATMAN_ADV_NC
- if (info->attrs[BATADV_ATTR_NETWORK_CODING_ENABLED]) {
- attr = info->attrs[BATADV_ATTR_NETWORK_CODING_ENABLED];
-
- atomic_set(&bat_priv->network_coding, !!nla_get_u8(attr));
- batadv_nc_status_update(bat_priv->mesh_iface);
- }
-#endif /* CONFIG_BATMAN_ADV_NC */
-
if (info->attrs[BATADV_ATTR_ORIG_INTERVAL]) {
u32 orig_interval;
@@ -968,6 +950,7 @@ batadv_netlink_dump_hardif(struct sk_buff *msg, struct netlink_callback *cb)
struct batadv_priv *bat_priv;
int portid = NETLINK_CB(cb->skb).portid;
int skip = cb->args[0];
+ struct list_head *iter;
int i = 0;
mesh_iface = batadv_netlink_get_meshif(cb);
@@ -979,10 +962,7 @@ batadv_netlink_dump_hardif(struct sk_buff *msg, struct netlink_callback *cb)
rtnl_lock();
cb->seq = batadv_hardif_generation << 1 | 1;
- list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
- if (hard_iface->mesh_iface != mesh_iface)
- continue;
-
+ netdev_for_each_lower_private(mesh_iface, hard_iface, iter) {
if (i++ < skip)
continue;
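
The dump above keeps the standard netlink resume bookkeeping (cb->args[0] records how many entries were already emitted) while switching the walk to the mesh interface's lower devices. A compact sketch of that skip/resume pattern under rtnl; the fill() helper and my_port type are placeholders rather than batman-adv APIs:

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>

static int my_dump(struct sk_buff *msg, struct netlink_callback *cb,
		   struct net_device *master)
{
	struct my_port *port;
	struct list_head *iter;
	int skip = cb->args[0];
	int i = 0;

	rtnl_lock();
	netdev_for_each_lower_private(master, port, iter) {
		if (i++ < skip)
			continue;

		if (fill(msg, port) < 0) {
			i--;	/* retry this entry in the next dump call */
			break;
		}
	}
	rtnl_unlock();

	cb->args[0] = i;
	return msg->len;
}
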
diff --git a/net/batman-adv/netlink.h b/net/batman-adv/netlink.h
index fe4548b974bb..4eae9e5ff135 100644
--- a/net/batman-adv/netlink.h
+++ b/net/batman-adv/netlink.h
@@ -11,7 +11,6 @@
#include <linux/netlink.h>
#include <linux/types.h>
-#include <net/genetlink.h>
void batadv_netlink_register(void);
void batadv_netlink_unregister(void);
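
Everything below removes the network coding implementation in one go. Its central primitive was batadv_nc_memxor(): two buffered unicast payloads are XOR-ed into one coded packet, and each receiver recovers its half by XOR-ing again with the packet it already holds (its own earlier transmission or an overheard copy kept in the decoding buffer). A toy, self-contained illustration of that idea, independent of any batman-adv structures:

#include <stddef.h>

/* coded = a ^ b; XOR-ing coded with b yields a again. The real code adds a
 * batadv_coded_packet header, CRC-based packet ids and TQ-weighted next-hop
 * selection on top of this.
 */
static void xor_buffers(unsigned char *dst, const unsigned char *src,
			size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[i] ^= src[i];
}

/* usage sketch:
 *   xor_buffers(coded, pkt_b, len);  // sender: coded started as a copy of pkt_a
 *   xor_buffers(coded, pkt_b, len);  // receiver holding pkt_b gets pkt_a back
 */
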
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
deleted file mode 100644
index 9f56308779cc..000000000000
--- a/net/batman-adv/network-coding.c
+++ /dev/null
@@ -1,1873 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) B.A.T.M.A.N. contributors:
- *
- * Martin Hundebøll, Jeppe Ledet-Pedersen
- */
-
-#include "network-coding.h"
-#include "main.h"
-
-#include <linux/atomic.h>
-#include <linux/bitops.h>
-#include <linux/byteorder/generic.h>
-#include <linux/compiler.h>
-#include <linux/container_of.h>
-#include <linux/errno.h>
-#include <linux/etherdevice.h>
-#include <linux/gfp.h>
-#include <linux/if_ether.h>
-#include <linux/if_packet.h>
-#include <linux/init.h>
-#include <linux/jhash.h>
-#include <linux/jiffies.h>
-#include <linux/kref.h>
-#include <linux/list.h>
-#include <linux/lockdep.h>
-#include <linux/net.h>
-#include <linux/netdevice.h>
-#include <linux/printk.h>
-#include <linux/random.h>
-#include <linux/rculist.h>
-#include <linux/rcupdate.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/stddef.h>
-#include <linux/string.h>
-#include <linux/workqueue.h>
-#include <uapi/linux/batadv_packet.h>
-
-#include "hash.h"
-#include "log.h"
-#include "originator.h"
-#include "routing.h"
-#include "send.h"
-#include "tvlv.h"
-
-static struct lock_class_key batadv_nc_coding_hash_lock_class_key;
-static struct lock_class_key batadv_nc_decoding_hash_lock_class_key;
-
-static void batadv_nc_worker(struct work_struct *work);
-static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
- struct batadv_hard_iface *recv_if);
-
-/**
- * batadv_nc_init() - one-time initialization for network coding
- *
- * Return: 0 on success or negative error number in case of failure
- */
-int __init batadv_nc_init(void)
-{
- /* Register our packet type */
- return batadv_recv_handler_register(BATADV_CODED,
- batadv_nc_recv_coded_packet);
-}
-
-/**
- * batadv_nc_start_timer() - initialise the nc periodic worker
- * @bat_priv: the bat priv with all the mesh interface information
- */
-static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
-{
- queue_delayed_work(batadv_event_workqueue, &bat_priv->nc.work,
- msecs_to_jiffies(10));
-}
-
-/**
- * batadv_nc_tvlv_container_update() - update the network coding tvlv container
- * after network coding setting change
- * @bat_priv: the bat priv with all the mesh interface information
- */
-static void batadv_nc_tvlv_container_update(struct batadv_priv *bat_priv)
-{
- char nc_mode;
-
- nc_mode = atomic_read(&bat_priv->network_coding);
-
- switch (nc_mode) {
- case 0:
- batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_NC, 1);
- break;
- case 1:
- batadv_tvlv_container_register(bat_priv, BATADV_TVLV_NC, 1,
- NULL, 0);
- break;
- }
-}
-
-/**
- * batadv_nc_status_update() - update the network coding tvlv container after
- * network coding setting change
- * @net_dev: the mesh interface net device
- */
-void batadv_nc_status_update(struct net_device *net_dev)
-{
- struct batadv_priv *bat_priv = netdev_priv(net_dev);
-
- batadv_nc_tvlv_container_update(bat_priv);
-}
-
-/**
- * batadv_nc_tvlv_ogm_handler_v1() - process incoming nc tvlv container
- * @bat_priv: the bat priv with all the mesh interface information
- * @orig: the orig_node of the ogm
- * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
- * @tvlv_value: tvlv buffer containing the gateway data
- * @tvlv_value_len: tvlv buffer length
- */
-static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
- struct batadv_orig_node *orig,
- u8 flags,
- void *tvlv_value, u16 tvlv_value_len)
-{
- if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
- clear_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
- else
- set_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
-}
-
-/**
- * batadv_nc_mesh_init() - initialise coding hash table and start housekeeping
- * @bat_priv: the bat priv with all the mesh interface information
- *
- * Return: 0 on success or negative error number in case of failure
- */
-int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
-{
- bat_priv->nc.timestamp_fwd_flush = jiffies;
- bat_priv->nc.timestamp_sniffed_purge = jiffies;
-
- if (bat_priv->nc.coding_hash || bat_priv->nc.decoding_hash)
- return 0;
-
- bat_priv->nc.coding_hash = batadv_hash_new(128);
- if (!bat_priv->nc.coding_hash)
- goto err;
-
- batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
- &batadv_nc_coding_hash_lock_class_key);
-
- bat_priv->nc.decoding_hash = batadv_hash_new(128);
- if (!bat_priv->nc.decoding_hash) {
- batadv_hash_destroy(bat_priv->nc.coding_hash);
- goto err;
- }
-
- batadv_hash_set_lock_class(bat_priv->nc.decoding_hash,
- &batadv_nc_decoding_hash_lock_class_key);
-
- INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
- batadv_nc_start_timer(bat_priv);
-
- batadv_tvlv_handler_register(bat_priv, batadv_nc_tvlv_ogm_handler_v1,
- NULL, NULL, BATADV_TVLV_NC, 1,
- BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
- batadv_nc_tvlv_container_update(bat_priv);
- return 0;
-
-err:
- return -ENOMEM;
-}
-
-/**
- * batadv_nc_init_bat_priv() - initialise the nc specific bat_priv variables
- * @bat_priv: the bat priv with all the mesh interface information
- */
-void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
-{
- atomic_set(&bat_priv->network_coding, 0);
- bat_priv->nc.min_tq = 200;
- bat_priv->nc.max_fwd_delay = 10;
- bat_priv->nc.max_buffer_time = 200;
-}
-
-/**
- * batadv_nc_init_orig() - initialise the nc fields of an orig_node
- * @orig_node: the orig_node which is going to be initialised
- */
-void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
-{
- INIT_LIST_HEAD(&orig_node->in_coding_list);
- INIT_LIST_HEAD(&orig_node->out_coding_list);
- spin_lock_init(&orig_node->in_coding_list_lock);
- spin_lock_init(&orig_node->out_coding_list_lock);
-}
-
-/**
- * batadv_nc_node_release() - release nc_node from lists and queue for free
- * after rcu grace period
- * @ref: kref pointer of the nc_node
- */
-static void batadv_nc_node_release(struct kref *ref)
-{
- struct batadv_nc_node *nc_node;
-
- nc_node = container_of(ref, struct batadv_nc_node, refcount);
-
- batadv_orig_node_put(nc_node->orig_node);
- kfree_rcu(nc_node, rcu);
-}
-
-/**
- * batadv_nc_node_put() - decrement the nc_node refcounter and possibly
- * release it
- * @nc_node: nc_node to be free'd
- */
-static void batadv_nc_node_put(struct batadv_nc_node *nc_node)
-{
- if (!nc_node)
- return;
-
- kref_put(&nc_node->refcount, batadv_nc_node_release);
-}
-
-/**
- * batadv_nc_path_release() - release nc_path from lists and queue for free
- * after rcu grace period
- * @ref: kref pointer of the nc_path
- */
-static void batadv_nc_path_release(struct kref *ref)
-{
- struct batadv_nc_path *nc_path;
-
- nc_path = container_of(ref, struct batadv_nc_path, refcount);
-
- kfree_rcu(nc_path, rcu);
-}
-
-/**
- * batadv_nc_path_put() - decrement the nc_path refcounter and possibly
- * release it
- * @nc_path: nc_path to be free'd
- */
-static void batadv_nc_path_put(struct batadv_nc_path *nc_path)
-{
- if (!nc_path)
- return;
-
- kref_put(&nc_path->refcount, batadv_nc_path_release);
-}
-
-/**
- * batadv_nc_packet_free() - frees nc packet
- * @nc_packet: the nc packet to free
- * @dropped: whether the packet is freed because it is dropped
- */
-static void batadv_nc_packet_free(struct batadv_nc_packet *nc_packet,
- bool dropped)
-{
- if (dropped)
- kfree_skb(nc_packet->skb);
- else
- consume_skb(nc_packet->skb);
-
- batadv_nc_path_put(nc_packet->nc_path);
- kfree(nc_packet);
-}
-
-/**
- * batadv_nc_to_purge_nc_node() - checks whether an nc node has to be purged
- * @bat_priv: the bat priv with all the mesh interface information
- * @nc_node: the nc node to check
- *
- * Return: true if the entry has to be purged now, false otherwise
- */
-static bool batadv_nc_to_purge_nc_node(struct batadv_priv *bat_priv,
- struct batadv_nc_node *nc_node)
-{
- if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
- return true;
-
- return batadv_has_timed_out(nc_node->last_seen, BATADV_NC_NODE_TIMEOUT);
-}
-
-/**
- * batadv_nc_to_purge_nc_path_coding() - checks whether an nc path has timed out
- * @bat_priv: the bat priv with all the mesh interface information
- * @nc_path: the nc path to check
- *
- * Return: true if the entry has to be purged now, false otherwise
- */
-static bool batadv_nc_to_purge_nc_path_coding(struct batadv_priv *bat_priv,
- struct batadv_nc_path *nc_path)
-{
- if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
- return true;
-
- /* purge the path when no packets have been added for 10 times the
- * max_fwd_delay time
- */
- return batadv_has_timed_out(nc_path->last_valid,
- bat_priv->nc.max_fwd_delay * 10);
-}
-
-/**
- * batadv_nc_to_purge_nc_path_decoding() - checks whether an nc path has timed
- * out
- * @bat_priv: the bat priv with all the mesh interface information
- * @nc_path: the nc path to check
- *
- * Return: true if the entry has to be purged now, false otherwise
- */
-static bool batadv_nc_to_purge_nc_path_decoding(struct batadv_priv *bat_priv,
- struct batadv_nc_path *nc_path)
-{
- if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
- return true;
-
- /* purge the path when no packets have been added for 10 times the
- * max_buffer time
- */
- return batadv_has_timed_out(nc_path->last_valid,
- bat_priv->nc.max_buffer_time * 10);
-}
-
-/**
- * batadv_nc_purge_orig_nc_nodes() - go through list of nc nodes and purge stale
- * entries
- * @bat_priv: the bat priv with all the mesh interface information
- * @list: list of nc nodes
- * @lock: nc node list lock
- * @to_purge: function in charge of deciding whether an entry has to be purged or
- * not. This function takes the nc node as argument and has to return
- * a boolean value: true if the entry has to be deleted, false
- * otherwise
- */
-static void
-batadv_nc_purge_orig_nc_nodes(struct batadv_priv *bat_priv,
- struct list_head *list,
- spinlock_t *lock,
- bool (*to_purge)(struct batadv_priv *,
- struct batadv_nc_node *))
-{
- struct batadv_nc_node *nc_node, *nc_node_tmp;
-
- /* For each nc_node in list */
- spin_lock_bh(lock);
- list_for_each_entry_safe(nc_node, nc_node_tmp, list, list) {
- /* if a helper function has been passed as a parameter,
- * ask it if the entry has to be purged or not
- */
- if (to_purge && !to_purge(bat_priv, nc_node))
- continue;
-
- batadv_dbg(BATADV_DBG_NC, bat_priv,
- "Removing nc_node %pM -> %pM\n",
- nc_node->addr, nc_node->orig_node->orig);
- list_del_rcu(&nc_node->list);
- batadv_nc_node_put(nc_node);
- }
- spin_unlock_bh(lock);
-}
-
-/**
- * batadv_nc_purge_orig() - purges all nc node data attached to the given
- * originator
- * @bat_priv: the bat priv with all the mesh interface information
- * @orig_node: orig_node with the nc node entries to be purged
- * @to_purge: function in charge of deciding whether an entry has to be purged or
- * not. This function takes the nc node as argument and has to return
- * a boolean value: true if the entry has to be deleted, false
- * otherwise
- */
-void batadv_nc_purge_orig(struct batadv_priv *bat_priv,
- struct batadv_orig_node *orig_node,
- bool (*to_purge)(struct batadv_priv *,
- struct batadv_nc_node *))
-{
- /* Check ingoing nc_node's of this orig_node */
- batadv_nc_purge_orig_nc_nodes(bat_priv, &orig_node->in_coding_list,
- &orig_node->in_coding_list_lock,
- to_purge);
-
- /* Check outgoing nc_node's of this orig_node */
- batadv_nc_purge_orig_nc_nodes(bat_priv, &orig_node->out_coding_list,
- &orig_node->out_coding_list_lock,
- to_purge);
-}
-
-/**
- * batadv_nc_purge_orig_hash() - traverse entire originator hash to check if
- * they have timed out nc nodes
- * @bat_priv: the bat priv with all the mesh interface information
- */
-static void batadv_nc_purge_orig_hash(struct batadv_priv *bat_priv)
-{
- struct batadv_hashtable *hash = bat_priv->orig_hash;
- struct hlist_head *head;
- struct batadv_orig_node *orig_node;
- u32 i;
-
- if (!hash)
- return;
-
- /* For each orig_node */
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, head, hash_entry)
- batadv_nc_purge_orig(bat_priv, orig_node,
- batadv_nc_to_purge_nc_node);
- rcu_read_unlock();
- }
-}
-
-/**
- * batadv_nc_purge_paths() - traverse all nc paths part of the hash and remove
- * unused ones
- * @bat_priv: the bat priv with all the mesh interface information
- * @hash: hash table containing the nc paths to check
- * @to_purge: function in charge of deciding whether an entry has to be purged or
- * not. This function takes the nc node as argument and has to return
- * a boolean value: true if the entry has to be deleted, false
- * otherwise
- */
-static void batadv_nc_purge_paths(struct batadv_priv *bat_priv,
- struct batadv_hashtable *hash,
- bool (*to_purge)(struct batadv_priv *,
- struct batadv_nc_path *))
-{
- struct hlist_head *head;
- struct hlist_node *node_tmp;
- struct batadv_nc_path *nc_path;
- spinlock_t *lock; /* Protects lists in hash */
- u32 i;
-
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
- lock = &hash->list_locks[i];
-
- /* For each nc_path in this bin */
- spin_lock_bh(lock);
- hlist_for_each_entry_safe(nc_path, node_tmp, head, hash_entry) {
- /* if a helper function has been passed as a parameter,
- * ask it if the entry has to be purged or not
- */
- if (to_purge && !to_purge(bat_priv, nc_path))
- continue;
-
- /* purging a non-empty nc_path should never happen, but
- * is observed under high CPU load. Delay the purging
- * until next iteration to allow the packet_list to be
- * emptied first.
- */
- if (!unlikely(list_empty(&nc_path->packet_list))) {
- net_ratelimited_function(printk,
- KERN_WARNING
- "Skipping free of non-empty nc_path (%pM -> %pM)!\n",
- nc_path->prev_hop,
- nc_path->next_hop);
- continue;
- }
-
- /* nc_path is unused, so remove it */
- batadv_dbg(BATADV_DBG_NC, bat_priv,
- "Remove nc_path %pM -> %pM\n",
- nc_path->prev_hop, nc_path->next_hop);
- hlist_del_rcu(&nc_path->hash_entry);
- batadv_nc_path_put(nc_path);
- }
- spin_unlock_bh(lock);
- }
-}
-
-/**
- * batadv_nc_hash_key_gen() - computes the nc_path hash key
- * @key: buffer to hold the final hash key
- * @src: source ethernet mac address going into the hash key
- * @dst: destination ethernet mac address going into the hash key
- */
-static void batadv_nc_hash_key_gen(struct batadv_nc_path *key, const char *src,
- const char *dst)
-{
- memcpy(key->prev_hop, src, sizeof(key->prev_hop));
- memcpy(key->next_hop, dst, sizeof(key->next_hop));
-}
-
-/**
- * batadv_nc_hash_choose() - compute the hash value for an nc path
- * @data: data to hash
- * @size: size of the hash table
- *
- * Return: the selected index in the hash table for the given data.
- */
-static u32 batadv_nc_hash_choose(const void *data, u32 size)
-{
- const struct batadv_nc_path *nc_path = data;
- u32 hash = 0;
-
- hash = jhash(&nc_path->prev_hop, sizeof(nc_path->prev_hop), hash);
- hash = jhash(&nc_path->next_hop, sizeof(nc_path->next_hop), hash);
-
- return hash % size;
-}
-
-/**
- * batadv_nc_hash_compare() - comparison function used in the network coding hash
- * tables
- * @node: node in the local table
- * @data2: second object to compare the node to
- *
- * Return: true if the two entries are the same, false otherwise
- */
-static bool batadv_nc_hash_compare(const struct hlist_node *node,
- const void *data2)
-{
- const struct batadv_nc_path *nc_path1, *nc_path2;
-
- nc_path1 = container_of(node, struct batadv_nc_path, hash_entry);
- nc_path2 = data2;
-
- /* Return 1 if the two keys are identical */
- if (!batadv_compare_eth(nc_path1->prev_hop, nc_path2->prev_hop))
- return false;
-
- if (!batadv_compare_eth(nc_path1->next_hop, nc_path2->next_hop))
- return false;
-
- return true;
-}
-
-/**
- * batadv_nc_hash_find() - search for an existing nc path and return it
- * @hash: hash table containing the nc path
- * @data: search key
- *
- * Return: the nc_path if found, NULL otherwise.
- */
-static struct batadv_nc_path *
-batadv_nc_hash_find(struct batadv_hashtable *hash,
- void *data)
-{
- struct hlist_head *head;
- struct batadv_nc_path *nc_path, *nc_path_tmp = NULL;
- int index;
-
- if (!hash)
- return NULL;
-
- index = batadv_nc_hash_choose(data, hash->size);
- head = &hash->table[index];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(nc_path, head, hash_entry) {
- if (!batadv_nc_hash_compare(&nc_path->hash_entry, data))
- continue;
-
- if (!kref_get_unless_zero(&nc_path->refcount))
- continue;
-
- nc_path_tmp = nc_path;
- break;
- }
- rcu_read_unlock();
-
- return nc_path_tmp;
-}
-
-/**
- * batadv_nc_send_packet() - send non-coded packet and free nc_packet struct
- * @nc_packet: the nc packet to send
- */
-static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet)
-{
- batadv_send_unicast_skb(nc_packet->skb, nc_packet->neigh_node);
- nc_packet->skb = NULL;
- batadv_nc_packet_free(nc_packet, false);
-}
-
-/**
- * batadv_nc_sniffed_purge() - Checks timestamp of given sniffed nc_packet.
- * @bat_priv: the bat priv with all the mesh interface information
- * @nc_path: the nc path the packet belongs to
- * @nc_packet: the nc packet to be checked
- *
- * Checks whether the given sniffed (overheard) nc_packet has hit its buffering
- * timeout. If so, the packet is no longer kept and the entry deleted from the
- * queue. Has to be called with the appropriate locks.
- *
- * Return: false if the entry in the fifo queue has not timed out yet,
- * true otherwise.
- */
-static bool batadv_nc_sniffed_purge(struct batadv_priv *bat_priv,
- struct batadv_nc_path *nc_path,
- struct batadv_nc_packet *nc_packet)
-{
- unsigned long timeout = bat_priv->nc.max_buffer_time;
- bool res = false;
-
- lockdep_assert_held(&nc_path->packet_list_lock);
-
- /* Packets are added to tail, so the remaining packets did not time
- * out and we can stop processing the current queue
- */
- if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE &&
- !batadv_has_timed_out(nc_packet->timestamp, timeout))
- goto out;
-
- /* purge nc packet */
- list_del(&nc_packet->list);
- batadv_nc_packet_free(nc_packet, true);
-
- res = true;
-
-out:
- return res;
-}
-
-/**
- * batadv_nc_fwd_flush() - Checks the timestamp of the given nc packet.
- * @bat_priv: the bat priv with all the mesh interface information
- * @nc_path: the nc path the packet belongs to
- * @nc_packet: the nc packet to be checked
- *
- * Checks whether the given nc packet has hit its forward timeout. If so, the
- * packet is no longer delayed, immediately sent and the entry deleted from the
- * queue. Has to be called with the appropriate locks.
- *
- * Return: false if the entry in the fifo queue has not timed out yet,
- * true otherwise.
- */
-static bool batadv_nc_fwd_flush(struct batadv_priv *bat_priv,
- struct batadv_nc_path *nc_path,
- struct batadv_nc_packet *nc_packet)
-{
- unsigned long timeout = bat_priv->nc.max_fwd_delay;
-
- lockdep_assert_held(&nc_path->packet_list_lock);
-
- /* Packets are added to tail, so the remaining packets did not time
- * out and we can stop processing the current queue
- */
- if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE &&
- !batadv_has_timed_out(nc_packet->timestamp, timeout))
- return false;
-
- /* Send packet */
- batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
- batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
- nc_packet->skb->len + ETH_HLEN);
- list_del(&nc_packet->list);
- batadv_nc_send_packet(nc_packet);
-
- return true;
-}
-
-/**
- * batadv_nc_process_nc_paths() - traverse given nc packet pool and free timed
- * out nc packets
- * @bat_priv: the bat priv with all the mesh interface information
- * @hash: to be processed hash table
- * @process_fn: Function called to process given nc packet. Should return true
- * to encourage this function to proceed with the next packet.
- * Otherwise the rest of the current queue is skipped.
- */
-static void
-batadv_nc_process_nc_paths(struct batadv_priv *bat_priv,
- struct batadv_hashtable *hash,
- bool (*process_fn)(struct batadv_priv *,
- struct batadv_nc_path *,
- struct batadv_nc_packet *))
-{
- struct hlist_head *head;
- struct batadv_nc_packet *nc_packet, *nc_packet_tmp;
- struct batadv_nc_path *nc_path;
- bool ret;
- int i;
-
- if (!hash)
- return;
-
- /* Loop hash table bins */
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- /* Loop coding paths */
- rcu_read_lock();
- hlist_for_each_entry_rcu(nc_path, head, hash_entry) {
- /* Loop packets */
- spin_lock_bh(&nc_path->packet_list_lock);
- list_for_each_entry_safe(nc_packet, nc_packet_tmp,
- &nc_path->packet_list, list) {
- ret = process_fn(bat_priv, nc_path, nc_packet);
- if (!ret)
- break;
- }
- spin_unlock_bh(&nc_path->packet_list_lock);
- }
- rcu_read_unlock();
- }
-}
-
-/**
- * batadv_nc_worker() - periodic task for housekeeping related to network
- * coding
- * @work: kernel work struct
- */
-static void batadv_nc_worker(struct work_struct *work)
-{
- struct delayed_work *delayed_work;
- struct batadv_priv_nc *priv_nc;
- struct batadv_priv *bat_priv;
- unsigned long timeout;
-
- delayed_work = to_delayed_work(work);
- priv_nc = container_of(delayed_work, struct batadv_priv_nc, work);
- bat_priv = container_of(priv_nc, struct batadv_priv, nc);
-
- batadv_nc_purge_orig_hash(bat_priv);
- batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash,
- batadv_nc_to_purge_nc_path_coding);
- batadv_nc_purge_paths(bat_priv, bat_priv->nc.decoding_hash,
- batadv_nc_to_purge_nc_path_decoding);
-
- timeout = bat_priv->nc.max_fwd_delay;
-
- if (batadv_has_timed_out(bat_priv->nc.timestamp_fwd_flush, timeout)) {
- batadv_nc_process_nc_paths(bat_priv, bat_priv->nc.coding_hash,
- batadv_nc_fwd_flush);
- bat_priv->nc.timestamp_fwd_flush = jiffies;
- }
-
- if (batadv_has_timed_out(bat_priv->nc.timestamp_sniffed_purge,
- bat_priv->nc.max_buffer_time)) {
- batadv_nc_process_nc_paths(bat_priv, bat_priv->nc.decoding_hash,
- batadv_nc_sniffed_purge);
- bat_priv->nc.timestamp_sniffed_purge = jiffies;
- }
-
- /* Schedule a new check */
- batadv_nc_start_timer(bat_priv);
-}
-
-/**
- * batadv_can_nc_with_orig() - checks whether the given orig node is suitable
- * for coding or not
- * @bat_priv: the bat priv with all the mesh interface information
- * @orig_node: neighboring orig node which may be used as nc candidate
- * @ogm_packet: incoming ogm packet also used for the checks
- *
- * Return: true if all of the following hold:
- * 1) The OGM has the most recent sequence number.
- * 2) The TTL has been decremented by exactly one.
- * 3) The OGM was received from the first hop from orig_node.
- * 4) The TQ value of the OGM is above bat_priv->nc.min_tq.
- */
-static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
- struct batadv_orig_node *orig_node,
- struct batadv_ogm_packet *ogm_packet)
-{
- struct batadv_orig_ifinfo *orig_ifinfo;
- u32 last_real_seqno;
- u8 last_ttl;
-
- orig_ifinfo = batadv_orig_ifinfo_get(orig_node, BATADV_IF_DEFAULT);
- if (!orig_ifinfo)
- return false;
-
- last_ttl = orig_ifinfo->last_ttl;
- last_real_seqno = orig_ifinfo->last_real_seqno;
- batadv_orig_ifinfo_put(orig_ifinfo);
-
- if (last_real_seqno != ntohl(ogm_packet->seqno))
- return false;
- if (last_ttl != ogm_packet->ttl + 1)
- return false;
- if (!batadv_compare_eth(ogm_packet->orig, ogm_packet->prev_sender))
- return false;
- if (ogm_packet->tq < bat_priv->nc.min_tq)
- return false;
-
- return true;
-}
-
-/**
- * batadv_nc_find_nc_node() - search for an existing nc node and return it
- * @orig_node: orig node originating the ogm packet
- * @orig_neigh_node: neighboring orig node from which we received the ogm packet
- * (can be equal to orig_node)
- * @in_coding: traverse incoming or outgoing network coding list
- *
- * Return: the nc_node if found, NULL otherwise.
- */
-static struct batadv_nc_node *
-batadv_nc_find_nc_node(struct batadv_orig_node *orig_node,
- struct batadv_orig_node *orig_neigh_node,
- bool in_coding)
-{
- struct batadv_nc_node *nc_node, *nc_node_out = NULL;
- struct list_head *list;
-
- if (in_coding)
- list = &orig_neigh_node->in_coding_list;
- else
- list = &orig_neigh_node->out_coding_list;
-
- /* Traverse list of nc_nodes to orig_node */
- rcu_read_lock();
- list_for_each_entry_rcu(nc_node, list, list) {
- if (!batadv_compare_eth(nc_node->addr, orig_node->orig))
- continue;
-
- if (!kref_get_unless_zero(&nc_node->refcount))
- continue;
-
- /* Found a match */
- nc_node_out = nc_node;
- break;
- }
- rcu_read_unlock();
-
- return nc_node_out;
-}
-
-/**
- * batadv_nc_get_nc_node() - retrieves an nc node or creates the entry if it was
- * not found
- * @bat_priv: the bat priv with all the mesh interface information
- * @orig_node: orig node originating the ogm packet
- * @orig_neigh_node: neighboring orig node from which we received the ogm packet
- * (can be equal to orig_node)
- * @in_coding: traverse incoming or outgoing network coding list
- *
- * Return: the nc_node if found or created, NULL in case of an error.
- */
-static struct batadv_nc_node *
-batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
- struct batadv_orig_node *orig_node,
- struct batadv_orig_node *orig_neigh_node,
- bool in_coding)
-{
- struct batadv_nc_node *nc_node;
- spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
- struct list_head *list;
-
- /* Select ingoing or outgoing coding node */
- if (in_coding) {
- lock = &orig_neigh_node->in_coding_list_lock;
- list = &orig_neigh_node->in_coding_list;
- } else {
- lock = &orig_neigh_node->out_coding_list_lock;
- list = &orig_neigh_node->out_coding_list;
- }
-
- spin_lock_bh(lock);
-
- /* Check if nc_node is already added */
- nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
-
- /* Node found */
- if (nc_node)
- goto unlock;
-
- nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
- if (!nc_node)
- goto unlock;
-
- /* Initialize nc_node */
- INIT_LIST_HEAD(&nc_node->list);
- kref_init(&nc_node->refcount);
- ether_addr_copy(nc_node->addr, orig_node->orig);
- kref_get(&orig_neigh_node->refcount);
- nc_node->orig_node = orig_neigh_node;
-
- batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n",
- nc_node->addr, nc_node->orig_node->orig);
-
- /* Add nc_node to orig_node */
- kref_get(&nc_node->refcount);
- list_add_tail_rcu(&nc_node->list, list);
-
-unlock:
- spin_unlock_bh(lock);
-
- return nc_node;
-}
-
-/**
- * batadv_nc_update_nc_node() - updates stored incoming and outgoing nc node
- * structs (best called on incoming OGMs)
- * @bat_priv: the bat priv with all the mesh interface information
- * @orig_node: orig node originating the ogm packet
- * @orig_neigh_node: neighboring orig node from which we received the ogm packet
- * (can be equal to orig_node)
- * @ogm_packet: incoming ogm packet
- * @is_single_hop_neigh: orig_node is a single hop neighbor
- */
-void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
- struct batadv_orig_node *orig_node,
- struct batadv_orig_node *orig_neigh_node,
- struct batadv_ogm_packet *ogm_packet,
- int is_single_hop_neigh)
-{
- struct batadv_nc_node *in_nc_node = NULL;
- struct batadv_nc_node *out_nc_node = NULL;
-
- /* Check if network coding is enabled */
- if (!atomic_read(&bat_priv->network_coding))
- goto out;
-
- /* check if orig node is network coding enabled */
- if (!test_bit(BATADV_ORIG_CAPA_HAS_NC, &orig_node->capabilities))
- goto out;
-
- /* accept ogms from 'good' neighbors and single hop neighbors */
- if (!batadv_can_nc_with_orig(bat_priv, orig_node, ogm_packet) &&
- !is_single_hop_neigh)
- goto out;
-
- /* Add orig_node as in_nc_node on hop */
- in_nc_node = batadv_nc_get_nc_node(bat_priv, orig_node,
- orig_neigh_node, true);
- if (!in_nc_node)
- goto out;
-
- in_nc_node->last_seen = jiffies;
-
- /* Add hop as out_nc_node on orig_node */
- out_nc_node = batadv_nc_get_nc_node(bat_priv, orig_neigh_node,
- orig_node, false);
- if (!out_nc_node)
- goto out;
-
- out_nc_node->last_seen = jiffies;
-
-out:
- batadv_nc_node_put(in_nc_node);
- batadv_nc_node_put(out_nc_node);
-}
-
-/**
- * batadv_nc_get_path() - get existing nc_path or allocate a new one
- * @bat_priv: the bat priv with all the mesh interface information
- * @hash: hash table containing the nc path
- * @src: ethernet source address - first half of the nc path search key
- * @dst: ethernet destination address - second half of the nc path search key
- *
- * Return: pointer to nc_path if the path was found or created, returns NULL
- * on error.
- */
-static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
- struct batadv_hashtable *hash,
- u8 *src,
- u8 *dst)
-{
- int hash_added;
- struct batadv_nc_path *nc_path, nc_path_key;
-
- batadv_nc_hash_key_gen(&nc_path_key, src, dst);
-
- /* Search for existing nc_path */
- nc_path = batadv_nc_hash_find(hash, (void *)&nc_path_key);
-
- if (nc_path) {
- /* Set timestamp to delay removal of nc_path */
- nc_path->last_valid = jiffies;
- return nc_path;
- }
-
- /* No existing nc_path was found; create a new */
- nc_path = kzalloc(sizeof(*nc_path), GFP_ATOMIC);
-
- if (!nc_path)
- return NULL;
-
- /* Initialize nc_path */
- INIT_LIST_HEAD(&nc_path->packet_list);
- spin_lock_init(&nc_path->packet_list_lock);
- kref_init(&nc_path->refcount);
- nc_path->last_valid = jiffies;
- ether_addr_copy(nc_path->next_hop, dst);
- ether_addr_copy(nc_path->prev_hop, src);
-
- batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_path %pM -> %pM\n",
- nc_path->prev_hop,
- nc_path->next_hop);
-
- /* Add nc_path to hash table */
- kref_get(&nc_path->refcount);
- hash_added = batadv_hash_add(hash, batadv_nc_hash_compare,
- batadv_nc_hash_choose, &nc_path_key,
- &nc_path->hash_entry);
-
- if (hash_added < 0) {
- kfree(nc_path);
- return NULL;
- }
-
- return nc_path;
-}
-
-/**
- * batadv_nc_random_weight_tq() - scale the receiver's TQ-value to avoid unfair
- * selection of a receiver with slightly lower TQ than the other
- * @tq: to be weighted tq value
- *
- * Return: scaled tq value
- */
-static u8 batadv_nc_random_weight_tq(u8 tq)
-{
- /* randomize the estimated packet loss (max TQ - estimated TQ) */
- u8 rand_tq = get_random_u32_below(BATADV_TQ_MAX_VALUE + 1 - tq);
-
- /* convert to (randomized) estimated tq again */
- return BATADV_TQ_MAX_VALUE - rand_tq;
-}
-
-/**
- * batadv_nc_memxor() - XOR destination with source
- * @dst: byte array to XOR into
- * @src: byte array to XOR from
- * @len: length of destination array
- */
-static void batadv_nc_memxor(char *dst, const char *src, unsigned int len)
-{
- unsigned int i;
-
- for (i = 0; i < len; ++i)
- dst[i] ^= src[i];
-}
-
-/**
- * batadv_nc_code_packets() - code a received unicast_packet with an nc packet
- * into a coded_packet and send it
- * @bat_priv: the bat priv with all the mesh interface information
- * @skb: data skb to forward
- * @ethhdr: pointer to the ethernet header inside the skb
- * @nc_packet: structure containing the packet to the skb can be coded with
- * @neigh_node: next hop to forward packet to
- *
- * Return: true if both packets are consumed, false otherwise.
- */
-static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
- struct sk_buff *skb,
- struct ethhdr *ethhdr,
- struct batadv_nc_packet *nc_packet,
- struct batadv_neigh_node *neigh_node)
-{
- u8 tq_weighted_neigh, tq_weighted_coding, tq_tmp;
- struct sk_buff *skb_dest, *skb_src;
- struct batadv_unicast_packet *packet1;
- struct batadv_unicast_packet *packet2;
- struct batadv_coded_packet *coded_packet;
- struct batadv_neigh_node *neigh_tmp, *router_neigh, *first_dest;
- struct batadv_neigh_node *router_coding = NULL, *second_dest;
- struct batadv_neigh_ifinfo *router_neigh_ifinfo = NULL;
- struct batadv_neigh_ifinfo *router_coding_ifinfo = NULL;
- u8 *first_source, *second_source;
- __be32 packet_id1, packet_id2;
- size_t count;
- bool res = false;
- int coding_len;
- int unicast_size = sizeof(*packet1);
- int coded_size = sizeof(*coded_packet);
- int header_add = coded_size - unicast_size;
-
- /* TODO: do we need to consider the outgoing interface for
- * coded packets?
- */
- router_neigh = batadv_orig_router_get(neigh_node->orig_node,
- BATADV_IF_DEFAULT);
- if (!router_neigh)
- goto out;
-
- router_neigh_ifinfo = batadv_neigh_ifinfo_get(router_neigh,
- BATADV_IF_DEFAULT);
- if (!router_neigh_ifinfo)
- goto out;
-
- neigh_tmp = nc_packet->neigh_node;
- router_coding = batadv_orig_router_get(neigh_tmp->orig_node,
- BATADV_IF_DEFAULT);
- if (!router_coding)
- goto out;
-
- router_coding_ifinfo = batadv_neigh_ifinfo_get(router_coding,
- BATADV_IF_DEFAULT);
- if (!router_coding_ifinfo)
- goto out;
-
- tq_tmp = router_neigh_ifinfo->bat_iv.tq_avg;
- tq_weighted_neigh = batadv_nc_random_weight_tq(tq_tmp);
- tq_tmp = router_coding_ifinfo->bat_iv.tq_avg;
- tq_weighted_coding = batadv_nc_random_weight_tq(tq_tmp);
-
- /* Select one destination for the MAC-header dst-field based on
- * weighted TQ-values.
- */
- if (tq_weighted_neigh >= tq_weighted_coding) {
- /* Destination from nc_packet is selected for MAC-header */
- first_dest = nc_packet->neigh_node;
- first_source = nc_packet->nc_path->prev_hop;
- second_dest = neigh_node;
- second_source = ethhdr->h_source;
- packet1 = (struct batadv_unicast_packet *)nc_packet->skb->data;
- packet2 = (struct batadv_unicast_packet *)skb->data;
- packet_id1 = nc_packet->packet_id;
- packet_id2 = batadv_skb_crc32(skb,
- skb->data + sizeof(*packet2));
- } else {
- /* Destination for skb is selected for MAC-header */
- first_dest = neigh_node;
- first_source = ethhdr->h_source;
- second_dest = nc_packet->neigh_node;
- second_source = nc_packet->nc_path->prev_hop;
- packet1 = (struct batadv_unicast_packet *)skb->data;
- packet2 = (struct batadv_unicast_packet *)nc_packet->skb->data;
- packet_id1 = batadv_skb_crc32(skb,
- skb->data + sizeof(*packet1));
- packet_id2 = nc_packet->packet_id;
- }
-
- /* Instead of zero padding the smallest data buffer, we
- * code into the largest.
- */
- if (skb->len <= nc_packet->skb->len) {
- skb_dest = nc_packet->skb;
- skb_src = skb;
- } else {
- skb_dest = skb;
- skb_src = nc_packet->skb;
- }
-
- /* coding_len is used when decoding the shorter packet */
- coding_len = skb_src->len - unicast_size;
-
- if (skb_linearize(skb_dest) < 0 || skb_linearize(skb_src) < 0)
- goto out;
-
- skb_push(skb_dest, header_add);
-
- coded_packet = (struct batadv_coded_packet *)skb_dest->data;
- skb_reset_mac_header(skb_dest);
-
- coded_packet->packet_type = BATADV_CODED;
- coded_packet->version = BATADV_COMPAT_VERSION;
- coded_packet->ttl = packet1->ttl;
-
- /* Info about first unicast packet */
- ether_addr_copy(coded_packet->first_source, first_source);
- ether_addr_copy(coded_packet->first_orig_dest, packet1->dest);
- coded_packet->first_crc = packet_id1;
- coded_packet->first_ttvn = packet1->ttvn;
-
- /* Info about second unicast packet */
- ether_addr_copy(coded_packet->second_dest, second_dest->addr);
- ether_addr_copy(coded_packet->second_source, second_source);
- ether_addr_copy(coded_packet->second_orig_dest, packet2->dest);
- coded_packet->second_crc = packet_id2;
- coded_packet->second_ttl = packet2->ttl;
- coded_packet->second_ttvn = packet2->ttvn;
- coded_packet->coded_len = htons(coding_len);
-
- /* This is where the magic happens: Code skb_src into skb_dest */
- batadv_nc_memxor(skb_dest->data + coded_size,
- skb_src->data + unicast_size, coding_len);
-
- /* Update counters accordingly */
- if (BATADV_SKB_CB(skb_src)->decoded &&
- BATADV_SKB_CB(skb_dest)->decoded) {
- /* Both packets are recoded */
- count = skb_src->len + ETH_HLEN;
- count += skb_dest->len + ETH_HLEN;
- batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE, 2);
- batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE_BYTES, count);
- } else if (!BATADV_SKB_CB(skb_src)->decoded &&
- !BATADV_SKB_CB(skb_dest)->decoded) {
- /* Both packets are newly coded */
- count = skb_src->len + ETH_HLEN;
- count += skb_dest->len + ETH_HLEN;
- batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE, 2);
- batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE_BYTES, count);
- } else if (BATADV_SKB_CB(skb_src)->decoded &&
- !BATADV_SKB_CB(skb_dest)->decoded) {
- /* skb_src recoded and skb_dest is newly coded */
- batadv_inc_counter(bat_priv, BATADV_CNT_NC_RECODE);
- batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE_BYTES,
- skb_src->len + ETH_HLEN);
- batadv_inc_counter(bat_priv, BATADV_CNT_NC_CODE);
- batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE_BYTES,
- skb_dest->len + ETH_HLEN);
- } else if (!BATADV_SKB_CB(skb_src)->decoded &&
- BATADV_SKB_CB(skb_dest)->decoded) {
- /* skb_src is newly coded and skb_dest is recoded */
- batadv_inc_counter(bat_priv, BATADV_CNT_NC_CODE);
- batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE_BYTES,
- skb_src->len + ETH_HLEN);
- batadv_inc_counter(bat_priv, BATADV_CNT_NC_RECODE);
- batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE_BYTES,
- skb_dest->len + ETH_HLEN);
- }
-
- /* skb_src is now coded into skb_dest, so free it */
- consume_skb(skb_src);
-
- /* avoid duplicate free of skb from nc_packet */
- nc_packet->skb = NULL;
- batadv_nc_packet_free(nc_packet, false);
-
- /* Send the coded packet and return true */
- batadv_send_unicast_skb(skb_dest, first_dest);
- res = true;
-out:
- batadv_neigh_node_put(router_neigh);
- batadv_neigh_node_put(router_coding);
- batadv_neigh_ifinfo_put(router_neigh_ifinfo);
- batadv_neigh_ifinfo_put(router_coding_ifinfo);
- return res;
-}
-
-/**
- * batadv_nc_skb_coding_possible() - true if a decoded skb is available at dst.
- * @skb: data skb to forward
- * @dst: destination mac address of the other skb to code with
- * @src: source mac address of skb
- *
- * Whenever we network code a packet we have to check whether we received it in
- * a network coded form. If so, we may not be able to use it for coding because
- * some neighbors may also have received (overheard) the packet in the network
- * coded form without being able to decode it. It is hard to know which of the
- * neighboring nodes was able to decode the packet, therefore we can only
- * re-code the packet if the source of the previous encoded packet is involved.
- * Since the source encoded the packet we can be certain it has all necessary
- * decode information.
- *
- * Return: true if coding of a decoded packet is allowed.
- */
-static bool batadv_nc_skb_coding_possible(struct sk_buff *skb, u8 *dst, u8 *src)
-{
- if (BATADV_SKB_CB(skb)->decoded && !batadv_compare_eth(dst, src))
- return false;
- return true;
-}
-
-/**
- * batadv_nc_path_search() - Find the coding path matching in_nc_node and
- * out_nc_node to retrieve a buffered packet that can be used for coding.
- * @bat_priv: the bat priv with all the mesh interface information
- * @in_nc_node: pointer to skb next hop's neighbor nc node
- * @out_nc_node: pointer to skb source's neighbor nc node
- * @skb: data skb to forward
- * @eth_dst: next hop mac address of skb
- *
- * Return: the buffered nc_packet that can be coded with the skb, NULL otherwise.
- */
-static struct batadv_nc_packet *
-batadv_nc_path_search(struct batadv_priv *bat_priv,
- struct batadv_nc_node *in_nc_node,
- struct batadv_nc_node *out_nc_node,
- struct sk_buff *skb,
- u8 *eth_dst)
-{
- struct batadv_nc_path *nc_path, nc_path_key;
- struct batadv_nc_packet *nc_packet_out = NULL;
- struct batadv_nc_packet *nc_packet, *nc_packet_tmp;
- struct batadv_hashtable *hash = bat_priv->nc.coding_hash;
- int idx;
-
- if (!hash)
- return NULL;
-
- /* Create a search key for the nc_path lookup */
- batadv_nc_hash_key_gen(&nc_path_key, in_nc_node->addr,
- out_nc_node->addr);
- idx = batadv_nc_hash_choose(&nc_path_key, hash->size);
-
- /* Check for coding opportunities in this nc_path */
- rcu_read_lock();
- hlist_for_each_entry_rcu(nc_path, &hash->table[idx], hash_entry) {
- if (!batadv_compare_eth(nc_path->prev_hop, in_nc_node->addr))
- continue;
-
- if (!batadv_compare_eth(nc_path->next_hop, out_nc_node->addr))
- continue;
-
- spin_lock_bh(&nc_path->packet_list_lock);
- if (list_empty(&nc_path->packet_list)) {
- spin_unlock_bh(&nc_path->packet_list_lock);
- continue;
- }
-
- list_for_each_entry_safe(nc_packet, nc_packet_tmp,
- &nc_path->packet_list, list) {
- if (!batadv_nc_skb_coding_possible(nc_packet->skb,
- eth_dst,
- in_nc_node->addr))
- continue;
-
- /* Coding opportunity is found! */
- list_del(&nc_packet->list);
- nc_packet_out = nc_packet;
- break;
- }
-
- spin_unlock_bh(&nc_path->packet_list_lock);
- break;
- }
- rcu_read_unlock();
-
- return nc_packet_out;
-}
-
-/**
- * batadv_nc_skb_src_search() - Loops through the list of neighboring nodes of
- * the skb's sender (may be equal to the originator).
- * @bat_priv: the bat priv with all the mesh interface information
- * @skb: data skb to forward
- * @eth_dst: next hop mac address of skb
- * @eth_src: source mac address of skb
- * @in_nc_node: pointer to skb next hop's neighbor nc node
- *
- * Return: an nc packet if a suitable coding packet was found, NULL otherwise.
- */
-static struct batadv_nc_packet *
-batadv_nc_skb_src_search(struct batadv_priv *bat_priv,
- struct sk_buff *skb,
- u8 *eth_dst,
- u8 *eth_src,
- struct batadv_nc_node *in_nc_node)
-{
- struct batadv_orig_node *orig_node;
- struct batadv_nc_node *out_nc_node;
- struct batadv_nc_packet *nc_packet = NULL;
-
- orig_node = batadv_orig_hash_find(bat_priv, eth_src);
- if (!orig_node)
- return NULL;
-
- rcu_read_lock();
- list_for_each_entry_rcu(out_nc_node,
- &orig_node->out_coding_list, list) {
- /* Check if the skb is decoded and if recoding is possible */
- if (!batadv_nc_skb_coding_possible(skb,
- out_nc_node->addr, eth_src))
- continue;
-
- /* Search for an opportunity in this nc_path */
- nc_packet = batadv_nc_path_search(bat_priv, in_nc_node,
- out_nc_node, skb, eth_dst);
- if (nc_packet)
- break;
- }
- rcu_read_unlock();
-
- batadv_orig_node_put(orig_node);
- return nc_packet;
-}
-
-/**
- * batadv_nc_skb_store_before_coding() - set the ethernet src and dst of the
- * unicast skb before it is stored for use in later decoding
- * @bat_priv: the bat priv with all the mesh interface information
- * @skb: data skb to store
- * @eth_dst_new: new destination mac address of skb
- */
-static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
- struct sk_buff *skb,
- u8 *eth_dst_new)
-{
- struct ethhdr *ethhdr;
-
- /* Copy skb header to change the mac header */
- skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
- if (!skb)
- return;
-
- /* Set the mac header as if we actually sent the packet uncoded */
- ethhdr = eth_hdr(skb);
- ether_addr_copy(ethhdr->h_source, ethhdr->h_dest);
- ether_addr_copy(ethhdr->h_dest, eth_dst_new);
-
- /* Set data pointer to MAC header to mimic packets from our tx path */
- skb_push(skb, ETH_HLEN);
-
- /* Add the packet to the decoding packet pool */
- batadv_nc_skb_store_for_decoding(bat_priv, skb);
-
- /* batadv_nc_skb_store_for_decoding() clones the skb, so we must free
- * our ref
- */
- consume_skb(skb);
-}
-
-/**
- * batadv_nc_skb_dst_search() - Loops through list of neighboring nodes to dst.
- * @skb: data skb to forward
- * @neigh_node: next hop to forward packet to
- * @ethhdr: pointer to the ethernet header inside the skb
- *
- * Loops through the list of neighboring nodes the next hop has a good
- * connection to (receives OGMs with a sufficient quality). We need to find a
- * neighbor of our next hop that potentially sent a packet which our next hop
- * also received (overheard) and has stored for later decoding.
- *
- * Return: true if the skb was consumed (encoded packet sent) or false otherwise
- */
-static bool batadv_nc_skb_dst_search(struct sk_buff *skb,
- struct batadv_neigh_node *neigh_node,
- struct ethhdr *ethhdr)
-{
- struct net_device *netdev = neigh_node->if_incoming->mesh_iface;
- struct batadv_priv *bat_priv = netdev_priv(netdev);
- struct batadv_orig_node *orig_node = neigh_node->orig_node;
- struct batadv_nc_node *nc_node;
- struct batadv_nc_packet *nc_packet = NULL;
-
- rcu_read_lock();
- list_for_each_entry_rcu(nc_node, &orig_node->in_coding_list, list) {
- /* Search for coding opportunity with this in_nc_node */
- nc_packet = batadv_nc_skb_src_search(bat_priv, skb,
- neigh_node->addr,
- ethhdr->h_source, nc_node);
-
- /* Opportunity was found, so stop searching */
- if (nc_packet)
- break;
- }
- rcu_read_unlock();
-
- if (!nc_packet)
- return false;
-
- /* Save packets for later decoding */
- batadv_nc_skb_store_before_coding(bat_priv, skb,
- neigh_node->addr);
- batadv_nc_skb_store_before_coding(bat_priv, nc_packet->skb,
- nc_packet->neigh_node->addr);
-
- /* Code and send packets */
- if (batadv_nc_code_packets(bat_priv, skb, ethhdr, nc_packet,
- neigh_node))
- return true;
-
- /* out of mem ? Coding failed - we have to free the buffered packet
- * to avoid memleaks. The skb passed as argument will be dealt with
- * by the calling function.
- */
- batadv_nc_send_packet(nc_packet);
- return false;
-}
-
-/**
- * batadv_nc_skb_add_to_path() - buffer skb for later encoding / decoding
- * @skb: skb to add to path
- * @nc_path: path to add skb to
- * @neigh_node: next hop to forward packet to
- * @packet_id: checksum to identify packet
- *
- * Return: true if the packet was buffered or false in case of an error.
- */
-static bool batadv_nc_skb_add_to_path(struct sk_buff *skb,
- struct batadv_nc_path *nc_path,
- struct batadv_neigh_node *neigh_node,
- __be32 packet_id)
-{
- struct batadv_nc_packet *nc_packet;
-
- nc_packet = kzalloc(sizeof(*nc_packet), GFP_ATOMIC);
- if (!nc_packet)
- return false;
-
- /* Initialize nc_packet */
- nc_packet->timestamp = jiffies;
- nc_packet->packet_id = packet_id;
- nc_packet->skb = skb;
- nc_packet->neigh_node = neigh_node;
- nc_packet->nc_path = nc_path;
-
- /* Add coding packet to list */
- spin_lock_bh(&nc_path->packet_list_lock);
- list_add_tail(&nc_packet->list, &nc_path->packet_list);
- spin_unlock_bh(&nc_path->packet_list_lock);
-
- return true;
-}
-
-/**
- * batadv_nc_skb_forward() - try to code a packet or add it to the coding packet
- * buffer
- * @skb: data skb to forward
- * @neigh_node: next hop to forward packet to
- *
- * Return: true if the skb was consumed (encoded packet sent) or false otherwise
- */
-bool batadv_nc_skb_forward(struct sk_buff *skb,
- struct batadv_neigh_node *neigh_node)
-{
- const struct net_device *netdev = neigh_node->if_incoming->mesh_iface;
- struct batadv_priv *bat_priv = netdev_priv(netdev);
- struct batadv_unicast_packet *packet;
- struct batadv_nc_path *nc_path;
- struct ethhdr *ethhdr = eth_hdr(skb);
- __be32 packet_id;
- u8 *payload;
-
- /* Check if network coding is enabled */
- if (!atomic_read(&bat_priv->network_coding))
- goto out;
-
- /* We only handle unicast packets */
- payload = skb_network_header(skb);
- packet = (struct batadv_unicast_packet *)payload;
- if (packet->packet_type != BATADV_UNICAST)
- goto out;
-
- /* Try to find a coding opportunity and send the skb if one is found */
- if (batadv_nc_skb_dst_search(skb, neigh_node, ethhdr))
- return true;
-
- /* Find or create a nc_path for this src-dst pair */
- nc_path = batadv_nc_get_path(bat_priv,
- bat_priv->nc.coding_hash,
- ethhdr->h_source,
- neigh_node->addr);
-
- if (!nc_path)
- goto out;
-
- /* Add skb to nc_path */
- packet_id = batadv_skb_crc32(skb, payload + sizeof(*packet));
- if (!batadv_nc_skb_add_to_path(skb, nc_path, neigh_node, packet_id))
- goto free_nc_path;
-
- /* Packet is consumed */
- return true;
-
-free_nc_path:
- batadv_nc_path_put(nc_path);
-out:
- /* Packet is not consumed */
- return false;
-}
-
-/**
- * batadv_nc_skb_store_for_decoding() - save a clone of the skb which can be
- * used when decoding coded packets
- * @bat_priv: the bat priv with all the mesh interface information
- * @skb: data skb to store
- */
-void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
- struct sk_buff *skb)
-{
- struct batadv_unicast_packet *packet;
- struct batadv_nc_path *nc_path;
- struct ethhdr *ethhdr = eth_hdr(skb);
- __be32 packet_id;
- u8 *payload;
-
- /* Check if network coding is enabled */
- if (!atomic_read(&bat_priv->network_coding))
- goto out;
-
- /* Check for supported packet type */
- payload = skb_network_header(skb);
- packet = (struct batadv_unicast_packet *)payload;
- if (packet->packet_type != BATADV_UNICAST)
- goto out;
-
- /* Find existing nc_path or create a new */
- nc_path = batadv_nc_get_path(bat_priv,
- bat_priv->nc.decoding_hash,
- ethhdr->h_source,
- ethhdr->h_dest);
-
- if (!nc_path)
- goto out;
-
- /* Clone skb and adjust skb->data to point at batman header */
- skb = skb_clone(skb, GFP_ATOMIC);
- if (unlikely(!skb))
- goto free_nc_path;
-
- if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
- goto free_skb;
-
- if (unlikely(!skb_pull_rcsum(skb, ETH_HLEN)))
- goto free_skb;
-
- /* Add skb to nc_path */
- packet_id = batadv_skb_crc32(skb, payload + sizeof(*packet));
- if (!batadv_nc_skb_add_to_path(skb, nc_path, NULL, packet_id))
- goto free_skb;
-
- batadv_inc_counter(bat_priv, BATADV_CNT_NC_BUFFER);
- return;
-
-free_skb:
- kfree_skb(skb);
-free_nc_path:
- batadv_nc_path_put(nc_path);
-out:
- return;
-}
-
-/**
- * batadv_nc_skb_store_sniffed_unicast() - check if a received unicast packet
- * should be saved in the decoding buffer and, if so, store it there
- * @bat_priv: the bat priv with all the mesh interface information
- * @skb: unicast skb to store
- */
-void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
- struct sk_buff *skb)
-{
- struct ethhdr *ethhdr = eth_hdr(skb);
-
- if (batadv_is_my_mac(bat_priv, ethhdr->h_dest))
- return;
-
- /* Set data pointer to MAC header to mimic packets from our tx path */
- skb_push(skb, ETH_HLEN);
-
- batadv_nc_skb_store_for_decoding(bat_priv, skb);
-}
-
-/**
- * batadv_nc_skb_decode_packet() - decode given skb using the decode data stored
- * in nc_packet
- * @bat_priv: the bat priv with all the mesh interface information
- * @skb: unicast skb to decode
- * @nc_packet: decode data needed to decode the skb
- *
- * Return: pointer to decoded unicast packet if the packet was decoded or NULL
- * in case of an error.
- */
-static struct batadv_unicast_packet *
-batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
- struct batadv_nc_packet *nc_packet)
-{
- const int h_size = sizeof(struct batadv_unicast_packet);
- const int h_diff = sizeof(struct batadv_coded_packet) - h_size;
- struct batadv_unicast_packet *unicast_packet;
- struct batadv_coded_packet coded_packet_tmp;
- struct ethhdr *ethhdr, ethhdr_tmp;
- u8 *orig_dest, ttl, ttvn;
- unsigned int coding_len;
- int err;
-
- /* Save headers temporarily */
- memcpy(&coded_packet_tmp, skb->data, sizeof(coded_packet_tmp));
- memcpy(&ethhdr_tmp, skb_mac_header(skb), sizeof(ethhdr_tmp));
-
- if (skb_cow(skb, 0) < 0)
- return NULL;
-
- if (unlikely(!skb_pull_rcsum(skb, h_diff)))
- return NULL;
-
- /* Data points to batman header, so set mac header 14 bytes before
- * and network to data
- */
- skb_set_mac_header(skb, -ETH_HLEN);
- skb_reset_network_header(skb);
-
- /* Reconstruct original mac header */
- ethhdr = eth_hdr(skb);
- *ethhdr = ethhdr_tmp;
-
- /* Select the correct unicast header information based on the location
- * of our mac address in the coded_packet header
- */
- if (batadv_is_my_mac(bat_priv, coded_packet_tmp.second_dest)) {
- /* If we are the second destination the packet was overheard,
- * so the Ethernet address must be copied to h_dest and
- * pkt_type changed from PACKET_OTHERHOST to PACKET_HOST
- */
- ether_addr_copy(ethhdr->h_dest, coded_packet_tmp.second_dest);
- skb->pkt_type = PACKET_HOST;
-
- orig_dest = coded_packet_tmp.second_orig_dest;
- ttl = coded_packet_tmp.second_ttl;
- ttvn = coded_packet_tmp.second_ttvn;
- } else {
- orig_dest = coded_packet_tmp.first_orig_dest;
- ttl = coded_packet_tmp.ttl;
- ttvn = coded_packet_tmp.first_ttvn;
- }
-
- coding_len = ntohs(coded_packet_tmp.coded_len);
-
- if (coding_len > skb->len)
- return NULL;
-
- /* Here the magic is reversed:
- * extract the missing packet from the received coded packet
- */
- batadv_nc_memxor(skb->data + h_size,
- nc_packet->skb->data + h_size,
- coding_len);
-
- /* Resize decoded skb if decoded with larger packet */
- if (nc_packet->skb->len > coding_len + h_size) {
- err = pskb_trim_rcsum(skb, coding_len + h_size);
- if (err)
- return NULL;
- }
-
- /* Create decoded unicast packet */
- unicast_packet = (struct batadv_unicast_packet *)skb->data;
- unicast_packet->packet_type = BATADV_UNICAST;
- unicast_packet->version = BATADV_COMPAT_VERSION;
- unicast_packet->ttl = ttl;
- ether_addr_copy(unicast_packet->dest, orig_dest);
- unicast_packet->ttvn = ttvn;
-
- batadv_nc_packet_free(nc_packet, false);
- return unicast_packet;
-}
-
-/**
- * batadv_nc_find_decoding_packet() - search through buffered decoding data to
- * find the data needed to decode the coded packet
- * @bat_priv: the bat priv with all the mesh interface information
- * @ethhdr: pointer to the ethernet header inside the coded packet
- * @coded: coded packet we try to find decode data for
- *
- * Return: pointer to nc packet if the needed data was found or NULL otherwise.
- */
-static struct batadv_nc_packet *
-batadv_nc_find_decoding_packet(struct batadv_priv *bat_priv,
- struct ethhdr *ethhdr,
- struct batadv_coded_packet *coded)
-{
- struct batadv_hashtable *hash = bat_priv->nc.decoding_hash;
- struct batadv_nc_packet *tmp_nc_packet, *nc_packet = NULL;
- struct batadv_nc_path *nc_path, nc_path_key;
- u8 *dest, *source;
- __be32 packet_id;
- int index;
-
- if (!hash)
- return NULL;
-
- /* Select the correct packet id based on the location of our mac-addr */
- dest = ethhdr->h_source;
- if (!batadv_is_my_mac(bat_priv, coded->second_dest)) {
- source = coded->second_source;
- packet_id = coded->second_crc;
- } else {
- source = coded->first_source;
- packet_id = coded->first_crc;
- }
-
- batadv_nc_hash_key_gen(&nc_path_key, source, dest);
- index = batadv_nc_hash_choose(&nc_path_key, hash->size);
-
- /* Search for matching coding path */
- rcu_read_lock();
- hlist_for_each_entry_rcu(nc_path, &hash->table[index], hash_entry) {
- /* Find matching nc_packet */
- spin_lock_bh(&nc_path->packet_list_lock);
- list_for_each_entry(tmp_nc_packet,
- &nc_path->packet_list, list) {
- if (packet_id == tmp_nc_packet->packet_id) {
- list_del(&tmp_nc_packet->list);
-
- nc_packet = tmp_nc_packet;
- break;
- }
- }
- spin_unlock_bh(&nc_path->packet_list_lock);
-
- if (nc_packet)
- break;
- }
- rcu_read_unlock();
-
- if (!nc_packet)
- batadv_dbg(BATADV_DBG_NC, bat_priv,
- "No decoding packet found for %u\n", packet_id);
-
- return nc_packet;
-}
-
-/**
- * batadv_nc_recv_coded_packet() - try to decode coded packet and enqueue the
- * resulting unicast packet
- * @skb: incoming coded packet
- * @recv_if: pointer to interface this packet was received on
- *
- * Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
- * otherwise.
- */
-static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
- struct batadv_hard_iface *recv_if)
-{
- struct batadv_priv *bat_priv = netdev_priv(recv_if->mesh_iface);
- struct batadv_unicast_packet *unicast_packet;
- struct batadv_coded_packet *coded_packet;
- struct batadv_nc_packet *nc_packet;
- struct ethhdr *ethhdr;
- int hdr_size = sizeof(*coded_packet);
-
- /* Check if network coding is enabled */
- if (!atomic_read(&bat_priv->network_coding))
- goto free_skb;
-
- /* Make sure we can access (and remove) header */
- if (unlikely(!pskb_may_pull(skb, hdr_size)))
- goto free_skb;
-
- coded_packet = (struct batadv_coded_packet *)skb->data;
- ethhdr = eth_hdr(skb);
-
- /* Verify frame is destined for us */
- if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest) &&
- !batadv_is_my_mac(bat_priv, coded_packet->second_dest))
- goto free_skb;
-
- /* Update stat counter */
- if (batadv_is_my_mac(bat_priv, coded_packet->second_dest))
- batadv_inc_counter(bat_priv, BATADV_CNT_NC_SNIFFED);
-
- nc_packet = batadv_nc_find_decoding_packet(bat_priv, ethhdr,
- coded_packet);
- if (!nc_packet) {
- batadv_inc_counter(bat_priv, BATADV_CNT_NC_DECODE_FAILED);
- goto free_skb;
- }
-
- /* Make skb's linear, because decoding accesses the entire buffer */
- if (skb_linearize(skb) < 0)
- goto free_nc_packet;
-
- if (skb_linearize(nc_packet->skb) < 0)
- goto free_nc_packet;
-
- /* Decode the packet */
- unicast_packet = batadv_nc_skb_decode_packet(bat_priv, skb, nc_packet);
- if (!unicast_packet) {
- batadv_inc_counter(bat_priv, BATADV_CNT_NC_DECODE_FAILED);
- goto free_nc_packet;
- }
-
- /* Mark packet as decoded to do correct recoding when forwarding */
- BATADV_SKB_CB(skb)->decoded = true;
- batadv_inc_counter(bat_priv, BATADV_CNT_NC_DECODE);
- batadv_add_counter(bat_priv, BATADV_CNT_NC_DECODE_BYTES,
- skb->len + ETH_HLEN);
- return batadv_recv_unicast_packet(skb, recv_if);
-
-free_nc_packet:
- batadv_nc_packet_free(nc_packet, true);
-free_skb:
- kfree_skb(skb);
-
- return NET_RX_DROP;
-}
-
-/**
- * batadv_nc_mesh_free() - clean up network coding memory
- * @bat_priv: the bat priv with all the mesh interface information
- */
-void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
-{
- batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_NC, 1);
- batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_NC, 1);
- cancel_delayed_work_sync(&bat_priv->nc.work);
-
- batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL);
- batadv_hash_destroy(bat_priv->nc.coding_hash);
- batadv_nc_purge_paths(bat_priv, bat_priv->nc.decoding_hash, NULL);
- batadv_hash_destroy(bat_priv->nc.decoding_hash);
-}
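The decoding path removed above relies on a simple algebraic property: the relay XORs two queued payloads of equal length into one coded frame, and any node that already buffered one of the two payloads can strip it back out to recover the other (the buffered copy is matched via a CRC32 packet id). A minimal userspace sketch of that property, with hypothetical names and no batman-adv code, assuming both payloads have the same length:

/* Illustrative sketch only, not batman-adv code: the relay XORs two
 * equal-length payloads into one coded frame, and a node that already
 * buffered one of them XORs it back out to recover the other.
 */
#include <stdio.h>
#include <string.h>
#include <stddef.h>

static void xor_buffers(unsigned char *dst, const unsigned char *src,
			size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[i] ^= src[i];
}

int main(void)
{
	unsigned char pkt_a[8] = "PKT-AAA";	/* payload destined for node B */
	unsigned char pkt_b[8] = "PKT-BBB";	/* payload destined for node A */
	unsigned char coded[8];
	unsigned char recovered[8];

	/* Relay: code both buffered payloads into a single transmission. */
	memcpy(coded, pkt_a, sizeof(coded));
	xor_buffers(coded, pkt_b, sizeof(coded));

	/* Node A stored pkt_a earlier, so it can strip it out again. */
	memcpy(recovered, coded, sizeof(recovered));
	xor_buffers(recovered, pkt_a, sizeof(recovered));

	printf("recovered: %s\n", recovered);	/* prints PKT-BBB */
	return 0;
}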
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
deleted file mode 100644
index 368cc3130e4c..000000000000
--- a/net/batman-adv/network-coding.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) B.A.T.M.A.N. contributors:
- *
- * Martin Hundebøll, Jeppe Ledet-Pedersen
- */
-
-#ifndef _NET_BATMAN_ADV_NETWORK_CODING_H_
-#define _NET_BATMAN_ADV_NETWORK_CODING_H_
-
-#include "main.h"
-
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/types.h>
-#include <uapi/linux/batadv_packet.h>
-
-#ifdef CONFIG_BATMAN_ADV_NC
-
-void batadv_nc_status_update(struct net_device *net_dev);
-int batadv_nc_init(void);
-int batadv_nc_mesh_init(struct batadv_priv *bat_priv);
-void batadv_nc_mesh_free(struct batadv_priv *bat_priv);
-void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
- struct batadv_orig_node *orig_node,
- struct batadv_orig_node *orig_neigh_node,
- struct batadv_ogm_packet *ogm_packet,
- int is_single_hop_neigh);
-void batadv_nc_purge_orig(struct batadv_priv *bat_priv,
- struct batadv_orig_node *orig_node,
- bool (*to_purge)(struct batadv_priv *,
- struct batadv_nc_node *));
-void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv);
-void batadv_nc_init_orig(struct batadv_orig_node *orig_node);
-bool batadv_nc_skb_forward(struct sk_buff *skb,
- struct batadv_neigh_node *neigh_node);
-void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
- struct sk_buff *skb);
-void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
- struct sk_buff *skb);
-
-#else /* ifdef CONFIG_BATMAN_ADV_NC */
-
-static inline void batadv_nc_status_update(struct net_device *net_dev)
-{
-}
-
-static inline int batadv_nc_init(void)
-{
- return 0;
-}
-
-static inline int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
-{
- return 0;
-}
-
-static inline void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
-{
-}
-
-static inline void
-batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
- struct batadv_orig_node *orig_node,
- struct batadv_orig_node *orig_neigh_node,
- struct batadv_ogm_packet *ogm_packet,
- int is_single_hop_neigh)
-{
-}
-
-static inline void
-batadv_nc_purge_orig(struct batadv_priv *bat_priv,
- struct batadv_orig_node *orig_node,
- bool (*to_purge)(struct batadv_priv *,
- struct batadv_nc_node *))
-{
-}
-
-static inline void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
-{
-}
-
-static inline void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
-{
-}
-
-static inline bool batadv_nc_skb_forward(struct sk_buff *skb,
- struct batadv_neigh_node *neigh_node)
-{
- return false;
-}
-
-static inline void
-batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
- struct sk_buff *skb)
-{
-}
-
-static inline void
-batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
- struct sk_buff *skb)
-{
-}
-
-#endif /* ifdef CONFIG_BATMAN_ADV_NC */
-
-#endif /* _NET_BATMAN_ADV_NETWORK_CODING_H_ */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index d9cfc5c6b208..c84420cb410d 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -29,7 +29,6 @@
#include <linux/workqueue.h>
#include <uapi/linux/batadv_packet.h>
-#include "bat_algo.h"
#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
@@ -38,7 +37,6 @@
#include "log.h"
#include "multicast.h"
#include "netlink.h"
-#include "network-coding.h"
#include "routing.h"
#include "translation-table.h"
@@ -884,9 +882,6 @@ void batadv_orig_node_release(struct kref *ref)
}
spin_unlock_bh(&orig_node->vlan_list_lock);
- /* Free nc_nodes */
- batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
-
call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}
@@ -960,8 +955,6 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
spin_lock_init(&orig_node->tt_lock);
spin_lock_init(&orig_node->vlan_list_lock);
- batadv_nc_init_orig(orig_node);
-
/* extra reference for return */
kref_init(&orig_node->refcount);
@@ -1208,6 +1201,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
struct batadv_neigh_node *best_neigh_node;
struct batadv_hard_iface *hard_iface;
bool changed_ifinfo, changed_neigh;
+ struct list_head *iter;
if (batadv_has_timed_out(orig_node->last_seen,
2 * BATADV_PURGE_TIMEOUT)) {
@@ -1232,13 +1226,10 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
/* ... then for all other interfaces. */
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) {
if (hard_iface->if_status != BATADV_IF_ACTIVE)
continue;
- if (hard_iface->mesh_iface != bat_priv->mesh_iface)
- continue;
-
if (!kref_get_unless_zero(&hard_iface->refcount))
continue;
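The purge loop above now walks only the hard interfaces enslaved to the mesh device instead of filtering a global interface list. A minimal sketch of that lower-device iteration, assuming the batman-adv types from hard-interface.h and using a hypothetical helper name; the RCU read side is held around the walk as in the hunk:

/* Minimal sketch, not the batman-adv purge code itself: walk the hard
 * interfaces enslaved to a mesh device under RCU instead of filtering a
 * global interface list.
 */
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

#include "hard-interface.h"	/* assumed: struct batadv_hard_iface */

static void batadv_walk_hard_ifaces_sketch(struct net_device *mesh_iface)
{
	struct batadv_hard_iface *hard_iface;
	struct list_head *iter;

	rcu_read_lock();
	netdev_for_each_lower_private_rcu(mesh_iface, hard_iface, iter) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		/* ... per-interface purge work would go here ... */
	}
	rcu_read_unlock();
}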
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 35d8c5783999..12c16f81cc51 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -31,7 +31,6 @@
#include "hard-interface.h"
#include "log.h"
#include "mesh-interface.h"
-#include "network-coding.h"
#include "originator.h"
#include "send.h"
#include "tp_meter.h"
@@ -956,15 +955,9 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
/* function returns -EREMOTE for promiscuous packets */
check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);
-
- /* Even though the packet is not for us, we might save it to use for
- * decoding a later received coded packet
- */
- if (check == -EREMOTE)
- batadv_nc_skb_store_sniffed_unicast(bat_priv, skb);
-
if (check < 0)
goto free_skb;
+
if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
goto free_skb;
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 9d72f4f15b3d..20d85c681064 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -21,7 +21,6 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
-#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
@@ -35,7 +34,6 @@
#include "hard-interface.h"
#include "log.h"
#include "mesh-interface.h"
-#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "translation-table.h"
@@ -64,12 +62,9 @@ int batadv_send_skb_packet(struct sk_buff *skb,
struct batadv_hard_iface *hard_iface,
const u8 *dst_addr)
{
- struct batadv_priv *bat_priv;
struct ethhdr *ethhdr;
int ret;
- bat_priv = netdev_priv(hard_iface->mesh_iface);
-
if (hard_iface->if_status != BATADV_IF_ACTIVE)
goto send_skb_err;
@@ -98,9 +93,6 @@ int batadv_send_skb_packet(struct sk_buff *skb,
skb->dev = hard_iface->net_dev;
- /* Save a clone of the skb to use when decoding coded packets */
- batadv_nc_skb_store_for_decoding(bat_priv, skb);
-
/* dev_queue_xmit() returns a negative result on error. However on
* congestion and traffic shaping, it drops and returns NET_XMIT_DROP
* (which is > 0). This will not be treated as an error.
@@ -203,14 +195,7 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
goto put_neigh_node;
}
- /* try to network code the packet, if it is received on an interface
- * (i.e. being forwarded). If the packet originates from this node or if
- * network coding fails, then send the packet as usual.
- */
- if (recv_if && batadv_nc_skb_forward(skb, neigh_node))
- ret = -EINPROGRESS;
- else
- ret = batadv_send_unicast_skb(skb, neigh_node);
+ ret = batadv_send_unicast_skb(skb, neigh_node);
/* skb was consumed */
skb = NULL;
@@ -924,6 +909,7 @@ static int __batadv_forw_bcast_packet(struct batadv_priv *bat_priv,
{
struct batadv_hard_iface *hard_iface;
struct batadv_hard_iface *primary_if;
+ struct list_head *iter;
int ret = NETDEV_TX_OK;
primary_if = batadv_primary_if_get_selected(bat_priv);
@@ -931,10 +917,7 @@ static int __batadv_forw_bcast_packet(struct batadv_priv *bat_priv,
return NETDEV_TX_BUSY;
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
- if (hard_iface->mesh_iface != bat_priv->mesh_iface)
- continue;
-
+ netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) {
if (!kref_get_unless_zero(&hard_iface->refcount))
continue;
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 8d0e04e770cb..6e95e883c2bf 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -212,7 +212,7 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
/**
* batadv_tt_local_entry_release() - release tt_local_entry from lists and queue
* for free after rcu grace period
- * @ref: kref pointer of the nc_node
+ * @ref: kref pointer of the batadv_tt_local_entry
*/
static void batadv_tt_local_entry_release(struct kref *ref)
{
@@ -244,7 +244,7 @@ batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry)
/**
* batadv_tt_global_entry_release() - release tt_global_entry from lists and
* queue for free after rcu grace period
- * @ref: kref pointer of the nc_node
+ * @ref: kref pointer of the batadv_tt_global_entry
*/
void batadv_tt_global_entry_release(struct kref *ref)
{
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 0ca0fc072fc9..ae1d7a8dc480 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -505,20 +505,6 @@ struct batadv_orig_node {
/** @rcu: struct used for freeing in an RCU-safe manner */
struct rcu_head rcu;
-#ifdef CONFIG_BATMAN_ADV_NC
- /** @in_coding_list: list of nodes this orig can hear */
- struct list_head in_coding_list;
-
- /** @out_coding_list: list of nodes that can hear this orig */
- struct list_head out_coding_list;
-
- /** @in_coding_list_lock: protects in_coding_list */
- spinlock_t in_coding_list_lock;
-
- /** @out_coding_list_lock: protects out_coding_list */
- spinlock_t out_coding_list_lock;
-#endif
-
/** @fragments: array with heads for fragment chains */
struct batadv_frag_table_entry fragments[BATADV_FRAG_BUFFER_COUNT];
@@ -545,9 +531,6 @@ enum batadv_orig_capabilities {
*/
BATADV_ORIG_CAPA_HAS_DAT,
- /** @BATADV_ORIG_CAPA_HAS_NC: orig node has network coding enabled */
- BATADV_ORIG_CAPA_HAS_NC,
-
/** @BATADV_ORIG_CAPA_HAS_TT: orig node has tt capability */
BATADV_ORIG_CAPA_HAS_TT,
@@ -953,60 +936,6 @@ enum batadv_counters {
BATADV_CNT_DAT_CACHED_REPLY_TX,
#endif
-#ifdef CONFIG_BATMAN_ADV_NC
- /**
- * @BATADV_CNT_NC_CODE: transmitted nc-combined traffic packet counter
- */
- BATADV_CNT_NC_CODE,
-
- /**
- * @BATADV_CNT_NC_CODE_BYTES: transmitted nc-combined traffic bytes
- * counter
- */
- BATADV_CNT_NC_CODE_BYTES,
-
- /**
- * @BATADV_CNT_NC_RECODE: transmitted nc-recombined traffic packet
- * counter
- */
- BATADV_CNT_NC_RECODE,
-
- /**
- * @BATADV_CNT_NC_RECODE_BYTES: transmitted nc-recombined traffic bytes
- * counter
- */
- BATADV_CNT_NC_RECODE_BYTES,
-
- /**
- * @BATADV_CNT_NC_BUFFER: counter for packets buffered for later nc
- * decoding
- */
- BATADV_CNT_NC_BUFFER,
-
- /**
- * @BATADV_CNT_NC_DECODE: received and nc-decoded traffic packet counter
- */
- BATADV_CNT_NC_DECODE,
-
- /**
- * @BATADV_CNT_NC_DECODE_BYTES: received and nc-decoded traffic bytes
- * counter
- */
- BATADV_CNT_NC_DECODE_BYTES,
-
- /**
- * @BATADV_CNT_NC_DECODE_FAILED: received and decode-failed traffic
- * packet counter
- */
- BATADV_CNT_NC_DECODE_FAILED,
-
- /**
- * @BATADV_CNT_NC_SNIFFED: counter for nc-decoded packets received in
- * promisc mode.
- */
- BATADV_CNT_NC_SNIFFED,
-#endif
-
/** @BATADV_CNT_NUM: number of traffic counters */
BATADV_CNT_NUM,
};
@@ -1340,56 +1269,6 @@ struct batadv_priv_mcast {
#endif
/**
- * struct batadv_priv_nc - per mesh interface network coding private data
- */
-struct batadv_priv_nc {
- /** @work: work queue callback item for cleanup */
- struct delayed_work work;
-
- /**
- * @min_tq: only consider neighbors for encoding if neigh_tq > min_tq
- */
- u8 min_tq;
-
- /**
- * @max_fwd_delay: maximum packet forward delay to allow coding of
- * packets
- */
- u32 max_fwd_delay;
-
- /**
- * @max_buffer_time: buffer time for sniffed packets used to decoding
- */
- u32 max_buffer_time;
-
- /**
- * @timestamp_fwd_flush: timestamp of last forward packet queue flush
- */
- unsigned long timestamp_fwd_flush;
-
- /**
- * @timestamp_sniffed_purge: timestamp of last sniffed packet queue
- * purge
- */
- unsigned long timestamp_sniffed_purge;
-
- /**
- * @coding_hash: Hash table used to buffer skbs while waiting for
- * another incoming skb to code it with. Skbs are added to the buffer
- * just before being forwarded in routing.c
- */
- struct batadv_hashtable *coding_hash;
-
- /**
- * @decoding_hash: Hash table used to buffer skbs that might be needed
- * to decode a received coded skb. The buffer is used for 1) skbs
- * arriving on the mesh-interface; 2) skbs overheard on the
- * hard-interface; and 3) skbs forwarded by batman-adv.
- */
- struct batadv_hashtable *decoding_hash;
-};
-
-/**
* struct batadv_tp_unacked - unacked packet meta-information
*
* This struct is supposed to represent a buffer unacked packet. However, since
@@ -1775,16 +1654,6 @@ struct batadv_priv {
struct batadv_priv_mcast mcast;
#endif
-#ifdef CONFIG_BATMAN_ADV_NC
- /**
- * @network_coding: bool indicating whether network coding is enabled
- */
- atomic_t network_coding;
-
- /** @nc: network coding data */
- struct batadv_priv_nc nc;
-#endif /* CONFIG_BATMAN_ADV_NC */
-
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
/** @bat_v: B.A.T.M.A.N. V per mesh-interface private data */
struct batadv_priv_bat_v bat_v;
@@ -2017,95 +1886,10 @@ struct batadv_tt_roam_node {
};
/**
- * struct batadv_nc_node - network coding node
- */
-struct batadv_nc_node {
- /** @list: next and prev pointer for the list handling */
- struct list_head list;
-
- /** @addr: the node's mac address */
- u8 addr[ETH_ALEN];
-
- /** @refcount: number of contexts the object is used by */
- struct kref refcount;
-
- /** @rcu: struct used for freeing in an RCU-safe manner */
- struct rcu_head rcu;
-
- /** @orig_node: pointer to corresponding orig node struct */
- struct batadv_orig_node *orig_node;
-
- /** @last_seen: timestamp of last ogm received from this node */
- unsigned long last_seen;
-};
-
-/**
- * struct batadv_nc_path - network coding path
- */
-struct batadv_nc_path {
- /** @hash_entry: next and prev pointer for the list handling */
- struct hlist_node hash_entry;
-
- /** @rcu: struct used for freeing in an RCU-safe manner */
- struct rcu_head rcu;
-
- /** @refcount: number of contexts the object is used by */
- struct kref refcount;
-
- /** @packet_list: list of buffered packets for this path */
- struct list_head packet_list;
-
- /** @packet_list_lock: access lock for packet list */
- spinlock_t packet_list_lock;
-
- /** @next_hop: next hop (destination) of path */
- u8 next_hop[ETH_ALEN];
-
- /** @prev_hop: previous hop (source) of path */
- u8 prev_hop[ETH_ALEN];
-
- /** @last_valid: timestamp for last validation of path */
- unsigned long last_valid;
-};
-
-/**
- * struct batadv_nc_packet - network coding packet used when coding and
- * decoding packets
- */
-struct batadv_nc_packet {
- /** @list: next and prev pointer for the list handling */
- struct list_head list;
-
- /** @packet_id: crc32 checksum of skb data */
- __be32 packet_id;
-
- /**
- * @timestamp: field containing the info when the packet was added to
- * path
- */
- unsigned long timestamp;
-
- /** @neigh_node: pointer to original next hop neighbor of skb */
- struct batadv_neigh_node *neigh_node;
-
- /** @skb: skb which can be encoded or used for decoding */
- struct sk_buff *skb;
-
- /** @nc_path: pointer to path this nc packet is attached to */
- struct batadv_nc_path *nc_path;
-};
-
-/**
* struct batadv_skb_cb - control buffer structure used to store private data
* relevant to batman-adv in the skb->cb buffer in skbs.
*/
struct batadv_skb_cb {
- /**
- * @decoded: Marks a skb as decoded, which is checked when searching for
- * coding opportunities in network-coding.c
- */
- unsigned char decoded:1;
-
/** @num_bcasts: Counter for broadcast packet retransmissions */
unsigned char num_bcasts;
};
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 6ad2f72f53f4..2b94e2077203 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -364,6 +364,13 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
put_cmsg(msg, SOL_BLUETOOTH, BT_SCM_PKT_STATUS,
sizeof(pkt_status), &pkt_status);
}
+
+ if (test_bit(BT_SK_PKT_SEQNUM, &bt_sk(sk)->flags)) {
+ u16 pkt_seqnum = hci_skb_pkt_seqnum(skb);
+
+ put_cmsg(msg, SOL_BLUETOOTH, BT_SCM_PKT_SEQNUM,
+ sizeof(pkt_seqnum), &pkt_seqnum);
+ }
}
skb_free_datagram(sk, skb);
@@ -815,7 +822,7 @@ static int bt_seq_show(struct seq_file *seq, void *v)
refcount_read(&sk->sk_refcnt),
sk_rmem_alloc_get(sk),
sk_wmem_alloc_get(sk),
- from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
+ from_kuid(seq_user_ns(seq), sk_uid(sk)),
sock_i_ino(sk),
bt->parent ? sock_i_ino(bt->parent) : 0LU);
diff --git a/net/bluetooth/aosp.c b/net/bluetooth/aosp.c
index 1d67836e95e1..59025771af53 100644
--- a/net/bluetooth/aosp.c
+++ b/net/bluetooth/aosp.c
@@ -70,7 +70,7 @@ void aosp_do_open(struct hci_dev *hdev)
rp = (struct aosp_rp_le_get_vendor_capa *)skb->data;
version_supported = le16_to_cpu(rp->version_supported);
- /* AOSP displays the verion number like v0.98, v1.00, etc. */
+ /* AOSP displays the version number like v0.98, v1.00, etc. */
bt_dev_info(hdev, "AOSP extensions version v%u.%02u",
version_supported >> 8, version_supported & 0xff);
diff --git a/net/bluetooth/coredump.c b/net/bluetooth/coredump.c
index 819eacb38762..720cb79adf96 100644
--- a/net/bluetooth/coredump.c
+++ b/net/bluetooth/coredump.c
@@ -249,15 +249,15 @@ static void hci_devcd_dump(struct hci_dev *hdev)
size = hdev->dump.tail - hdev->dump.head;
- /* Emit a devcoredump with the available data */
- dev_coredumpv(&hdev->dev, hdev->dump.head, size, GFP_KERNEL);
-
/* Send a copy to monitor as a diagnostic packet */
skb = bt_skb_alloc(size, GFP_ATOMIC);
if (skb) {
skb_put_data(skb, hdev->dump.head, size);
hci_recv_diag(hdev, skb);
}
+
+ /* Emit a devcoredump with the available data */
+ dev_coredumpv(&hdev->dev, hdev->dump.head, size, GFP_KERNEL);
}
static void hci_devcd_handle_pkt_complete(struct hci_dev *hdev,
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 4f379184df5b..111f0e37b672 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -149,8 +149,6 @@ static void hci_conn_cleanup(struct hci_conn *conn)
hci_chan_list_flush(conn);
- hci_conn_hash_del(hdev, conn);
-
if (HCI_CONN_HANDLE_UNSET(conn->handle))
ida_free(&hdev->unset_handle_ida, conn->handle);
@@ -339,7 +337,8 @@ static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
case BT_CODEC_TRANSPARENT:
if (!find_next_esco_param(conn, esco_param_msbc,
ARRAY_SIZE(esco_param_msbc)))
- return false;
+ return -EINVAL;
+
param = &esco_param_msbc[conn->attempt - 1];
cp.tx_coding_format.id = 0x03;
cp.rx_coding_format.id = 0x03;
@@ -785,7 +784,7 @@ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *c
d->sync_handle = conn->sync_handle;
if (test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags)) {
- hci_conn_hash_list_flag(hdev, find_bis, BIS_LINK,
+ hci_conn_hash_list_flag(hdev, find_bis, PA_LINK,
HCI_CONN_PA_SYNC, d);
if (!d->count)
@@ -814,7 +813,7 @@ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *c
*
* Detects if there any BIS left connected in a BIG
* broadcaster: Remove advertising instance and terminate BIG.
- * broadcaster receiver: Teminate BIG sync and terminate PA sync.
+ * broadcaster receiver: Terminate BIG sync and terminate PA sync.
*/
static void bis_cleanup(struct hci_conn *conn)
{
@@ -830,7 +829,17 @@ static void bis_cleanup(struct hci_conn *conn)
/* Check if ISO connection is a BIS and terminate advertising
* set and BIG if there are no other connections using it.
*/
- bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);
+ bis = hci_conn_hash_lookup_big_state(hdev,
+ conn->iso_qos.bcast.big,
+ BT_CONNECTED,
+ HCI_ROLE_MASTER);
+ if (bis)
+ return;
+
+ bis = hci_conn_hash_lookup_big_state(hdev,
+ conn->iso_qos.bcast.big,
+ BT_CONNECT,
+ HCI_ROLE_MASTER);
if (bis)
return;
@@ -914,10 +923,10 @@ static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t
break;
case CIS_LINK:
case BIS_LINK:
- if (hdev->iso_mtu)
- /* Dedicated ISO Buffer exists */
- break;
- fallthrough;
+ case PA_LINK:
+ if (!hdev->iso_mtu)
+ return ERR_PTR(-ECONNREFUSED);
+ break;
case LE_LINK:
if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
return ERR_PTR(-ECONNREFUSED);
@@ -979,6 +988,7 @@ static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t
break;
case CIS_LINK:
case BIS_LINK:
+ case PA_LINK:
/* conn->src should reflect the local identity address */
hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
@@ -1033,7 +1043,6 @@ static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t
}
hci_conn_init_sysfs(conn);
-
return conn;
}
@@ -1077,6 +1086,7 @@ static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
break;
case CIS_LINK:
case BIS_LINK:
+ case PA_LINK:
if ((conn->state != BT_CONNECTED &&
!test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) ||
test_bit(HCI_CONN_BIG_CREATED, &conn->flags))
@@ -1139,27 +1149,54 @@ void hci_conn_del(struct hci_conn *conn)
disable_delayed_work_sync(&conn->auto_accept_work);
disable_delayed_work_sync(&conn->idle_work);
- if (conn->type == ACL_LINK) {
- /* Unacked frames */
- hdev->acl_cnt += conn->sent;
- } else if (conn->type == LE_LINK) {
- cancel_delayed_work(&conn->le_conn_timeout);
+ /* Remove the connection from the list so unacked logic can detect when
+ * a certain pool is not being utilized.
+ */
+ hci_conn_hash_del(hdev, conn);
- if (hdev->le_pkts)
- hdev->le_cnt += conn->sent;
+ /* Handle unacked frames:
+ *
+ * - In case there are no connections left, or if restoring the buffers
+ * considered in transit would overflow, restore all buffers to the
+ * pool.
+ * - Otherwise restore just the buffers considered in transit for the
+ * hci_conn.
+ */
+ switch (conn->type) {
+ case ACL_LINK:
+ if (!hci_conn_num(hdev, ACL_LINK) ||
+ hdev->acl_cnt + conn->sent > hdev->acl_pkts)
+ hdev->acl_cnt = hdev->acl_pkts;
else
hdev->acl_cnt += conn->sent;
- } else {
- /* Unacked ISO frames */
- if (conn->type == CIS_LINK ||
- conn->type == BIS_LINK) {
- if (hdev->iso_pkts)
- hdev->iso_cnt += conn->sent;
- else if (hdev->le_pkts)
+ break;
+ case LE_LINK:
+ cancel_delayed_work(&conn->le_conn_timeout);
+
+ if (hdev->le_pkts) {
+ if (!hci_conn_num(hdev, LE_LINK) ||
+ hdev->le_cnt + conn->sent > hdev->le_pkts)
+ hdev->le_cnt = hdev->le_pkts;
+ else
hdev->le_cnt += conn->sent;
+ } else {
+ if ((!hci_conn_num(hdev, LE_LINK) &&
+ !hci_conn_num(hdev, ACL_LINK)) ||
+ hdev->acl_cnt + conn->sent > hdev->acl_pkts)
+ hdev->acl_cnt = hdev->acl_pkts;
else
hdev->acl_cnt += conn->sent;
}
+ break;
+ case CIS_LINK:
+ case BIS_LINK:
+ case PA_LINK:
+ if (!hci_iso_count(hdev) ||
+ hdev->iso_cnt + conn->sent > hdev->iso_pkts)
+ hdev->iso_cnt = hdev->iso_pkts;
+ else
+ hdev->iso_cnt += conn->sent;
+ break;
}
skb_queue_purge(&conn->data_q);
@@ -1502,7 +1539,7 @@ static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
/* This function requires the caller holds hdev->lock */
static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
__u8 sid, struct bt_iso_qos *qos,
- __u8 base_len, __u8 *base)
+ __u8 base_len, __u8 *base, u16 timeout)
{
struct hci_conn *conn;
int err;
@@ -1544,6 +1581,7 @@ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
conn->state = BT_CONNECT;
conn->sid = sid;
+ conn->conn_timeout = timeout;
hci_conn_hold(conn);
return conn;
@@ -1884,7 +1922,8 @@ done:
}
struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
- __u8 dst_type, struct bt_iso_qos *qos)
+ __u8 dst_type, struct bt_iso_qos *qos,
+ u16 timeout)
{
struct hci_conn *cis;
@@ -1899,6 +1938,7 @@ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
cis->dst_type = dst_type;
cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;
+ cis->conn_timeout = timeout;
}
if (cis->state == BT_CONNECTED)
@@ -2081,7 +2121,7 @@ struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
bt_dev_dbg(hdev, "dst %pMR type %d sid %d", dst, dst_type, sid);
- conn = hci_conn_add_unset(hdev, BIS_LINK, dst, HCI_ROLE_SLAVE);
+ conn = hci_conn_add_unset(hdev, PA_LINK, dst, HCI_ROLE_SLAVE);
if (IS_ERR(conn))
return conn;
@@ -2138,7 +2178,7 @@ static void create_big_complete(struct hci_dev *hdev, void *data, int err)
struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst, __u8 sid,
struct bt_iso_qos *qos,
- __u8 base_len, __u8 *base)
+ __u8 base_len, __u8 *base, u16 timeout)
{
struct hci_conn *conn;
struct hci_conn *parent;
@@ -2146,7 +2186,8 @@ struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst, __u8 sid,
struct hci_link *link;
/* Look for any BIS that is open for rebinding */
- conn = hci_conn_hash_lookup_big_state(hdev, qos->bcast.big, BT_OPEN);
+ conn = hci_conn_hash_lookup_big_state(hdev, qos->bcast.big, BT_OPEN,
+ HCI_ROLE_MASTER);
if (conn) {
memcpy(qos, &conn->iso_qos, sizeof(*qos));
conn->state = BT_CONNECTED;
@@ -2158,7 +2199,7 @@ struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst, __u8 sid,
base, base_len);
/* We need hci_conn object using the BDADDR_ANY as dst */
- conn = hci_add_bis(hdev, dst, sid, qos, base_len, eir);
+ conn = hci_add_bis(hdev, dst, sid, qos, base_len, eir, timeout);
if (IS_ERR(conn))
return conn;
@@ -2211,13 +2252,13 @@ static void bis_mark_per_adv(struct hci_conn *conn, void *data)
struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, __u8 sid,
struct bt_iso_qos *qos,
- __u8 base_len, __u8 *base)
+ __u8 base_len, __u8 *base, u16 timeout)
{
struct hci_conn *conn;
int err;
struct iso_list_data data;
- conn = hci_bind_bis(hdev, dst, sid, qos, base_len, base);
+ conn = hci_bind_bis(hdev, dst, sid, qos, base_len, base, timeout);
if (IS_ERR(conn))
return conn;
@@ -2260,7 +2301,8 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
}
struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
- __u8 dst_type, struct bt_iso_qos *qos)
+ __u8 dst_type, struct bt_iso_qos *qos,
+ u16 timeout)
{
struct hci_conn *le;
struct hci_conn *cis;
@@ -2284,7 +2326,7 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
hci_iso_qos_setup(hdev, le, &qos->ucast.in,
le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);
- cis = hci_bind_cis(hdev, dst, dst_type, qos);
+ cis = hci_bind_cis(hdev, dst, dst_type, qos, timeout);
if (IS_ERR(cis)) {
hci_conn_drop(le);
return cis;
@@ -2979,6 +3021,7 @@ void hci_conn_tx_queue(struct hci_conn *conn, struct sk_buff *skb)
switch (conn->type) {
case CIS_LINK:
case BIS_LINK:
+ case PA_LINK:
case ACL_LINK:
case LE_LINK:
break;
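The rewritten hci_conn_del() hunk above restores a closing link's in-flight credits to its per-type pool, but resets the pool when no links of that type remain or when the give-back would exceed the controller's advertised budget. A minimal sketch of that clamp-on-restore rule, using a hypothetical helper rather than the hci_conn_del() code itself:

/* Minimal sketch of the clamp-on-restore rule (hypothetical helper, not
 * hci_conn_del() itself): give back the closing link's in-flight
 * credits, but never let the free count exceed the controller's pool.
 */
#include <linux/types.h>

static unsigned int restore_credits(unsigned int free_cnt,
				    unsigned int pool_size,
				    unsigned int in_flight,
				    bool other_links_remain)
{
	if (!other_links_remain || free_cnt + in_flight > pool_size)
		return pool_size;		/* reset to the full pool */

	return free_cnt + in_flight;		/* normal give-back */
}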
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 07a8b4281a39..3418d7b964a1 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -64,7 +64,7 @@ static DEFINE_IDA(hci_index_ida);
/* Get HCI device by index.
* Device is held on return. */
-struct hci_dev *hci_dev_get(int index)
+static struct hci_dev *__hci_dev_get(int index, int *srcu_index)
{
struct hci_dev *hdev = NULL, *d;
@@ -77,6 +77,8 @@ struct hci_dev *hci_dev_get(int index)
list_for_each_entry(d, &hci_dev_list, list) {
if (d->id == index) {
hdev = hci_dev_hold(d);
+ if (srcu_index)
+ *srcu_index = srcu_read_lock(&d->srcu);
break;
}
}
@@ -84,6 +86,22 @@ struct hci_dev *hci_dev_get(int index)
return hdev;
}
+struct hci_dev *hci_dev_get(int index)
+{
+ return __hci_dev_get(index, NULL);
+}
+
+static struct hci_dev *hci_dev_get_srcu(int index, int *srcu_index)
+{
+ return __hci_dev_get(index, srcu_index);
+}
+
+static void hci_dev_put_srcu(struct hci_dev *hdev, int srcu_index)
+{
+ srcu_read_unlock(&hdev->srcu, srcu_index);
+ hci_dev_put(hdev);
+}
+
/* ---- Inquiry support ---- */
bool hci_discovery_active(struct hci_dev *hdev)
@@ -568,9 +586,9 @@ static int hci_dev_do_reset(struct hci_dev *hdev)
int hci_dev_reset(__u16 dev)
{
struct hci_dev *hdev;
- int err;
+ int err, srcu_index;
- hdev = hci_dev_get(dev);
+ hdev = hci_dev_get_srcu(dev, &srcu_index);
if (!hdev)
return -ENODEV;
@@ -592,7 +610,7 @@ int hci_dev_reset(__u16 dev)
err = hci_dev_do_reset(hdev);
done:
- hci_dev_put(hdev);
+ hci_dev_put_srcu(hdev, srcu_index);
return err;
}
@@ -1238,12 +1256,10 @@ struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
if (addr_type == irk->addr_type &&
bacmp(bdaddr, &irk->bdaddr) == 0) {
irk_to_return = irk;
- goto done;
+ break;
}
}
-done:
-
if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
irk_to_return->val)) {
bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
@@ -2433,6 +2449,11 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
if (!hdev)
return NULL;
+ if (init_srcu_struct(&hdev->srcu)) {
+ kfree(hdev);
+ return NULL;
+ }
+
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
hdev->esco_type = (ESCO_HV1);
hdev->link_mode = (HCI_LM_ACCEPT);
@@ -2631,7 +2652,7 @@ int hci_register_dev(struct hci_dev *hdev)
/* Devices that are marked for raw-only usage are unconfigured
* and should not be included in normal operation.
*/
- if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
+ if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
/* Mark Remote Wakeup connection flag as supported if driver has wakeup
@@ -2678,6 +2699,9 @@ void hci_unregister_dev(struct hci_dev *hdev)
list_del(&hdev->list);
write_unlock(&hci_dev_list_lock);
+ synchronize_srcu(&hdev->srcu);
+ cleanup_srcu_struct(&hdev->srcu);
+
disable_work_sync(&hdev->rx_work);
disable_work_sync(&hdev->cmd_work);
disable_work_sync(&hdev->tx_work);
@@ -2758,7 +2782,7 @@ int hci_register_suspend_notifier(struct hci_dev *hdev)
int ret = 0;
if (!hdev->suspend_notifier.notifier_call &&
- !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
+ !hci_test_quirk(hdev, HCI_QUIRK_NO_SUSPEND_NOTIFIER)) {
hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
ret = register_pm_notifier(&hdev->suspend_notifier);
}
@@ -2912,12 +2936,14 @@ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
case HCI_ACLDATA_PKT:
/* Detect if ISO packet has been sent as ACL */
if (hci_conn_num(hdev, CIS_LINK) ||
- hci_conn_num(hdev, BIS_LINK)) {
+ hci_conn_num(hdev, BIS_LINK) ||
+ hci_conn_num(hdev, PA_LINK)) {
__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
__u8 type;
type = hci_conn_lookup_type(hdev, hci_handle(handle));
- if (type == CIS_LINK || type == BIS_LINK)
+ if (type == CIS_LINK || type == BIS_LINK ||
+ type == PA_LINK)
hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
}
break;
@@ -3241,6 +3267,8 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
spin_unlock_bh(&queue->lock);
}
+
+ bt_dev_dbg(hdev, "chan %p queued %d", chan, skb_queue_len(queue));
}
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
@@ -3272,6 +3300,10 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
skb_queue_tail(&conn->data_q, skb);
+
+ bt_dev_dbg(hdev, "hcon %p queued %d", conn,
+ skb_queue_len(&conn->data_q));
+
queue_work(hdev->workqueue, &hdev->tx_work);
}
@@ -3331,6 +3363,8 @@ static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
__skb_queue_tail(queue, skb);
} while (list);
}
+
+ bt_dev_dbg(hdev, "hcon %p queued %d", conn, skb_queue_len(queue));
}
void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
@@ -3372,8 +3406,8 @@ static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
break;
case CIS_LINK:
case BIS_LINK:
- cnt = hdev->iso_mtu ? hdev->iso_cnt :
- hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
+ case PA_LINK:
+ cnt = hdev->iso_cnt;
break;
default:
cnt = 0;
@@ -3385,7 +3419,7 @@ static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
}
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
- __u8 type2, int *quote)
+ int *quote)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *conn = NULL, *c;
@@ -3397,10 +3431,14 @@ static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if ((c->type != type && c->type != type2) ||
+ if (c->type != type ||
skb_queue_empty(&c->data_q))
continue;
+ bt_dev_dbg(hdev, "hcon %p state %s queued %d", c,
+ state_to_string(c->state),
+ skb_queue_len(&c->data_q));
+
if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
continue;
@@ -3559,24 +3597,37 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
{
- unsigned long last_tx;
+ unsigned long timeout;
if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
return;
switch (type) {
+ case ACL_LINK:
+ /* tx timeout must be longer than maximum link supervision
+ * timeout (40.9 seconds)
+ */
+ timeout = hdev->acl_last_tx + HCI_ACL_TX_TIMEOUT;
+ break;
case LE_LINK:
- last_tx = hdev->le_last_tx;
+ /* tx timeout must be longer than maximum link supervision
+ * timeout (40.9 seconds)
+ */
+ timeout = hdev->le_last_tx + HCI_ACL_TX_TIMEOUT;
break;
- default:
- last_tx = hdev->acl_last_tx;
+ case CIS_LINK:
+ case BIS_LINK:
+ case PA_LINK:
+ /* tx timeout must be longer than the maximum transport latency
+ * (8.388607 seconds)
+ */
+ timeout = hdev->iso_last_tx + HCI_ISO_TX_TIMEOUT;
break;
+ default:
+ return;
}
- /* tx timeout must be longer than maximum link supervision timeout
- * (40.9 seconds)
- */
- if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
+ if (!cnt && time_after(jiffies, timeout))
hci_link_tx_to(hdev, type);
}
@@ -3601,7 +3652,7 @@ static void hci_sched_sco(struct hci_dev *hdev, __u8 type)
else
cnt = &hdev->sco_cnt;
- while (*cnt && (conn = hci_low_sent(hdev, type, type, &quote))) {
+ while (*cnt && (conn = hci_low_sent(hdev, type, &quote))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
hci_send_conn_frame(hdev, conn, skb);
@@ -3720,8 +3771,8 @@ static void hci_sched_le(struct hci_dev *hdev)
hci_prio_recalculate(hdev, LE_LINK);
}
-/* Schedule CIS */
-static void hci_sched_iso(struct hci_dev *hdev)
+/* Schedule iso */
+static void hci_sched_iso(struct hci_dev *hdev, __u8 type)
{
struct hci_conn *conn;
struct sk_buff *skb;
@@ -3729,17 +3780,19 @@ static void hci_sched_iso(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
- if (!hci_conn_num(hdev, CIS_LINK) &&
- !hci_conn_num(hdev, BIS_LINK))
+ if (!hci_conn_num(hdev, type))
return;
- cnt = hdev->iso_pkts ? &hdev->iso_cnt :
- hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
- while (*cnt && (conn = hci_low_sent(hdev, CIS_LINK, BIS_LINK,
- &quote))) {
+ cnt = &hdev->iso_cnt;
+
+ __check_timeout(hdev, *cnt, type);
+
+ while (*cnt && (conn = hci_low_sent(hdev, type, &quote))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
+
hci_send_conn_frame(hdev, conn, skb);
+ hdev->iso_last_tx = jiffies;
conn->sent++;
if (conn->sent == ~0)
@@ -3761,7 +3814,9 @@ static void hci_tx_work(struct work_struct *work)
/* Schedule queues and send stuff to HCI driver */
hci_sched_sco(hdev, SCO_LINK);
hci_sched_sco(hdev, ESCO_LINK);
- hci_sched_iso(hdev);
+ hci_sched_iso(hdev, CIS_LINK);
+ hci_sched_iso(hdev, BIS_LINK);
+ hci_sched_iso(hdev, PA_LINK);
hci_sched_acl(hdev);
hci_sched_le(hdev);
}
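hci_dev_reset() above now pins the device with a sleepable-RCU (SRCU) read lock so hci_unregister_dev() can wait for in-flight resets before tearing the device down. A minimal sketch of that pattern on a hypothetical structure; the real srcu_struct is embedded in struct hci_dev:

/* Minimal SRCU sketch on a hypothetical my_dev; the real code hangs the
 * srcu_struct off struct hci_dev and synchronizes in
 * hci_unregister_dev() before the device goes away.
 */
#include <linux/srcu.h>

struct my_dev {
	struct srcu_struct srcu;
	/* ... device state ... */
};

static int my_dev_register(struct my_dev *dev)
{
	return init_srcu_struct(&dev->srcu);
}

static int my_dev_reset(struct my_dev *dev)
{
	int idx, err = 0;

	/* SRCU readers may sleep, unlike plain RCU read sections. */
	idx = srcu_read_lock(&dev->srcu);
	/* ... possibly sleeping reset work, device guaranteed alive ... */
	srcu_read_unlock(&dev->srcu, idx);

	return err;
}

static void my_dev_unregister(struct my_dev *dev)
{
	/* Wait for every SRCU reader, then release SRCU resources. */
	synchronize_srcu(&dev->srcu);
	cleanup_srcu_struct(&dev->srcu);
}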
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index f625074d1f00..99e2e9fc70e8 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -38,7 +38,7 @@ static ssize_t __name ## _read(struct file *file, \
struct hci_dev *hdev = file->private_data; \
char buf[3]; \
\
- buf[0] = test_bit(__quirk, &hdev->quirks) ? 'Y' : 'N'; \
+ buf[0] = test_bit(__quirk, hdev->quirk_flags) ? 'Y' : 'N'; \
buf[1] = '\n'; \
buf[2] = '\0'; \
return simple_read_from_buffer(user_buf, count, ppos, buf, 2); \
@@ -59,10 +59,10 @@ static ssize_t __name ## _write(struct file *file, \
if (err) \
return err; \
\
- if (enable == test_bit(__quirk, &hdev->quirks)) \
+ if (enable == test_bit(__quirk, hdev->quirk_flags)) \
return -EALREADY; \
\
- change_bit(__quirk, &hdev->quirks); \
+ change_bit(__quirk, hdev->quirk_flags); \
\
return count; \
} \
@@ -1356,7 +1356,7 @@ static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
* for the vendor callback. Instead just store the desired value and
* the setting will be programmed when the controller gets powered on.
*/
- if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
+ if (hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG) &&
(!test_bit(HCI_RUNNING, &hdev->flags) ||
hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
goto done;
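The debugfs helpers above switch from testing a single unsigned long quirks word to testing a bitmap through hdev->quirk_flags / hci_test_quirk(). A minimal sketch of bitmap-backed flags with the generic kernel bitops, on a hypothetical structure:

/* Minimal sketch of bitmap-backed quirk flags using generic bitops, on a
 * hypothetical structure; the real field is hdev->quirk_flags and the
 * accessor is hci_test_quirk().
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define MY_NUM_QUIRKS	64

struct my_dev {
	DECLARE_BITMAP(quirk_flags, MY_NUM_QUIRKS);
};

static inline void my_set_quirk(struct my_dev *dev, unsigned int nr)
{
	set_bit(nr, dev->quirk_flags);
}

static inline bool my_test_quirk(struct my_dev *dev, unsigned int nr)
{
	return test_bit(nr, dev->quirk_flags);
}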
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 66052d6aaa1d..d790b0d4eb9a 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -908,8 +908,8 @@ static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
return rp->status;
if (hdev->max_page < rp->max_page) {
- if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
- &hdev->quirks))
+ if (hci_test_quirk(hdev,
+ HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2))
bt_dev_warn(hdev, "broken local ext features page 2");
else
hdev->max_page = rp->max_page;
@@ -936,7 +936,7 @@ static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
- if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
+ if (hci_test_quirk(hdev, HCI_QUIRK_FIXUP_BUFFER_SIZE)) {
hdev->sco_mtu = 64;
hdev->sco_pkts = 8;
}
@@ -2150,40 +2150,6 @@ static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
return rp->status;
}
-static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_rp_le_set_ext_adv_params *rp = data;
- struct hci_cp_le_set_ext_adv_params *cp;
- struct adv_info *adv_instance;
-
- bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
-
- if (rp->status)
- return rp->status;
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
- if (!cp)
- return rp->status;
-
- hci_dev_lock(hdev);
- hdev->adv_addr_type = cp->own_addr_type;
- if (!cp->handle) {
- /* Store in hdev for instance 0 */
- hdev->adv_tx_power = rp->tx_power;
- } else {
- adv_instance = hci_find_adv_instance(hdev, cp->handle);
- if (adv_instance)
- adv_instance->tx_power = rp->tx_power;
- }
- /* Update adv data as tx power is known now */
- hci_update_adv_data(hdev, cp->handle);
-
- hci_dev_unlock(hdev);
-
- return rp->status;
-}
-
static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
@@ -2737,7 +2703,7 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
if (!conn)
goto unlock;
- if (status) {
+ if (status && status != HCI_ERROR_UNKNOWN_CONN_ID) {
mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
conn->dst_type, status);
@@ -2752,6 +2718,12 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
goto done;
}
+ /* During suspend, mark connection as closed immediately
+ * since we might not receive HCI_EV_DISCONN_COMPLETE
+ */
+ if (hdev->suspended)
+ conn->state = BT_CLOSED;
+
mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
if (conn->type == ACL_LINK) {
@@ -3005,7 +2977,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
* state to indicate completion.
*/
if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
- !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
+ !hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY))
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
goto unlock;
}
@@ -3024,7 +2996,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
* state to indicate completion.
*/
if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
- !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
+ !hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY))
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
@@ -3115,8 +3087,18 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
+ /* Check for existing connection:
+ *
+ * 1. If it doesn't exist then it must be receiver/slave role.
+ * 2. If it does exist confirm that it is connecting/BT_CONNECT in case
+ * of initiator/master role since there could be a collision where
+ * either side is attempting to connect or something like a fuzzing
+ * testing is trying to play tricks to destroy the hcon object before
+ * it even attempts to connect (e.g. hcon->state == BT_OPEN).
+ */
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
- if (!conn) {
+ if (!conn ||
+ (conn->role == HCI_ROLE_MASTER && conn->state != BT_CONNECT)) {
/* In case of error status and there is no connection pending
* just unlock as there is nothing to cleanup.
*/
@@ -3648,8 +3630,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
/* We skip the WRITE_AUTH_PAYLOAD_TIMEOUT for ATS2851 based controllers
* to avoid unexpected SMP command errors when pairing.
*/
- if (test_bit(HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT,
- &hdev->quirks))
+ if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT))
goto notify;
/* Set the default Authenticated Payload Timeout after
@@ -4164,8 +4145,6 @@ static const struct hci_cc {
HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
hci_cc_le_read_num_adv_sets,
sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
- HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
- sizeof(struct hci_rp_le_set_ext_adv_params)),
HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
hci_cc_le_set_ext_adv_enable),
HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
@@ -4422,6 +4401,8 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "num %d", ev->num);
+ hci_dev_lock(hdev);
+
for (i = 0; i < ev->num; i++) {
struct hci_comp_pkts_info *info = &ev->handles[i];
struct hci_conn *conn;
@@ -4435,7 +4416,17 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
if (!conn)
continue;
- conn->sent -= count;
+ /* Check if there really are enough packets outstanding before
+ * attempting to decrease the sent counter, otherwise it could
+ * underflow.
+ */
+ if (conn->sent >= count) {
+ conn->sent -= count;
+ } else {
+ bt_dev_warn(hdev, "hcon %p sent %u < count %u",
+ conn, conn->sent, count);
+ conn->sent = 0;
+ }
for (i = 0; i < count; ++i)
hci_conn_tx_dequeue(conn);
@@ -4469,19 +4460,10 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
case CIS_LINK:
case BIS_LINK:
- if (hdev->iso_pkts) {
- hdev->iso_cnt += count;
- if (hdev->iso_cnt > hdev->iso_pkts)
- hdev->iso_cnt = hdev->iso_pkts;
- } else if (hdev->le_pkts) {
- hdev->le_cnt += count;
- if (hdev->le_cnt > hdev->le_pkts)
- hdev->le_cnt = hdev->le_pkts;
- } else {
- hdev->acl_cnt += count;
- if (hdev->acl_cnt > hdev->acl_pkts)
- hdev->acl_cnt = hdev->acl_pkts;
- }
+ case PA_LINK:
+ hdev->iso_cnt += count;
+ if (hdev->iso_cnt > hdev->iso_pkts)
+ hdev->iso_cnt = hdev->iso_pkts;
break;
default:
@@ -4492,6 +4474,8 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
}
queue_work(hdev->workqueue, &hdev->tx_work);
+
+ hci_dev_unlock(hdev);
}
static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
@@ -5654,8 +5638,18 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
*/
hci_dev_clear_flag(hdev, HCI_LE_ADV);
- conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
- if (!conn) {
+ /* Check for existing connection:
+ *
+ * 1. If it doesn't exist then use the role to create a new object.
+ * 2. If it does exist confirm that it is connecting/BT_CONNECT in case
+ * of initiator/master role since there could be a collision where
+ * either side is attempting to connect or something like a fuzzing
+ * test is trying to play tricks to destroy the hcon object before
+ * it even attempts to connect (e.g. hcon->state == BT_OPEN).
+ */
+ conn = hci_conn_hash_lookup_role(hdev, LE_LINK, role, bdaddr);
+ if (!conn ||
+ (conn->role == HCI_ROLE_MASTER && conn->state != BT_CONNECT)) {
/* In case of error status and there is no connection pending
* just unlock as there is nothing to cleanup.
*/
@@ -5754,7 +5748,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
conn->state = BT_CONFIG;
/* Store current advertising instance as connection advertising instance
- * when sotfware rotation is in use so it can be re-enabled when
+ * when software rotation is in use so it can be re-enabled when
* disconnected.
*/
if (!ext_adv_capable(hdev))
@@ -5950,7 +5944,7 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
* while we have an existing one in peripheral role.
*/
if (hdev->conn_hash.le_num_peripheral > 0 &&
- (test_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks) ||
+ (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES) ||
!(hdev->le_states[3] & 0x10)))
return NULL;
@@ -6276,6 +6270,11 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
{
+ u16 pdu_type = evt_type & ~LE_EXT_ADV_DATA_STATUS_MASK;
+
+ if (!pdu_type)
+ return LE_ADV_NONCONN_IND;
+
if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
switch (evt_type) {
case LE_LEGACY_ADV_IND:
@@ -6307,8 +6306,7 @@ static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
if (evt_type & LE_EXT_ADV_SCAN_IND)
return LE_ADV_SCAN_IND;
- if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
- evt_type & LE_EXT_ADV_DIRECT_IND)
+ if (evt_type & LE_EXT_ADV_DIRECT_IND)
return LE_ADV_NONCONN_IND;
invalid:
@@ -6346,8 +6344,8 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
- if (test_bit(HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY,
- &hdev->quirks)) {
+ if (hci_test_quirk(hdev,
+ HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY)) {
info->primary_phy &= 0x1f;
info->secondary_phy &= 0x1f;
}
@@ -6387,8 +6385,8 @@ static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
}
-static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
+static void hci_le_pa_sync_established_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
struct hci_ev_le_pa_sync_established *ev = data;
int mask = hdev->link_mode;
@@ -6414,7 +6412,7 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
conn->sync_handle = le16_to_cpu(ev->handle);
conn->sid = HCI_SID_INVALID;
- mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, BIS_LINK,
+ mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, PA_LINK,
&flags);
if (!(mask & HCI_LM_ACCEPT)) {
hci_le_pa_term_sync(hdev, ev->handle);
@@ -6425,7 +6423,7 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
goto unlock;
/* Add connection to indicate PA sync event */
- pa_sync = hci_conn_add_unset(hdev, BIS_LINK, BDADDR_ANY,
+ pa_sync = hci_conn_add_unset(hdev, PA_LINK, BDADDR_ANY,
HCI_ROLE_SLAVE);
if (IS_ERR(pa_sync))
@@ -6456,7 +6454,7 @@ static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
- mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, BIS_LINK, &flags);
+ mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, PA_LINK, &flags);
if (!(mask & HCI_LM_ACCEPT))
goto unlock;
@@ -6718,8 +6716,8 @@ unlock:
hci_dev_unlock(hdev);
}
-static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
+static void hci_le_cis_established_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
struct hci_evt_le_cis_established *ev = data;
struct hci_conn *conn;
@@ -6777,8 +6775,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
qos->ucast.out.latency =
DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
1000);
- qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
- qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
+ qos->ucast.in.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
+ qos->ucast.out.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
qos->ucast.in.phy = ev->c_phy;
qos->ucast.out.phy = ev->p_phy;
break;
@@ -6792,8 +6790,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
qos->ucast.in.latency =
DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
1000);
- qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
- qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
+ qos->ucast.out.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
+ qos->ucast.in.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
qos->ucast.out.phy = ev->c_phy;
qos->ucast.in.phy = ev->p_phy;
break;
@@ -6913,7 +6911,8 @@ static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
/* Connect all BISes that are bound to the BIG */
while ((conn = hci_conn_hash_lookup_big_state(hdev, ev->handle,
- BT_BOUND))) {
+ BT_BOUND,
+ HCI_ROLE_MASTER))) {
if (ev->status) {
hci_connect_cfm(conn, ev->status);
hci_conn_del(conn);
@@ -6946,7 +6945,7 @@ static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
- struct hci_evt_le_big_sync_estabilished *ev = data;
+ struct hci_evt_le_big_sync_established *ev = data;
struct hci_conn *bis, *conn;
int i;
@@ -6988,9 +6987,14 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
continue;
}
- if (ev->status != 0x42)
+ if (ev->status != 0x42) {
/* Mark PA sync as established */
set_bit(HCI_CONN_PA_SYNC, &bis->flags);
+ /* Reset cleanup callback of PA Sync so it doesn't
+ * terminate the sync when deleting the connection.
+ */
+ conn->cleanup = NULL;
+ }
bis->sync_handle = conn->sync_handle;
bis->iso_qos.bcast.big = ev->handle;
@@ -7002,7 +7006,10 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
if (!ev->status) {
+ bis->state = BT_CONNECTED;
set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
+ hci_debugfs_create_conn(bis);
+ hci_conn_add_sysfs(bis);
hci_iso_setup_path(bis);
}
}
@@ -7026,6 +7033,42 @@ unlock:
hci_dev_unlock(hdev);
}
+static void hci_le_big_sync_lost_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
+{
+ struct hci_evt_le_big_sync_lost *ev = data;
+ struct hci_conn *bis, *conn;
+ bool mgmt_conn;
+
+ bt_dev_dbg(hdev, "big handle 0x%2.2x", ev->handle);
+
+ hci_dev_lock(hdev);
+
+ /* Delete the pa sync connection */
+ bis = hci_conn_hash_lookup_pa_sync_big_handle(hdev, ev->handle);
+ if (bis) {
+ conn = hci_conn_hash_lookup_pa_sync_handle(hdev,
+ bis->sync_handle);
+ if (conn)
+ hci_conn_del(conn);
+ }
+
+ /* Delete each bis connection */
+ while ((bis = hci_conn_hash_lookup_big_state(hdev, ev->handle,
+ BT_CONNECTED,
+ HCI_ROLE_SLAVE))) {
+ mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &bis->flags);
+ mgmt_device_disconnected(hdev, &bis->dst, bis->type, bis->dst_type,
+ ev->reason, mgmt_conn);
+
+ clear_bit(HCI_CONN_BIG_SYNC, &bis->flags);
+ hci_disconn_cfm(bis, ev->reason);
+ hci_conn_del(bis);
+ }
+
+ hci_dev_unlock(hdev);
+}
+
static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
@@ -7077,7 +7120,7 @@ unlock:
/* Entries in this table shall have their position according to the subevent
* opcode they handle so the use of the macros above is recommend since it does
* attempt to initialize at its proper index using Designated Initializers that
- * way events without a callback function can be ommited.
+ * way events without a callback function can be omitted.
*/
static const struct hci_le_ev {
void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
@@ -7123,7 +7166,7 @@ static const struct hci_le_ev {
HCI_MAX_EVENT_SIZE),
/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
- hci_le_pa_sync_estabilished_evt,
+ hci_le_pa_sync_established_evt,
sizeof(struct hci_ev_le_pa_sync_established)),
/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
@@ -7134,7 +7177,7 @@ static const struct hci_le_ev {
HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
sizeof(struct hci_evt_le_ext_adv_set_term)),
/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
- HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
+ HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_established_evt,
sizeof(struct hci_evt_le_cis_established)),
/* [0x1a = HCI_EVT_LE_CIS_REQ] */
HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
@@ -7147,7 +7190,12 @@ static const struct hci_le_ev {
/* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABLISHED] */
HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
hci_le_big_sync_established_evt,
- sizeof(struct hci_evt_le_big_sync_estabilished),
+ sizeof(struct hci_evt_le_big_sync_established),
+ HCI_MAX_EVENT_SIZE),
+ /* [0x1e = HCI_EVT_LE_BIG_SYNC_LOST] */
+ HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_LOST,
+ hci_le_big_sync_lost_evt,
+ sizeof(struct hci_evt_le_big_sync_lost),
HCI_MAX_EVENT_SIZE),
/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
@@ -7433,7 +7481,7 @@ static const struct hci_ev {
/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
sizeof(struct hci_ev_sync_conn_complete)),
- /* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
+ /* [0x2f = HCI_EV_EXTENDED_INQUIRY_RESULT] */
HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
hci_extended_inquiry_result_evt,
sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 428ee5c7de7e..fc866759910d 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -118,7 +118,7 @@ static void hci_sock_free_cookie(struct sock *sk)
int id = hci_pi(sk)->cookie;
if (id) {
- hci_pi(sk)->cookie = 0xffffffff;
+ hci_pi(sk)->cookie = 0;
ida_free(&sock_cookie_ida, id);
}
}
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 6687f2a4d1eb..eefdb6134ca5 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -393,7 +393,7 @@ static void le_scan_disable(struct work_struct *work)
if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
goto _return;
- if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
+ if (hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY)) {
if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
hdev->discovery.state != DISCOVERY_RESOLVING)
goto discov_stopped;
@@ -1205,10 +1205,127 @@ static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
+static int
+hci_set_ext_adv_params_sync(struct hci_dev *hdev, struct adv_info *adv,
+ const struct hci_cp_le_set_ext_adv_params *cp,
+ struct hci_rp_le_set_ext_adv_params *rp)
+{
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(*cp),
+ cp, HCI_CMD_TIMEOUT);
+
+ /* If the command returns a status event, skb will be set to -ENODATA */
+ if (skb == ERR_PTR(-ENODATA))
+ return 0;
+
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld",
+ HCI_OP_LE_SET_EXT_ADV_PARAMS, PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ if (skb->len != sizeof(*rp)) {
+ bt_dev_err(hdev, "Invalid response length for 0x%4.4x: %u",
+ HCI_OP_LE_SET_EXT_ADV_PARAMS, skb->len);
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ memcpy(rp, skb->data, sizeof(*rp));
+ kfree_skb(skb);
+
+ if (!rp->status) {
+ hdev->adv_addr_type = cp->own_addr_type;
+ if (!cp->handle) {
+ /* Store in hdev for instance 0 */
+ hdev->adv_tx_power = rp->tx_power;
+ } else if (adv) {
+ adv->tx_power = rp->tx_power;
+ }
+ }
+
+ return rp->status;
+}
+
+static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
+{
+ DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length,
+ HCI_MAX_EXT_AD_LENGTH);
+ u8 len;
+ struct adv_info *adv = NULL;
+ int err;
+
+ if (instance) {
+ adv = hci_find_adv_instance(hdev, instance);
+ if (!adv || !adv->adv_data_changed)
+ return 0;
+ }
+
+ len = eir_create_adv_data(hdev, instance, pdu->data,
+ HCI_MAX_EXT_AD_LENGTH);
+
+ pdu->length = len;
+ pdu->handle = adv ? adv->handle : instance;
+ pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
+ pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
+ struct_size(pdu, data, len), pdu,
+ HCI_CMD_TIMEOUT);
+ if (err)
+ return err;
+
+ /* Update data if the command succeeds */
+ if (adv) {
+ adv->adv_data_changed = false;
+ } else {
+ memcpy(hdev->adv_data, pdu->data, len);
+ hdev->adv_data_len = len;
+ }
+
+ return 0;
+}
+
+static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
+{
+ struct hci_cp_le_set_adv_data cp;
+ u8 len;
+
+ memset(&cp, 0, sizeof(cp));
+
+ len = eir_create_adv_data(hdev, instance, cp.data, sizeof(cp.data));
+
+ /* There's nothing to do if the data hasn't changed */
+ if (hdev->adv_data_len == len &&
+ memcmp(cp.data, hdev->adv_data, len) == 0)
+ return 0;
+
+ memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
+ hdev->adv_data_len = len;
+
+ cp.length = len;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
+{
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+ return 0;
+
+ if (ext_adv_capable(hdev))
+ return hci_set_ext_adv_data_sync(hdev, instance);
+
+ return hci_set_adv_data_sync(hdev, instance);
+}
+
int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
{
struct hci_cp_le_set_ext_adv_params cp;
- bool connectable;
+ struct hci_rp_le_set_ext_adv_params rp;
+ bool connectable, require_privacy;
u32 flags;
bdaddr_t random_addr;
u8 own_addr_type;
@@ -1228,7 +1345,7 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
* Command Disallowed error, so we must first disable the
* instance if it is active.
*/
- if (adv && !adv->pending) {
+ if (adv) {
err = hci_disable_ext_adv_instance_sync(hdev, instance);
if (err)
return err;
@@ -1246,10 +1363,12 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
return -EPERM;
/* Set require_privacy to true only when non-connectable
- * advertising is used. In that case it is fine to use a
- * non-resolvable private address.
+ * advertising is used and it is not periodic.
+ * In that case it is fine to use a non-resolvable private address.
*/
- err = hci_get_random_address(hdev, !connectable,
+ require_privacy = !connectable && !(adv && adv->periodic);
+
+ err = hci_get_random_address(hdev, require_privacy,
adv_use_rpa(hdev, flags), adv,
&own_addr_type, &random_addr);
if (err < 0)
@@ -1316,8 +1435,12 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
cp.secondary_phy = HCI_ADV_PHY_1M;
}
- err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
- sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ err = hci_set_ext_adv_params_sync(hdev, adv, &cp, &rp);
+ if (err)
+ return err;
+
+ /* Update adv data as tx power is known now */
+ err = hci_set_ext_adv_data_sync(hdev, cp.handle);
if (err)
return err;
@@ -1822,79 +1945,6 @@ int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
-static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
-{
- DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length,
- HCI_MAX_EXT_AD_LENGTH);
- u8 len;
- struct adv_info *adv = NULL;
- int err;
-
- if (instance) {
- adv = hci_find_adv_instance(hdev, instance);
- if (!adv || !adv->adv_data_changed)
- return 0;
- }
-
- len = eir_create_adv_data(hdev, instance, pdu->data,
- HCI_MAX_EXT_AD_LENGTH);
-
- pdu->length = len;
- pdu->handle = adv ? adv->handle : instance;
- pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
- pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
-
- err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
- struct_size(pdu, data, len), pdu,
- HCI_CMD_TIMEOUT);
- if (err)
- return err;
-
- /* Update data if the command succeed */
- if (adv) {
- adv->adv_data_changed = false;
- } else {
- memcpy(hdev->adv_data, pdu->data, len);
- hdev->adv_data_len = len;
- }
-
- return 0;
-}
-
-static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
-{
- struct hci_cp_le_set_adv_data cp;
- u8 len;
-
- memset(&cp, 0, sizeof(cp));
-
- len = eir_create_adv_data(hdev, instance, cp.data, sizeof(cp.data));
-
- /* There's nothing to do if the data hasn't changed */
- if (hdev->adv_data_len == len &&
- memcmp(cp.data, hdev->adv_data, len) == 0)
- return 0;
-
- memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
- hdev->adv_data_len = len;
-
- cp.length = len;
-
- return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
- sizeof(cp), &cp, HCI_CMD_TIMEOUT);
-}
-
-int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
-{
- if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
- return 0;
-
- if (ext_adv_capable(hdev))
- return hci_set_ext_adv_data_sync(hdev, instance);
-
- return hci_set_adv_data_sync(hdev, instance);
-}
-
int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
bool force)
{
@@ -1970,13 +2020,10 @@ static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
{
struct adv_info *adv, *n;
- int err = 0;
if (ext_adv_capable(hdev))
/* Remove all existing sets */
- err = hci_clear_adv_sets_sync(hdev, sk);
- if (ext_adv_capable(hdev))
- return err;
+ return hci_clear_adv_sets_sync(hdev, sk);
/* This is safe as long as there is no command send while the lock is
* held.
@@ -2004,13 +2051,11 @@ static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
struct sock *sk)
{
- int err = 0;
+ int err;
/* If we use extended advertising, instance has to be removed first. */
if (ext_adv_capable(hdev))
- err = hci_remove_ext_adv_instance_sync(hdev, instance, sk);
- if (ext_adv_capable(hdev))
- return err;
+ return hci_remove_ext_adv_instance_sync(hdev, instance, sk);
/* This is safe as long as there is no command send while the lock is
* held.
@@ -2109,16 +2154,13 @@ int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
int hci_disable_advertising_sync(struct hci_dev *hdev)
{
u8 enable = 0x00;
- int err = 0;
/* If controller is not advertising we are done. */
if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
return 0;
if (ext_adv_capable(hdev))
- err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
- if (ext_adv_capable(hdev))
- return err;
+ return hci_disable_ext_adv_instance_sync(hdev, 0x00);
return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
sizeof(enable), &enable, HCI_CMD_TIMEOUT);
@@ -2481,6 +2523,10 @@ static int hci_pause_advertising_sync(struct hci_dev *hdev)
int err;
int old_state;
+ /* If controller is not advertising we are done. */
+ if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
+ return 0;
+
/* If already been paused there is nothing to do. */
if (hdev->advertising_paused)
return 0;
@@ -2550,6 +2596,13 @@ static int hci_resume_advertising_sync(struct hci_dev *hdev)
hci_remove_ext_adv_instance_sync(hdev, adv->instance,
NULL);
}
+
+ /* If the current advertising instance is set to instance 0x00,
+ * then we need to re-enable it.
+ */
+ if (!hdev->cur_adv_instance)
+ err = hci_enable_ext_advertising_sync(hdev,
+ hdev->cur_adv_instance);
} else {
/* Schedule for most recent instance to be restarted and begin
* the software rotation loop
@@ -2885,7 +2938,7 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
if (sent) {
struct hci_conn *conn;
- conn = hci_conn_hash_lookup_ba(hdev, BIS_LINK,
+ conn = hci_conn_hash_lookup_ba(hdev, PA_LINK,
&sent->bdaddr);
if (conn) {
struct bt_iso_qos *qos = &conn->iso_qos;
@@ -3300,7 +3353,7 @@ static int hci_powered_update_adv_sync(struct hci_dev *hdev)
* advertising data. This also applies to the case
* where BR/EDR was toggled during the AUTO_OFF phase.
*/
- if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
list_empty(&hdev->adv_instances)) {
if (ext_adv_capable(hdev)) {
err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
@@ -3437,13 +3490,13 @@ int hci_update_scan_sync(struct hci_dev *hdev)
return hci_write_scan_enable_sync(hdev, scan);
}
-int hci_update_name_sync(struct hci_dev *hdev)
+int hci_update_name_sync(struct hci_dev *hdev, const u8 *name)
{
struct hci_cp_write_local_name cp;
memset(&cp, 0, sizeof(cp));
- memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
+ memcpy(cp.name, name, sizeof(cp.name));
return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
sizeof(cp), &cp,
@@ -3496,7 +3549,7 @@ int hci_powered_update_sync(struct hci_dev *hdev)
hci_write_fast_connectable_sync(hdev, false);
hci_update_scan_sync(hdev);
hci_update_class_sync(hdev);
- hci_update_name_sync(hdev);
+ hci_update_name_sync(hdev, hdev->dev_name);
hci_update_eir_sync(hdev);
}
@@ -3543,7 +3596,7 @@ static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
return;
- if (test_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks))
+ if (hci_test_quirk(hdev, HCI_QUIRK_BDADDR_PROPERTY_BROKEN))
baswap(&hdev->public_addr, &ba);
else
bacpy(&hdev->public_addr, &ba);
@@ -3618,7 +3671,7 @@ static int hci_init0_sync(struct hci_dev *hdev)
bt_dev_dbg(hdev, "");
/* Reset */
- if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
+ if (!hci_test_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE)) {
err = hci_reset_sync(hdev);
if (err)
return err;
@@ -3631,7 +3684,7 @@ static int hci_unconf_init_sync(struct hci_dev *hdev)
{
int err;
- if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
+ if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
return 0;
err = hci_init0_sync(hdev);
@@ -3674,7 +3727,7 @@ static int hci_read_local_cmds_sync(struct hci_dev *hdev)
* supported commands.
*/
if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
- !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
+ !hci_test_quirk(hdev, HCI_QUIRK_BROKEN_LOCAL_COMMANDS))
return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS,
0, NULL, HCI_CMD_TIMEOUT);
@@ -3688,7 +3741,7 @@ static int hci_init1_sync(struct hci_dev *hdev)
bt_dev_dbg(hdev, "");
/* Reset */
- if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
+ if (!hci_test_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE)) {
err = hci_reset_sync(hdev);
if (err)
return err;
@@ -3751,7 +3804,7 @@ static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type,
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
return 0;
- if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
+ if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL))
return 0;
memset(&cp, 0, sizeof(cp));
@@ -3778,7 +3831,7 @@ static int hci_clear_event_filter_sync(struct hci_dev *hdev)
* a hci_set_event_filter_sync() call succeeds, but we do
* the check both for parity and as a future reminder.
*/
- if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
+ if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL))
return 0;
return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
@@ -3802,7 +3855,7 @@ static int hci_write_sync_flowctl_sync(struct hci_dev *hdev)
/* Check if the controller supports SCO and HCI_OP_WRITE_SYNC_FLOWCTL */
if (!lmp_sco_capable(hdev) || !(hdev->commands[10] & BIT(4)) ||
- !test_bit(HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED, &hdev->quirks))
+ !hci_test_quirk(hdev, HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED))
return 0;
memset(&cp, 0, sizeof(cp));
@@ -3877,7 +3930,7 @@ static int hci_write_inquiry_mode_sync(struct hci_dev *hdev)
u8 mode;
if (!lmp_inq_rssi_capable(hdev) &&
- !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
+ !hci_test_quirk(hdev, HCI_QUIRK_FIXUP_INQUIRY_MODE))
return 0;
/* If Extended Inquiry Result events are supported, then
@@ -4067,7 +4120,7 @@ static int hci_set_event_mask_sync(struct hci_dev *hdev)
}
if (lmp_inq_rssi_capable(hdev) ||
- test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
+ hci_test_quirk(hdev, HCI_QUIRK_FIXUP_INQUIRY_MODE))
events[4] |= 0x02; /* Inquiry Result with RSSI */
if (lmp_ext_feat_capable(hdev))
@@ -4119,7 +4172,7 @@ static int hci_read_stored_link_key_sync(struct hci_dev *hdev)
struct hci_cp_read_stored_link_key cp;
if (!(hdev->commands[6] & 0x20) ||
- test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
+ hci_test_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY))
return 0;
memset(&cp, 0, sizeof(cp));
@@ -4168,7 +4221,7 @@ static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev)
{
if (!(hdev->commands[18] & 0x04) ||
!(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
- test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
+ hci_test_quirk(hdev, HCI_QUIRK_BROKEN_ERR_DATA_REPORTING))
return 0;
return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING,
@@ -4182,7 +4235,7 @@ static int hci_read_page_scan_type_sync(struct hci_dev *hdev)
* this command in the bit mask of supported commands.
*/
if (!(hdev->commands[13] & 0x01) ||
- test_bit(HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE, &hdev->quirks))
+ hci_test_quirk(hdev, HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE))
return 0;
return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE,
@@ -4377,7 +4430,7 @@ static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev)
static int hci_le_read_tx_power_sync(struct hci_dev *hdev)
{
if (!(hdev->commands[38] & 0x80) ||
- test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks))
+ hci_test_quirk(hdev, HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER))
return 0;
return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER,
@@ -4420,7 +4473,7 @@ static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev)
__le16 timeout = cpu_to_le16(hdev->rpa_timeout);
if (!(hdev->commands[35] & 0x04) ||
- test_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks))
+ hci_test_quirk(hdev, HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT))
return 0;
return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT,
@@ -4487,14 +4540,14 @@ static int hci_le_set_host_feature_sync(struct hci_dev *hdev)
{
struct hci_cp_le_set_host_feature cp;
- if (!cis_capable(hdev))
+ if (!iso_capable(hdev))
return 0;
memset(&cp, 0, sizeof(cp));
/* Connected Isochronous Channels (Host Support) */
cp.bit_number = 32;
- cp.bit_value = 1;
+ cp.bit_value = iso_enabled(hdev) ? 0x01 : 0x00;
return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
@@ -4565,7 +4618,7 @@ static int hci_delete_stored_link_key_sync(struct hci_dev *hdev)
* just disable this command.
*/
if (!(hdev->commands[6] & 0x80) ||
- test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
+ hci_test_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY))
return 0;
memset(&cp, 0, sizeof(cp));
@@ -4691,7 +4744,7 @@ static int hci_set_err_data_report_sync(struct hci_dev *hdev)
if (!(hdev->commands[18] & 0x08) ||
!(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
- test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
+ hci_test_quirk(hdev, HCI_QUIRK_BROKEN_ERR_DATA_REPORTING))
return 0;
if (enabled == hdev->err_data_reporting)
@@ -4904,7 +4957,7 @@ static int hci_dev_setup_sync(struct hci_dev *hdev)
size_t i;
if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
- !test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks))
+ !hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP))
return 0;
bt_dev_dbg(hdev, "");
@@ -4915,7 +4968,7 @@ static int hci_dev_setup_sync(struct hci_dev *hdev)
ret = hdev->setup(hdev);
for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) {
- if (test_bit(hci_broken_table[i].quirk, &hdev->quirks))
+ if (hci_test_quirk(hdev, hci_broken_table[i].quirk))
bt_dev_warn(hdev, "%s", hci_broken_table[i].desc);
}
@@ -4923,10 +4976,10 @@ static int hci_dev_setup_sync(struct hci_dev *hdev)
* BD_ADDR invalid before creating the HCI device or in
* its setup callback.
*/
- invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
- test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ invalid_bdaddr = hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
+ hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
if (!ret) {
- if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) &&
+ if (hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY) &&
!bacmp(&hdev->public_addr, BDADDR_ANY))
hci_dev_get_bd_addr_from_property(hdev);
@@ -4948,7 +5001,7 @@ static int hci_dev_setup_sync(struct hci_dev *hdev)
* In case any of them is set, the controller has to
* start up as unconfigured.
*/
- if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
+ if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) ||
invalid_bdaddr)
hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
@@ -5008,7 +5061,7 @@ static int hci_dev_init_sync(struct hci_dev *hdev)
* then they need to be reprogrammed after the init procedure
* completed.
*/
- if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
+ if (hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG) &&
!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
ret = hdev->set_diag(hdev, true);
@@ -5265,7 +5318,7 @@ int hci_dev_close_sync(struct hci_dev *hdev)
/* Reset device */
skb_queue_purge(&hdev->cmd_q);
atomic_set(&hdev->cmd_cnt, 1);
- if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
+ if (hci_test_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE) &&
!auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
set_bit(HCI_INIT, &hdev->flags);
hci_reset_sync(hdev);
@@ -5449,7 +5502,7 @@ static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
{
struct hci_cp_disconnect cp;
- if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) {
+ if (conn->type == BIS_LINK || conn->type == PA_LINK) {
/* This is a BIS connection, hci_conn_del will
* do the necessary cleanup.
*/
@@ -5518,7 +5571,7 @@ static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
return HCI_ERROR_LOCAL_HOST_TERM;
}
- if (conn->type == BIS_LINK) {
+ if (conn->type == BIS_LINK || conn->type == PA_LINK) {
/* There is no way to cancel a BIS without terminating the BIG
* which is done later on connection cleanup.
*/
@@ -5583,7 +5636,7 @@ static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
if (conn->type == CIS_LINK)
return hci_le_reject_cis_sync(hdev, conn, reason);
- if (conn->type == BIS_LINK)
+ if (conn->type == BIS_LINK || conn->type == PA_LINK)
return -EINVAL;
if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
@@ -5633,7 +5686,7 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
}
/* Cleanup hci_conn object if it cannot be cancelled as it
- * likelly means the controller and host stack are out of sync
+ * likely means the controller and host stack are out of sync
* or in case of LE it was still scanning so it can be cleanup
* safely.
*/
@@ -5915,7 +5968,7 @@ static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
own_addr_type = ADDR_LE_DEV_PUBLIC;
if (hci_is_adv_monitoring(hdev) ||
- (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
+ (hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER) &&
hdev->discovery.result_filtering)) {
/* Duplicate filter should be disabled when some advertisement
* monitor is activated, otherwise AdvMon can only receive one
@@ -5978,8 +6031,7 @@ int hci_start_discovery_sync(struct hci_dev *hdev)
* and LE scanning are done sequentially with separate
* timeouts.
*/
- if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
- &hdev->quirks)) {
+ if (hci_test_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY)) {
timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
/* During simultaneous discovery, we double LE scan
* interval. We must leave some time for the controller
@@ -6056,7 +6108,7 @@ static int hci_update_event_filter_sync(struct hci_dev *hdev)
/* Some fake CSR controllers lock up after setting this type of
* filter, so avoid sending the request altogether.
*/
- if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
+ if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL))
return 0;
/* Always clear event filter when starting */
@@ -6073,7 +6125,7 @@ static int hci_update_event_filter_sync(struct hci_dev *hdev)
&b->bdaddr,
HCI_CONN_SETUP_AUTO_ON);
if (err)
- bt_dev_dbg(hdev, "Failed to set event filter for %pMR",
+ bt_dev_err(hdev, "Failed to set event filter for %pMR",
&b->bdaddr);
else
scan = SCAN_PAGE;
@@ -6277,6 +6329,7 @@ static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
struct hci_conn *conn)
{
struct hci_cp_le_set_ext_adv_params cp;
+ struct hci_rp_le_set_ext_adv_params rp;
int err;
bdaddr_t random_addr;
u8 own_addr_type;
@@ -6318,8 +6371,12 @@ static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
if (err)
return err;
- err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
- sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ err = hci_set_ext_adv_params_sync(hdev, NULL, &cp, &rp);
+ if (err)
+ return err;
+
+ /* Update adv data as tx power is known now */
+ err = hci_set_ext_adv_data_sync(hdev, cp.handle);
if (err)
return err;
@@ -6766,8 +6823,8 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
return 0;
}
- /* No privacy so use a public address. */
- *own_addr_type = ADDR_LE_DEV_PUBLIC;
+ /* No privacy, use the current address */
+ hci_copy_identity_address(hdev, rand_addr, own_addr_type);
return 0;
}
@@ -6937,8 +6994,6 @@ static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
hci_dev_lock(hdev);
- hci_dev_clear_flag(hdev, HCI_PA_SYNC);
-
if (!hci_conn_valid(hdev, conn))
clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
@@ -6946,7 +7001,7 @@ static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
goto unlock;
/* Add connection to indicate PA sync error */
- pa_sync = hci_conn_add_unset(hdev, BIS_LINK, BDADDR_ANY,
+ pa_sync = hci_conn_add_unset(hdev, PA_LINK, BDADDR_ANY,
HCI_ROLE_SLAVE);
if (IS_ERR(pa_sync))
@@ -6999,10 +7054,13 @@ static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
/* SID has not been set listen for HCI_EV_LE_EXT_ADV_REPORT to update
* it.
*/
- if (conn->sid == HCI_SID_INVALID)
- __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL,
- HCI_EV_LE_EXT_ADV_REPORT,
- conn->conn_timeout, NULL);
+ if (conn->sid == HCI_SID_INVALID) {
+ err = __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL,
+ HCI_EV_LE_EXT_ADV_REPORT,
+ conn->conn_timeout, NULL);
+ if (err == -ETIMEDOUT)
+ goto done;
+ }
memset(&cp, 0, sizeof(cp));
cp.options = qos->bcast.options;
@@ -7032,6 +7090,12 @@ static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
__hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL,
0, NULL, HCI_CMD_TIMEOUT);
+done:
+ hci_dev_clear_flag(hdev, HCI_PA_SYNC);
+
+ /* Update passive scan since HCI_PA_SYNC flag has been cleared */
+ hci_update_passive_scan_sync(hdev);
+
return err;
}
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index 3c2c98eecc62..9b263d061e05 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -91,8 +91,8 @@ static struct sock *iso_get_sock(bdaddr_t *src, bdaddr_t *dst,
iso_sock_match_t match, void *data);
/* ---- ISO timers ---- */
-#define ISO_CONN_TIMEOUT (HZ * 40)
-#define ISO_DISCONN_TIMEOUT (HZ * 2)
+#define ISO_CONN_TIMEOUT secs_to_jiffies(20)
+#define ISO_DISCONN_TIMEOUT secs_to_jiffies(2)
static void iso_conn_free(struct kref *ref)
{
@@ -111,6 +111,8 @@ static void iso_conn_free(struct kref *ref)
/* Ensure no more work items will run since hci_conn has been dropped */
disable_delayed_work_sync(&conn->timeout_work);
+ kfree_skb(conn->rx_skb);
+
kfree(conn);
}
@@ -367,7 +369,8 @@ static int iso_connect_bis(struct sock *sk)
if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
hcon = hci_bind_bis(hdev, &iso_pi(sk)->dst, iso_pi(sk)->bc_sid,
&iso_pi(sk)->qos, iso_pi(sk)->base_len,
- iso_pi(sk)->base);
+ iso_pi(sk)->base,
+ READ_ONCE(sk->sk_sndtimeo));
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto unlock;
@@ -376,7 +379,8 @@ static int iso_connect_bis(struct sock *sk)
hcon = hci_connect_bis(hdev, &iso_pi(sk)->dst,
le_addr_type(iso_pi(sk)->dst_type),
iso_pi(sk)->bc_sid, &iso_pi(sk)->qos,
- iso_pi(sk)->base_len, iso_pi(sk)->base);
+ iso_pi(sk)->base_len, iso_pi(sk)->base,
+ READ_ONCE(sk->sk_sndtimeo));
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto unlock;
@@ -413,7 +417,7 @@ static int iso_connect_bis(struct sock *sk)
sk->sk_state = BT_CONNECT;
} else {
sk->sk_state = BT_CONNECT;
- iso_sock_set_timer(sk, sk->sk_sndtimeo);
+ iso_sock_set_timer(sk, READ_ONCE(sk->sk_sndtimeo));
}
release_sock(sk);
@@ -458,11 +462,19 @@ static int iso_connect_cis(struct sock *sk)
goto unlock;
}
+ /* Check if there are available buffers for output/TX. */
+ if (iso_pi(sk)->qos.ucast.out.sdu && !hci_iso_count(hdev) &&
+ (hdev->iso_pkts && !hdev->iso_cnt)) {
+ err = -ENOBUFS;
+ goto unlock;
+ }
+
/* Just bind if DEFER_SETUP has been set */
if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
hcon = hci_bind_cis(hdev, &iso_pi(sk)->dst,
le_addr_type(iso_pi(sk)->dst_type),
- &iso_pi(sk)->qos);
+ &iso_pi(sk)->qos,
+ READ_ONCE(sk->sk_sndtimeo));
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto unlock;
@@ -470,7 +482,8 @@ static int iso_connect_cis(struct sock *sk)
} else {
hcon = hci_connect_cis(hdev, &iso_pi(sk)->dst,
le_addr_type(iso_pi(sk)->dst_type),
- &iso_pi(sk)->qos);
+ &iso_pi(sk)->qos,
+ READ_ONCE(sk->sk_sndtimeo));
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto unlock;
@@ -503,7 +516,7 @@ static int iso_connect_cis(struct sock *sk)
sk->sk_state = BT_CONNECT;
} else {
sk->sk_state = BT_CONNECT;
- iso_sock_set_timer(sk, sk->sk_sndtimeo);
+ iso_sock_set_timer(sk, READ_ONCE(sk->sk_sndtimeo));
}
release_sock(sk);
@@ -750,6 +763,13 @@ static void iso_sock_kill(struct sock *sk)
BT_DBG("sk %p state %d", sk, sk->sk_state);
+ /* Sock is dead, so set conn->sk to NULL to avoid possible UAF */
+ if (iso_pi(sk)->conn) {
+ iso_conn_lock(iso_pi(sk)->conn);
+ iso_pi(sk)->conn->sk = NULL;
+ iso_conn_unlock(iso_pi(sk)->conn);
+ }
+
/* Kill poor orphan */
bt_sock_unlink(&iso_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
@@ -1347,7 +1367,7 @@ static int iso_sock_getname(struct socket *sock, struct sockaddr *addr,
bacpy(&sa->iso_bdaddr, &iso_pi(sk)->dst);
sa->iso_bdaddr_type = iso_pi(sk)->dst_type;
- if (hcon && hcon->type == BIS_LINK) {
+ if (hcon && (hcon->type == BIS_LINK || hcon->type == PA_LINK)) {
sa->iso_bc->bc_sid = iso_pi(sk)->bc_sid;
sa->iso_bc->bc_num_bis = iso_pi(sk)->bc_num_bis;
memcpy(sa->iso_bc->bc_bis, iso_pi(sk)->bc_bis,
@@ -1687,6 +1707,17 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
clear_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags);
break;
+ case BT_PKT_SEQNUM:
+ err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
+ break;
+
+ if (opt)
+ set_bit(BT_SK_PKT_SEQNUM, &bt_sk(sk)->flags);
+ else
+ clear_bit(BT_SK_PKT_SEQNUM, &bt_sk(sk)->flags);
+ break;
+
case BT_ISO_QOS:
if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND &&
sk->sk_state != BT_CONNECT2 &&
@@ -1891,7 +1922,7 @@ static void iso_sock_ready(struct sock *sk)
static bool iso_match_big(struct sock *sk, void *data)
{
- struct hci_evt_le_big_sync_estabilished *ev = data;
+ struct hci_evt_le_big_sync_established *ev = data;
return ev->handle == iso_pi(sk)->qos.bcast.big;
}
@@ -1912,7 +1943,7 @@ static void iso_conn_ready(struct iso_conn *conn)
{
struct sock *parent = NULL;
struct sock *sk = conn->sk;
- struct hci_ev_le_big_sync_estabilished *ev = NULL;
+ struct hci_ev_le_big_sync_established *ev = NULL;
struct hci_ev_le_pa_sync_established *ev2 = NULL;
struct hci_ev_le_per_adv_report *ev3 = NULL;
struct hci_conn *hcon;
@@ -2023,7 +2054,7 @@ static void iso_conn_ready(struct iso_conn *conn)
hci_conn_hold(hcon);
iso_chan_add(conn, sk, parent);
- if ((ev && ((struct hci_evt_le_big_sync_estabilished *)ev)->status) ||
+ if ((ev && ((struct hci_evt_le_big_sync_established *)ev)->status) ||
(ev2 && ev2->status)) {
/* Trigger error signal on child socket */
sk->sk_err = ECONNREFUSED;
@@ -2082,7 +2113,7 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
* proceed to establishing a BIG sync:
*
* 1. HCI_EV_LE_PA_SYNC_ESTABLISHED: The socket may specify a specific
- * SID to listen to and once sync is estabilished its handle needs to
+ * SID to listen to and once sync is established its handle needs to
* be stored in iso_pi(sk)->sync_handle so it can be matched once
* receiving the BIG Info.
* 2. HCI_EVT_LE_BIG_INFO_ADV_REPORT: When connect_ind is triggered by a
@@ -2226,7 +2257,8 @@ done:
static void iso_connect_cfm(struct hci_conn *hcon, __u8 status)
{
- if (hcon->type != CIS_LINK && hcon->type != BIS_LINK) {
+ if (hcon->type != CIS_LINK && hcon->type != BIS_LINK &&
+ hcon->type != PA_LINK) {
if (hcon->type != LE_LINK)
return;
@@ -2267,7 +2299,8 @@ static void iso_connect_cfm(struct hci_conn *hcon, __u8 status)
static void iso_disconn_cfm(struct hci_conn *hcon, __u8 reason)
{
- if (hcon->type != CIS_LINK && hcon->type != BIS_LINK)
+ if (hcon->type != CIS_LINK && hcon->type != BIS_LINK &&
+ hcon->type != PA_LINK)
return;
BT_DBG("hcon %p reason %d", hcon, reason);
@@ -2278,7 +2311,8 @@ static void iso_disconn_cfm(struct hci_conn *hcon, __u8 reason)
void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
struct iso_conn *conn = hcon->iso_data;
- __u16 pb, ts, len;
+ struct skb_shared_hwtstamps *hwts;
+ __u16 pb, ts, len, sn;
if (!conn)
goto drop;
@@ -2301,13 +2335,17 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
if (ts) {
struct hci_iso_ts_data_hdr *hdr;
- /* TODO: add timestamp to the packet? */
hdr = skb_pull_data(skb, HCI_ISO_TS_DATA_HDR_SIZE);
if (!hdr) {
BT_ERR("Frame is too short (len %d)", skb->len);
goto drop;
}
+ /* Record the timestamp in the skb */
+ hwts = skb_hwtstamps(skb);
+ hwts->hwtstamp = us_to_ktime(le32_to_cpu(hdr->ts));
+
+ sn = __le16_to_cpu(hdr->sn);
len = __le16_to_cpu(hdr->slen);
} else {
struct hci_iso_data_hdr *hdr;
@@ -2318,18 +2356,20 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
goto drop;
}
+ sn = __le16_to_cpu(hdr->sn);
len = __le16_to_cpu(hdr->slen);
}
flags = hci_iso_data_flags(len);
len = hci_iso_data_len(len);
- BT_DBG("Start: total len %d, frag len %d flags 0x%4.4x", len,
- skb->len, flags);
+ BT_DBG("Start: total len %d, frag len %d flags 0x%4.4x sn %d",
+ len, skb->len, flags, sn);
if (len == skb->len) {
/* Complete frame received */
hci_skb_pkt_status(skb) = flags & 0x03;
+ hci_skb_pkt_seqnum(skb) = sn;
iso_recv_frame(conn, skb);
return;
}
@@ -2352,9 +2392,17 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
goto drop;
hci_skb_pkt_status(conn->rx_skb) = flags & 0x03;
+ hci_skb_pkt_seqnum(conn->rx_skb) = sn;
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
skb->len);
conn->rx_len = len - skb->len;
+
+ /* Copy hw timestamp from skb to rx_skb if present */
+ if (ts) {
+ hwts = skb_hwtstamps(conn->rx_skb);
+ hwts->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
+ }
+
break;
case ISO_CONT:
@@ -2379,7 +2427,7 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
skb->len);
conn->rx_len -= skb->len;
- return;
+ break;
case ISO_END:
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
@@ -2455,11 +2503,11 @@ static const struct net_proto_family iso_sock_family_ops = {
.create = iso_sock_create,
};
-static bool iso_inited;
+static bool inited;
-bool iso_enabled(void)
+bool iso_inited(void)
{
- return iso_inited;
+ return inited;
}
int iso_init(void)
@@ -2468,7 +2516,7 @@ int iso_init(void)
BUILD_BUG_ON(sizeof(struct sockaddr_iso) > sizeof(struct sockaddr));
- if (iso_inited)
+ if (inited)
return -EALREADY;
err = proto_register(&iso_proto, 0);
@@ -2496,7 +2544,7 @@ int iso_init(void)
iso_debugfs = debugfs_create_file("iso", 0444, bt_debugfs,
NULL, &iso_debugfs_fops);
- iso_inited = true;
+ inited = true;
return 0;
@@ -2507,7 +2555,7 @@ error:
int iso_exit(void)
{
- if (!iso_inited)
+ if (!inited)
return -EALREADY;
bt_procfs_cleanup(&init_net, "iso");
@@ -2521,7 +2569,7 @@ int iso_exit(void)
proto_unregister(&iso_proto);
- iso_inited = false;
+ inited = false;
return 0;
}
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index a5bde5db58ef..805c752ac0a9 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3415,7 +3415,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
struct l2cap_conf_efs efs;
u8 remote_efs = 0;
- u16 mtu = L2CAP_DEFAULT_MTU;
+ u16 mtu = 0;
u16 result = L2CAP_CONF_SUCCESS;
u16 size;
@@ -3520,6 +3520,29 @@ done:
/* Configure output options and let the other side know
* which ones we don't like. */
+ /* If the MTU is not provided in the configure request, try adjusting
+ * it to the current output MTU, if one has been set.
+ *
+ * Bluetooth Core 6.1, Vol 3, Part A, Section 4.5
+ *
+ * Each configuration parameter value (if any is present) in an
+ * L2CAP_CONFIGURATION_RSP packet reflects an ‘adjustment’ to a
+ * configuration parameter value that has been sent (or, in case
+ * of default values, implied) in the corresponding
+ * L2CAP_CONFIGURATION_REQ packet.
+ */
+ if (!mtu) {
+ /* Only adjust for ERTM channels, as for older modes the
+ * remote stack may not be able to detect the adjustment,
+ * causing it to silently drop packets.
+ */
+ if (chan->mode == L2CAP_MODE_ERTM &&
+ chan->omtu && chan->omtu != L2CAP_DEFAULT_MTU)
+ mtu = chan->omtu;
+ else
+ mtu = L2CAP_DEFAULT_MTU;
+ }
+
if (mtu < L2CAP_DEFAULT_MIN_MTU)
result = L2CAP_CONF_UNACCEPT;
else {
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 5aa55fa69594..814fb8610ac4 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -255,7 +255,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
&la.l2_bdaddr, la.l2_bdaddr_type,
- sk->sk_sndtimeo);
+ READ_ONCE(sk->sk_sndtimeo));
if (err)
return err;
@@ -1422,7 +1422,10 @@ static int l2cap_sock_release(struct socket *sock)
if (!sk)
return 0;
+ lock_sock_nested(sk, L2CAP_NESTING_PARENT);
l2cap_sock_cleanup_listen(sk);
+ release_sock(sk);
+
bt_sock_unlink(&l2cap_sk_list, sk);
err = l2cap_sock_shutdown(sock, SHUT_RDWR);
@@ -1703,6 +1706,9 @@ static void l2cap_sock_resume_cb(struct l2cap_chan *chan)
{
struct sock *sk = chan->data;
+ if (!sk)
+ return;
+
if (test_and_clear_bit(FLAG_PENDING_SECURITY, &chan->flags)) {
sk->sk_state = BT_CONNECTED;
chan->state = BT_CONNECTED;
@@ -1725,7 +1731,7 @@ static long l2cap_sock_get_sndtimeo_cb(struct l2cap_chan *chan)
{
struct sock *sk = chan->data;
- return sk->sk_sndtimeo;
+ return READ_ONCE(sk->sk_sndtimeo);
}
static struct pid *l2cap_sock_get_peer_pid_cb(struct l2cap_chan *chan)
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index 43aa01fd07b9..305044a84478 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(baswap);
* bt_to_errno() - Bluetooth error codes to standard errno
* @code: Bluetooth error code to be converted
*
- * This function takes a Bluetooth error code as input and convets
+ * This function takes a Bluetooth error code as input and converts
* it to an equivalent Unix/standard errno value.
*
* Return:
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index d540f7b4f75f..a3d16eece0d2 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -464,7 +464,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
/* Devices marked as raw-only are neither configured
* nor unconfigured controllers.
*/
- if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
+ if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
continue;
if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
@@ -522,7 +522,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
/* Devices marked as raw-only are neither configured
* nor unconfigured controllers.
*/
- if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
+ if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
continue;
if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
@@ -576,7 +576,7 @@ static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
/* Devices marked as raw-only are neither configured
* nor unconfigured controllers.
*/
- if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
+ if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
continue;
if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
@@ -612,12 +612,12 @@ static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
static bool is_configured(struct hci_dev *hdev)
{
- if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
+ if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
!hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
return false;
- if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
- test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
+ if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
+ hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
!bacmp(&hdev->public_addr, BDADDR_ANY))
return false;
@@ -628,12 +628,12 @@ static __le32 get_missing_options(struct hci_dev *hdev)
{
u32 options = 0;
- if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
+ if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
!hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
options |= MGMT_OPTION_EXTERNAL_CONFIG;
- if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
- test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
+ if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
+ hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
!bacmp(&hdev->public_addr, BDADDR_ANY))
options |= MGMT_OPTION_PUBLIC_ADDRESS;
@@ -669,7 +669,7 @@ static int read_config_info(struct sock *sk, struct hci_dev *hdev,
memset(&rp, 0, sizeof(rp));
rp.manufacturer = cpu_to_le16(hdev->manufacturer);
- if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
+ if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
options |= MGMT_OPTION_EXTERNAL_CONFIG;
if (hdev->set_bdaddr)
@@ -828,8 +828,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
if (lmp_sc_capable(hdev))
settings |= MGMT_SETTING_SECURE_CONN;
- if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
- &hdev->quirks))
+ if (hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
settings |= MGMT_SETTING_WIDEBAND_SPEECH;
}
@@ -841,8 +840,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
settings |= MGMT_SETTING_ADVERTISING;
}
- if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
- hdev->set_bdaddr)
+ if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) || hdev->set_bdaddr)
settings |= MGMT_SETTING_CONFIGURATION;
if (cis_central_capable(hdev))
@@ -924,19 +922,19 @@ static u32 get_current_settings(struct hci_dev *hdev)
if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
settings |= MGMT_SETTING_WIDEBAND_SPEECH;
- if (cis_central_capable(hdev))
+ if (cis_central_enabled(hdev))
settings |= MGMT_SETTING_CIS_CENTRAL;
- if (cis_peripheral_capable(hdev))
+ if (cis_peripheral_enabled(hdev))
settings |= MGMT_SETTING_CIS_PERIPHERAL;
- if (bis_capable(hdev))
+ if (bis_enabled(hdev))
settings |= MGMT_SETTING_ISO_BROADCASTER;
- if (sync_recv_capable(hdev))
+ if (sync_recv_enabled(hdev))
settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
- if (ll_privacy_capable(hdev))
+ if (ll_privacy_enabled(hdev))
settings |= MGMT_SETTING_LL_PRIVACY;
return settings;
@@ -1080,7 +1078,8 @@ static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
struct mgmt_mesh_tx *mesh_tx;
hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
- hci_disable_advertising_sync(hdev);
+ if (list_empty(&hdev->adv_instances))
+ hci_disable_advertising_sync(hdev);
mesh_tx = mgmt_mesh_next(hdev, NULL);
if (mesh_tx)
@@ -1324,8 +1323,7 @@ static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
struct mgmt_mode *cp;
/* Make sure cmd still outstanding. */
- if (err == -ECANCELED ||
- cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
+ if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
return;
cp = cmd->param;
@@ -1352,23 +1350,29 @@ static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
mgmt_status(err));
}
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
}
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
struct mgmt_pending_cmd *cmd = data;
- struct mgmt_mode *cp;
+ struct mgmt_mode cp;
+
+ mutex_lock(&hdev->mgmt_pending_lock);
/* Make sure cmd still outstanding. */
- if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
+ if (!__mgmt_pending_listed(hdev, cmd)) {
+ mutex_unlock(&hdev->mgmt_pending_lock);
return -ECANCELED;
+ }
- cp = cmd->param;
+ memcpy(&cp, cmd->param, sizeof(cp));
+
+ mutex_unlock(&hdev->mgmt_pending_lock);
BT_DBG("%s", hdev->name);
- return hci_set_powered_sync(hdev, cp->val);
+ return hci_set_powered_sync(hdev, cp.val);
}
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -1517,8 +1521,7 @@ static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "err %d", err);
/* Make sure cmd still outstanding. */
- if (err == -ECANCELED ||
- cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
+ if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
return;
hci_dev_lock(hdev);
@@ -1540,12 +1543,15 @@ static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
new_settings(hdev, cmd->sk);
done:
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
hci_dev_unlock(hdev);
}
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
+ if (!mgmt_pending_listed(hdev, data))
+ return -ECANCELED;
+
BT_DBG("%s", hdev->name);
return hci_update_discoverable_sync(hdev);
@@ -1692,8 +1698,7 @@ static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "err %d", err);
/* Make sure cmd still outstanding. */
- if (err == -ECANCELED ||
- cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
+ if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
return;
hci_dev_lock(hdev);
@@ -1708,7 +1713,7 @@ static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
new_settings(hdev, cmd->sk);
done:
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
hci_dev_unlock(hdev);
}
@@ -1744,6 +1749,9 @@ static int set_connectable_update_settings(struct hci_dev *hdev,
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
+ if (!mgmt_pending_listed(hdev, data))
+ return -ECANCELED;
+
BT_DBG("%s", hdev->name);
return hci_update_connectable_sync(hdev);
@@ -1920,14 +1928,17 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
struct cmd_lookup match = { NULL, hdev };
struct mgmt_pending_cmd *cmd = data;
- struct mgmt_mode *cp = cmd->param;
- u8 enable = cp->val;
+ struct mgmt_mode *cp;
+ u8 enable;
bool changed;
/* Make sure cmd still outstanding. */
- if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
+ if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
return;
+ cp = cmd->param;
+ enable = cp->val;
+
if (err) {
u8 mgmt_err = mgmt_status(err);
@@ -1936,8 +1947,7 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
new_settings(hdev, NULL);
}
- mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true,
- cmd_status_rsp, &mgmt_err);
+ mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
return;
}
@@ -1947,7 +1957,7 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
}
- mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true, settings_rsp, &match);
+ settings_rsp(cmd, &match);
if (changed)
new_settings(hdev, match.sk);
@@ -1961,14 +1971,25 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
struct mgmt_pending_cmd *cmd = data;
- struct mgmt_mode *cp = cmd->param;
+ struct mgmt_mode cp;
bool changed = false;
int err;
- if (cp->val)
+ mutex_lock(&hdev->mgmt_pending_lock);
+
+ if (!__mgmt_pending_listed(hdev, cmd)) {
+ mutex_unlock(&hdev->mgmt_pending_lock);
+ return -ECANCELED;
+ }
+
+ memcpy(&cp, cmd->param, sizeof(cp));
+
+ mutex_unlock(&hdev->mgmt_pending_lock);
+
+ if (cp.val)
changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
- err = hci_write_ssp_mode_sync(hdev, cp->val);
+ err = hci_write_ssp_mode_sync(hdev, cp.val);
if (!err && changed)
hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
@@ -2061,32 +2082,50 @@ static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
+ struct mgmt_pending_cmd *cmd = data;
struct cmd_lookup match = { NULL, hdev };
u8 status = mgmt_status(err);
bt_dev_dbg(hdev, "err %d", err);
- if (status) {
- mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, cmd_status_rsp,
- &status);
+ if (err == -ECANCELED || !mgmt_pending_valid(hdev, data))
return;
+
+ if (status) {
+ mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status);
+ goto done;
}
- mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, settings_rsp, &match);
+ settings_rsp(cmd, &match);
new_settings(hdev, match.sk);
if (match.sk)
sock_put(match.sk);
+
+done:
+ mgmt_pending_free(cmd);
}
static int set_le_sync(struct hci_dev *hdev, void *data)
{
struct mgmt_pending_cmd *cmd = data;
- struct mgmt_mode *cp = cmd->param;
- u8 val = !!cp->val;
+ struct mgmt_mode cp;
+ u8 val;
int err;
+ mutex_lock(&hdev->mgmt_pending_lock);
+
+ if (!__mgmt_pending_listed(hdev, cmd)) {
+ mutex_unlock(&hdev->mgmt_pending_lock);
+ return -ECANCELED;
+ }
+
+ memcpy(&cp, cmd->param, sizeof(cp));
+ val = !!cp.val;
+
+ mutex_unlock(&hdev->mgmt_pending_lock);
+
if (!val) {
hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
@@ -2128,7 +2167,12 @@ static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
struct mgmt_pending_cmd *cmd = data;
u8 status = mgmt_status(err);
- struct sock *sk = cmd->sk;
+ struct sock *sk;
+
+ if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
+ return;
+
+ sk = cmd->sk;
if (status) {
mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,
@@ -2143,21 +2187,37 @@ static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
struct mgmt_pending_cmd *cmd = data;
- struct mgmt_cp_set_mesh *cp = cmd->param;
- size_t len = cmd->param_len;
+ struct mgmt_cp_set_mesh cp;
+ size_t len;
+
+ mutex_lock(&hdev->mgmt_pending_lock);
+
+ if (!__mgmt_pending_listed(hdev, cmd)) {
+ mutex_unlock(&hdev->mgmt_pending_lock);
+ return -ECANCELED;
+ }
+
+ memcpy(&cp, cmd->param, sizeof(cp));
+
+ mutex_unlock(&hdev->mgmt_pending_lock);
+
+ len = cmd->param_len;
memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
- if (cp->enable)
+ if (cp.enable)
hci_dev_set_flag(hdev, HCI_MESH);
else
hci_dev_clear_flag(hdev, HCI_MESH);
- len -= sizeof(*cp);
+ hdev->le_scan_interval = __le16_to_cpu(cp.period);
+ hdev->le_scan_window = __le16_to_cpu(cp.window);
+
+ len -= sizeof(cp);
/* If filters don't fit, forward all adv pkts */
if (len <= sizeof(hdev->mesh_ad_types))
- memcpy(hdev->mesh_ad_types, cp->ad_types, len);
+ memcpy(hdev->mesh_ad_types, cp.ad_types, len);
hci_update_passive_scan_sync(hdev);
return 0;
@@ -2167,6 +2227,7 @@ static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
struct mgmt_cp_set_mesh *cp = data;
struct mgmt_pending_cmd *cmd;
+ __u16 period, window;
int err = 0;
bt_dev_dbg(hdev, "sock %p", sk);
@@ -2180,6 +2241,23 @@ static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
MGMT_STATUS_INVALID_PARAMS);
+ /* Keep allowed ranges in sync with set_scan_params() */
+ period = __le16_to_cpu(cp->period);
+
+ if (period < 0x0004 || period > 0x4000)
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ window = __le16_to_cpu(cp->window);
+
+ if (window < 0x0004 || window > 0x4000)
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ if (window > period)
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
+ MGMT_STATUS_INVALID_PARAMS);
+
hci_dev_lock(hdev);
cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
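The added checks mirror the ones in set_scan_params() (which gains the matching "Keep allowed ranges in sync" comment further down). As a minimal standalone sketch, assuming the usual reading of these fields as 16-bit LE scan values in 0.625 ms units (an assumption, not something this patch states), the validation reduces to:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch only: period and window must each lie in [0x0004, 0x4000] and
 * the window may not exceed the period.
 */
static bool mesh_scan_params_valid(uint16_t period, uint16_t window)
{
        if (period < 0x0004 || period > 0x4000)
                return false;
        if (window < 0x0004 || window > 0x4000)
                return false;
        return window <= period;
}

int main(void)
{
        printf("%d\n", mesh_scan_params_valid(0x0010, 0x0010)); /* 1: equal is allowed */
        printf("%d\n", mesh_scan_params_valid(0x0010, 0x0020)); /* 0: window > period */
        return 0;
}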
@@ -3217,6 +3295,7 @@ static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
switch (link_type) {
case CIS_LINK:
case BIS_LINK:
+ case PA_LINK:
case LE_LINK:
switch (addr_type) {
case ADDR_LE_DEV_PUBLIC:
@@ -3846,15 +3925,16 @@ static int name_changed_sync(struct hci_dev *hdev, void *data)
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
struct mgmt_pending_cmd *cmd = data;
- struct mgmt_cp_set_local_name *cp = cmd->param;
+ struct mgmt_cp_set_local_name *cp;
u8 status = mgmt_status(err);
bt_dev_dbg(hdev, "err %d", err);
- if (err == -ECANCELED ||
- cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
+ if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
return;
+ cp = cmd->param;
+
if (status) {
mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
status);
@@ -3866,13 +3946,27 @@ static void set_name_complete(struct hci_dev *hdev, void *data, int err)
hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
}
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
}
static int set_name_sync(struct hci_dev *hdev, void *data)
{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_set_local_name cp;
+
+ mutex_lock(&hdev->mgmt_pending_lock);
+
+ if (!__mgmt_pending_listed(hdev, cmd)) {
+ mutex_unlock(&hdev->mgmt_pending_lock);
+ return -ECANCELED;
+ }
+
+ memcpy(&cp, cmd->param, sizeof(cp));
+
+ mutex_unlock(&hdev->mgmt_pending_lock);
+
if (lmp_bredr_capable(hdev)) {
- hci_update_name_sync(hdev);
+ hci_update_name_sync(hdev, cp.name);
hci_update_eir_sync(hdev);
}
@@ -4024,12 +4118,10 @@ int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
struct mgmt_pending_cmd *cmd = data;
- struct sk_buff *skb = cmd->skb;
+ struct sk_buff *skb;
u8 status = mgmt_status(err);
- if (err == -ECANCELED ||
- cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
- return;
+ skb = cmd->skb;
if (!status) {
if (!skb)
@@ -4056,7 +4148,7 @@ static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
if (skb && !IS_ERR(skb))
kfree_skb(skb);
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
}
static int set_default_phy_sync(struct hci_dev *hdev, void *data)
@@ -4064,7 +4156,9 @@ static int set_default_phy_sync(struct hci_dev *hdev, void *data)
struct mgmt_pending_cmd *cmd = data;
struct mgmt_cp_set_phy_configuration *cp = cmd->param;
struct hci_cp_le_set_default_phy cp_phy;
- u32 selected_phys = __le32_to_cpu(cp->selected_phys);
+ u32 selected_phys;
+
+ selected_phys = __le32_to_cpu(cp->selected_phys);
memset(&cp_phy, 0, sizeof(cp_phy));
@@ -4204,7 +4298,7 @@ static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
+ cmd = mgmt_pending_new(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
len);
if (!cmd)
err = -ENOMEM;
@@ -4285,7 +4379,7 @@ static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
bt_dev_dbg(hdev, "sock %p", sk);
- if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
+ if (!hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
return mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_WIDEBAND_SPEECH,
MGMT_STATUS_NOT_SUPPORTED);
@@ -4448,13 +4542,11 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
return -ENOMEM;
#ifdef CONFIG_BT_FEATURE_DEBUG
- if (!hdev) {
- flags = bt_dbg_get() ? BIT(0) : 0;
+ flags = bt_dbg_get() ? BIT(0) : 0;
- memcpy(rp->features[idx].uuid, debug_uuid, 16);
- rp->features[idx].flags = cpu_to_le32(flags);
- idx++;
- }
+ memcpy(rp->features[idx].uuid, debug_uuid, 16);
+ rp->features[idx].flags = cpu_to_le32(flags);
+ idx++;
#endif
if (hdev && hci_dev_le_state_simultaneous(hdev)) {
@@ -4492,7 +4584,7 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
}
if (IS_ENABLED(CONFIG_BT_LE)) {
- flags = iso_enabled() ? BIT(0) : 0;
+ flags = iso_inited() ? BIT(0) : 0;
memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
rp->features[idx].flags = cpu_to_le32(flags);
idx++;
@@ -5165,7 +5257,17 @@ static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
{
struct mgmt_rp_add_adv_patterns_monitor rp;
struct mgmt_pending_cmd *cmd = data;
- struct adv_monitor *monitor = cmd->user_data;
+ struct adv_monitor *monitor;
+
+ /* This is likely the result of hdev being closed: mgmt_index_removed
+ * is attempting to clean up any pending command, so
+ * hci_adv_monitors_clear is about to be called, which will take care
+ * of freeing the adv_monitor instances.
+ */
+ if (status == -ECANCELED && !mgmt_pending_valid(hdev, cmd))
+ return;
+
+ monitor = cmd->user_data;
hci_dev_lock(hdev);
@@ -5191,9 +5293,20 @@ static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
{
struct mgmt_pending_cmd *cmd = data;
- struct adv_monitor *monitor = cmd->user_data;
+ struct adv_monitor *mon;
+
+ mutex_lock(&hdev->mgmt_pending_lock);
- return hci_add_adv_monitor(hdev, monitor);
+ if (!__mgmt_pending_listed(hdev, cmd)) {
+ mutex_unlock(&hdev->mgmt_pending_lock);
+ return -ECANCELED;
+ }
+
+ mon = cmd->user_data;
+
+ mutex_unlock(&hdev->mgmt_pending_lock);
+
+ return hci_add_adv_monitor(hdev, mon);
}
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
@@ -5460,7 +5573,8 @@ unlock:
status);
}
-static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
+static void read_local_oob_data_complete(struct hci_dev *hdev, void *data,
+ int err)
{
struct mgmt_rp_read_local_oob_data mgmt_rp;
size_t rp_size = sizeof(mgmt_rp);
@@ -5480,7 +5594,8 @@ static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int e
bt_dev_dbg(hdev, "status %d", status);
if (status) {
- mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
+ mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ status);
goto remove;
}
@@ -5762,17 +5877,12 @@ static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
bt_dev_dbg(hdev, "err %d", err);
- if (err == -ECANCELED)
- return;
-
- if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
- cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
- cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
+ if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
return;
mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
cmd->param, 1);
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
DISCOVERY_FINDING);
@@ -5780,6 +5890,9 @@ static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
+ if (!mgmt_pending_listed(hdev, data))
+ return -ECANCELED;
+
return hci_start_discovery_sync(hdev);
}
@@ -5985,15 +6098,14 @@ static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
struct mgmt_pending_cmd *cmd = data;
- if (err == -ECANCELED ||
- cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
+ if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
return;
bt_dev_dbg(hdev, "err %d", err);
mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
cmd->param, 1);
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
if (!err)
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
@@ -6001,6 +6113,9 @@ static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
+ if (!mgmt_pending_listed(hdev, data))
+ return -ECANCELED;
+
return hci_stop_discovery_sync(hdev);
}
@@ -6210,14 +6325,18 @@ static void enable_advertising_instance(struct hci_dev *hdev, int err)
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
+ struct mgmt_pending_cmd *cmd = data;
struct cmd_lookup match = { NULL, hdev };
u8 instance;
struct adv_info *adv_instance;
u8 status = mgmt_status(err);
+ if (err == -ECANCELED || !mgmt_pending_valid(hdev, data))
+ return;
+
if (status) {
- mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true,
- cmd_status_rsp, &status);
+ mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status);
+ mgmt_pending_free(cmd);
return;
}
@@ -6226,8 +6345,7 @@ static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
else
hci_dev_clear_flag(hdev, HCI_ADVERTISING);
- mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true, settings_rsp,
- &match);
+ settings_rsp(cmd, &match);
new_settings(hdev, match.sk);
@@ -6259,10 +6377,23 @@ static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
struct mgmt_pending_cmd *cmd = data;
- struct mgmt_mode *cp = cmd->param;
- u8 val = !!cp->val;
+ struct mgmt_mode cp;
+ u8 val;
- if (cp->val == 0x02)
+ mutex_lock(&hdev->mgmt_pending_lock);
+
+ if (!__mgmt_pending_listed(hdev, cmd)) {
+ mutex_unlock(&hdev->mgmt_pending_lock);
+ return -ECANCELED;
+ }
+
+ memcpy(&cp, cmd->param, sizeof(cp));
+
+ mutex_unlock(&hdev->mgmt_pending_lock);
+
+ val = !!cp.val;
+
+ if (cp.val == 0x02)
hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
else
hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
@@ -6432,6 +6563,7 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
MGMT_STATUS_NOT_SUPPORTED);
+ /* Keep allowed ranges in sync with set_mesh() */
interval = __le16_to_cpu(cp->interval);
if (interval < 0x0004 || interval > 0x4000)
@@ -7912,7 +8044,7 @@ static int set_external_config(struct sock *sk, struct hci_dev *hdev,
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
MGMT_STATUS_INVALID_PARAMS);
- if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
+ if (!hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
MGMT_STATUS_NOT_SUPPORTED);
@@ -8014,10 +8146,6 @@ static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
u8 status = mgmt_status(err);
u16 eir_len;
- if (err == -ECANCELED ||
- cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
- return;
-
if (!status) {
if (!skb)
status = MGMT_STATUS_FAILED;
@@ -8124,7 +8252,7 @@ done:
kfree_skb(skb);
kfree(mgmt_rp);
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
}
static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
@@ -8133,7 +8261,7 @@ static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
struct mgmt_pending_cmd *cmd;
int err;
- cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
+ cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
cp, sizeof(*cp));
if (!cmd)
return -ENOMEM;
@@ -9315,7 +9443,7 @@ void mgmt_index_added(struct hci_dev *hdev)
{
struct mgmt_ev_ext_index ev;
- if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
+ if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
return;
if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
@@ -9339,7 +9467,7 @@ void mgmt_index_removed(struct hci_dev *hdev)
struct mgmt_ev_ext_index ev;
struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };
- if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
+ if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
return;
mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);
@@ -9683,7 +9811,9 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
if (!mgmt_connected)
return;
- if (link_type != ACL_LINK && link_type != LE_LINK)
+ if (link_type != ACL_LINK &&
+ link_type != LE_LINK &&
+ link_type != BIS_LINK)
return;
bacpy(&ev.addr.bdaddr, bdaddr);
@@ -10066,7 +10196,7 @@ static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
(rssi == HCI_RSSI_INVALID ||
(rssi < hdev->discovery.rssi &&
- !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
+ !hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER))))
return false;
if (hdev->discovery.uuid_count != 0) {
@@ -10084,7 +10214,7 @@ static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
/* If duplicate filtering does not report RSSI changes, then restart
* scanning to ensure updated result with updated RSSI values.
*/
- if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
+ if (hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER)) {
/* Validate RSSI value against the RSSI threshold once more. */
if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
rssi < hdev->discovery.rssi)
diff --git a/net/bluetooth/mgmt_config.c b/net/bluetooth/mgmt_config.c
index 6ef701c27da4..c4063d200c0a 100644
--- a/net/bluetooth/mgmt_config.c
+++ b/net/bluetooth/mgmt_config.c
@@ -13,13 +13,13 @@
#define HDEV_PARAM_U16(_param_name_) \
struct {\
- struct mgmt_tlv entry; \
+ struct mgmt_tlv_hdr entry; \
__le16 value; \
} __packed _param_name_
#define HDEV_PARAM_U8(_param_name_) \
struct {\
- struct mgmt_tlv entry; \
+ struct mgmt_tlv_hdr entry; \
__u8 value; \
} __packed _param_name_
diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c
index a88a07da3947..aa7b5585cb26 100644
--- a/net/bluetooth/mgmt_util.c
+++ b/net/bluetooth/mgmt_util.c
@@ -320,6 +320,52 @@ void mgmt_pending_remove(struct mgmt_pending_cmd *cmd)
mgmt_pending_free(cmd);
}
+bool __mgmt_pending_listed(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd)
+{
+ struct mgmt_pending_cmd *tmp;
+
+ lockdep_assert_held(&hdev->mgmt_pending_lock);
+
+ if (!cmd)
+ return false;
+
+ list_for_each_entry(tmp, &hdev->mgmt_pending, list) {
+ if (cmd == tmp)
+ return true;
+ }
+
+ return false;
+}
+
+bool mgmt_pending_listed(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd)
+{
+ bool listed;
+
+ mutex_lock(&hdev->mgmt_pending_lock);
+ listed = __mgmt_pending_listed(hdev, cmd);
+ mutex_unlock(&hdev->mgmt_pending_lock);
+
+ return listed;
+}
+
+bool mgmt_pending_valid(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd)
+{
+ bool listed;
+
+ if (!cmd)
+ return false;
+
+ mutex_lock(&hdev->mgmt_pending_lock);
+
+ listed = __mgmt_pending_listed(hdev, cmd);
+ if (listed)
+ list_del(&cmd->list);
+
+ mutex_unlock(&hdev->mgmt_pending_lock);
+
+ return listed;
+}
+
void mgmt_mesh_foreach(struct hci_dev *hdev,
void (*cb)(struct mgmt_mesh_tx *mesh_tx, void *data),
void *data, struct sock *sk)
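The mgmt.c hunks above all follow the same shape with these helpers: the sync callback takes mgmt_pending_lock, confirms the command is still on the pending list, copies its parameters to the stack, and only then drops the lock. A rough userspace analogue of that shape, with made-up names and a pthread mutex standing in for the kernel lock, is:

#include <pthread.h>
#include <stdbool.h>
#include <string.h>

/* Illustrative only, not kernel code. */
struct pending_cmd {
        struct pending_cmd *next;
        char param[32];
};

static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pending_cmd *pending_list;

static bool pending_listed(const struct pending_cmd *cmd)
{
        const struct pending_cmd *tmp;

        for (tmp = pending_list; tmp; tmp = tmp->next)
                if (tmp == cmd)
                        return true;
        return false;
}

static int run_sync(struct pending_cmd *cmd, char out[32])
{
        pthread_mutex_lock(&pending_lock);
        if (!pending_listed(cmd)) {             /* raced with cancellation */
                pthread_mutex_unlock(&pending_lock);
                return -1;                      /* -ECANCELED in the kernel */
        }
        memcpy(out, cmd->param, sizeof(cmd->param));
        pthread_mutex_unlock(&pending_lock);
        return 0;
}

The stack copy is what lets the real callers drop the lock before calling into the HCI sync machinery instead of dereferencing cmd->param afterwards.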
diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h
index 024e51dd6937..bcba8c9d8952 100644
--- a/net/bluetooth/mgmt_util.h
+++ b/net/bluetooth/mgmt_util.h
@@ -65,6 +65,9 @@ struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode,
void *data, u16 len);
void mgmt_pending_free(struct mgmt_pending_cmd *cmd);
void mgmt_pending_remove(struct mgmt_pending_cmd *cmd);
+bool __mgmt_pending_listed(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd);
+bool mgmt_pending_listed(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd);
+bool mgmt_pending_valid(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd);
void mgmt_mesh_foreach(struct hci_dev *hdev,
void (*cb)(struct mgmt_mesh_tx *mesh_tx, void *data),
void *data, struct sock *sk);
diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c
index 5a8ccc491b14..c560d8467669 100644
--- a/net/bluetooth/msft.c
+++ b/net/bluetooth/msft.c
@@ -989,7 +989,7 @@ static void msft_monitor_device_evt(struct hci_dev *hdev, struct sk_buff *skb)
handle_data = msft_find_handle_data(hdev, ev->monitor_handle, false);
- if (!test_bit(HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER, &hdev->quirks)) {
+ if (!hci_test_quirk(hdev, HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER)) {
if (!handle_data)
return;
mgmt_handle = handle_data->mgmt_handle;
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 3b8f39618d65..96250807b32b 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1962,7 +1962,8 @@ static void rfcomm_accept_connection(struct rfcomm_session *s)
int err;
/* Fast check for a new connection.
- * Avoids unnesesary socket allocations. */
+ * Avoids unnecessary socket allocations.
+ */
if (list_empty(&bt_sk(sock->sk)->accept_q))
return;
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 21a5b5535ebc..376ce6de84be 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -438,7 +438,6 @@ static int __rfcomm_release_dev(void __user *arg)
{
struct rfcomm_dev_req req;
struct rfcomm_dev *dev;
- struct tty_struct *tty;
if (copy_from_user(&req, arg, sizeof(req)))
return -EFAULT;
@@ -464,11 +463,7 @@ static int __rfcomm_release_dev(void __user *arg)
rfcomm_dlc_close(dev->dlc, 0);
/* Shut down TTY synchronously before freeing rfcomm_dev */
- tty = tty_port_tty_get(&dev->port);
- if (tty) {
- tty_vhangup(tty);
- tty_kref_put(tty);
- }
+ tty_port_tty_vhangup(&dev->port);
if (!test_bit(RFCOMM_TTY_OWNED, &dev->status))
tty_port_put(&dev->port);
@@ -980,7 +975,7 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty,
baud = RFCOMM_RPN_BR_230400;
break;
default:
- /* 9600 is standard accordinag to the RFCOMM specification */
+ /* 9600 is standard according to the RFCOMM specification */
baud = RFCOMM_RPN_BR_9600;
break;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 2945d27e75dc..ab0cf442d57b 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -338,7 +338,7 @@ static int sco_connect(struct sock *sk)
hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
sco_pi(sk)->setting, &sco_pi(sk)->codec,
- sk->sk_sndtimeo);
+ READ_ONCE(sk->sk_sndtimeo));
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto unlock;
@@ -367,7 +367,7 @@ static int sco_connect(struct sock *sk)
sk->sk_state = BT_CONNECTED;
} else {
sk->sk_state = BT_CONNECT;
- sco_sock_set_timer(sk, sk->sk_sndtimeo);
+ sco_sock_set_timer(sk, READ_ONCE(sk->sk_sndtimeo));
}
release_sock(sk);
@@ -498,6 +498,13 @@ static void sco_sock_kill(struct sock *sk)
BT_DBG("sk %p state %d", sk, sk->sk_state);
+ /* Sock is dead, so set conn->sk to NULL to avoid possible UAF */
+ if (sco_pi(sk)->conn) {
+ sco_conn_lock(sco_pi(sk)->conn);
+ sco_pi(sk)->conn->sk = NULL;
+ sco_conn_unlock(sco_pi(sk)->conn);
+ }
+
/* Kill poor orphan */
bt_sock_unlink(&sco_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 47f359f24d1f..45512b2ba951 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -1379,7 +1379,7 @@ static void smp_timeout(struct work_struct *work)
bt_dev_dbg(conn->hcon->hdev, "conn %p", conn);
- hci_disconnect(conn->hcon, HCI_ERROR_REMOTE_USER_TERM);
+ hci_disconnect(conn->hcon, HCI_ERROR_AUTH_FAILURE);
}
static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
@@ -2977,8 +2977,25 @@ static int smp_sig_channel(struct l2cap_chan *chan, struct sk_buff *skb)
if (code > SMP_CMD_MAX)
goto drop;
- if (smp && !test_and_clear_bit(code, &smp->allow_cmd))
+ if (smp && !test_and_clear_bit(code, &smp->allow_cmd)) {
+ /* If there is a context and the command is not allowed, consider
+ * it a failure so the session is cleaned up properly.
+ */
+ switch (code) {
+ case SMP_CMD_IDENT_INFO:
+ case SMP_CMD_IDENT_ADDR_INFO:
+ case SMP_CMD_SIGN_INFO:
+ /* 3.6.1. Key distribution and generation
+ *
+ * A device may reject a distributed key by sending the
+ * Pairing Failed command with the reason set to
+ * "Key Rejected".
+ */
+ smp_failure(conn, SMP_KEY_REJECTED);
+ break;
+ }
goto drop;
+ }
/* If we don't have a context the only allowed commands are
* pairing request and security request.
@@ -3172,7 +3189,7 @@ static void smp_ready_cb(struct l2cap_chan *chan)
/* No need to call l2cap_chan_hold() here since we already own
* the reference taken in smp_new_conn_cb(). This is just the
* first time that we tie it to a specific pointer. The code in
- * l2cap_core.c ensures that there's no risk this function wont
+ * l2cap_core.c ensures that there's no risk this function won't
* get called if smp_new_conn_cb was previously called.
*/
conn->smp = chan;
diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
index 87a59ec2c9f0..c5da53dfab04 100644
--- a/net/bluetooth/smp.h
+++ b/net/bluetooth/smp.h
@@ -138,6 +138,7 @@ struct smp_cmd_keypress_notify {
#define SMP_NUMERIC_COMP_FAILED 0x0c
#define SMP_BREDR_PAIRING_IN_PROGRESS 0x0d
#define SMP_CROSS_TRANSP_NOT_ALLOWED 0x0e
+#define SMP_KEY_REJECTED 0x0f
#define SMP_MIN_ENC_KEY_SIZE 7
#define SMP_MAX_ENC_KEY_SIZE 16
diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
index f71f67c6896b..812457819b5a 100644
--- a/net/bpf/bpf_dummy_struct_ops.c
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -171,7 +171,8 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
}
/* prog doesn't take the ownership of the reference from caller */
bpf_prog_inc(prog);
- bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_link_lops, prog);
+ bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_link_lops, prog,
+ prog->expected_attach_type);
op_idx = prog->expected_attach_type;
err = bpf_struct_ops_prepare_trampoline(tlinks, link,
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index aaf13a7d58ed..8b7d0b90fea7 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -29,7 +29,6 @@
#include <trace/events/bpf_test_run.h>
struct bpf_test_timer {
- enum { NO_PREEMPT, NO_MIGRATE } mode;
u32 i;
u64 time_start, time_spent;
};
@@ -37,12 +36,7 @@ struct bpf_test_timer {
static void bpf_test_timer_enter(struct bpf_test_timer *t)
__acquires(rcu)
{
- rcu_read_lock();
- if (t->mode == NO_PREEMPT)
- preempt_disable();
- else
- migrate_disable();
-
+ rcu_read_lock_dont_migrate();
t->time_start = ktime_get_ns();
}
@@ -50,12 +44,7 @@ static void bpf_test_timer_leave(struct bpf_test_timer *t)
__releases(rcu)
{
t->time_start = 0;
-
- if (t->mode == NO_PREEMPT)
- preempt_enable();
- else
- migrate_enable();
- rcu_read_unlock();
+ rcu_read_unlock_migrate();
}
static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
@@ -374,7 +363,7 @@ static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
{
struct xdp_test_data xdp = { .batch_size = batch_size };
- struct bpf_test_timer t = { .mode = NO_MIGRATE };
+ struct bpf_test_timer t = {};
int ret;
if (!repeat)
@@ -404,7 +393,7 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
struct bpf_prog_array_item item = {.prog = prog};
struct bpf_run_ctx *old_ctx;
struct bpf_cg_run_ctx run_ctx;
- struct bpf_test_timer t = { NO_MIGRATE };
+ struct bpf_test_timer t = {};
enum bpf_cgroup_storage_type stype;
int ret;
@@ -524,27 +513,27 @@ __bpf_kfunc int bpf_fentry_test1(int a)
}
EXPORT_SYMBOL_GPL(bpf_fentry_test1);
-int noinline bpf_fentry_test2(int a, u64 b)
+noinline int bpf_fentry_test2(int a, u64 b)
{
return a + b;
}
-int noinline bpf_fentry_test3(char a, int b, u64 c)
+noinline int bpf_fentry_test3(char a, int b, u64 c)
{
return a + b + c;
}
-int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
+noinline int bpf_fentry_test4(void *a, char b, int c, u64 d)
{
return (long)a + b + c + d;
}
-int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
+noinline int bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
return a + (long)b + c + d + e;
}
-int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
+noinline int bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
return a + (long)b + c + d + (long)e + f;
}
@@ -553,13 +542,13 @@ struct bpf_fentry_test_t {
struct bpf_fentry_test_t *a;
};
-int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
+noinline int bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
- asm volatile ("": "+r"(arg));
+ asm volatile ("" : "+r"(arg));
return (long)arg;
}
-int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
+noinline int bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
return (long)arg->a;
}
@@ -569,12 +558,12 @@ __bpf_kfunc u32 bpf_fentry_test9(u32 *a)
return *a;
}
-int noinline bpf_fentry_test10(const void *a)
+noinline int bpf_fentry_test10(const void *a)
{
return (long)a;
}
-void noinline bpf_fentry_test_sinfo(struct skb_shared_info *sinfo)
+noinline void bpf_fentry_test_sinfo(struct skb_shared_info *sinfo)
{
}
@@ -598,7 +587,7 @@ __bpf_kfunc int bpf_modify_return_test_tp(int nonce)
return nonce;
}
-int noinline bpf_fentry_shadow_test(int a)
+noinline int bpf_fentry_shadow_test(int a)
{
return a + 1;
}
@@ -665,7 +654,7 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
void *data;
- if (user_size < ETH_HLEN || user_size > PAGE_SIZE - headroom - tailroom)
+ if (user_size > PAGE_SIZE - headroom - tailroom)
return ERR_PTR(-EINVAL);
size = SKB_DATA_ALIGN(size);
@@ -1001,6 +990,9 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
kattr->test.cpu || kattr->test.batch_size)
return -EINVAL;
+ if (size < ETH_HLEN)
+ return -EINVAL;
+
data = bpf_test_init(kattr, kattr->test.data_size_in,
size, NET_SKB_PAD + NET_IP_ALIGN,
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
@@ -1207,9 +1199,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
{
bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ u32 retval = 0, meta_sz = 0, duration, max_linear_sz, size;
+ u32 linear_sz = kattr->test.data_size_in;
u32 batch_size = kattr->test.batch_size;
- u32 retval = 0, duration, max_data_sz;
- u32 size = kattr->test.data_size_in;
u32 headroom = XDP_PACKET_HEADROOM;
u32 repeat = kattr->test.repeat;
struct netdev_rx_queue *rxqueue;
@@ -1246,39 +1238,45 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
if (ctx) {
/* There can't be user provided data before the meta data */
- if (ctx->data_meta || ctx->data_end != size ||
+ if (ctx->data_meta || ctx->data_end > kattr->test.data_size_in ||
ctx->data > ctx->data_end ||
unlikely(xdp_metalen_invalid(ctx->data)) ||
(do_live && (kattr->test.data_out || kattr->test.ctx_out)))
goto free_ctx;
/* Meta data is allocated from the headroom */
headroom -= ctx->data;
- }
- max_data_sz = 4096 - headroom - tailroom;
- if (size > max_data_sz) {
- /* disallow live data mode for jumbo frames */
- if (do_live)
- goto free_ctx;
- size = max_data_sz;
+ meta_sz = ctx->data;
+ linear_sz = ctx->data_end;
}
- data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
+ max_linear_sz = PAGE_SIZE - headroom - tailroom;
+ linear_sz = min_t(u32, linear_sz, max_linear_sz);
+
+ /* disallow live data mode for jumbo frames */
+ if (do_live && kattr->test.data_size_in > linear_sz)
+ goto free_ctx;
+
+ if (kattr->test.data_size_in - meta_sz < ETH_HLEN)
+ goto free_ctx;
+
+ data = bpf_test_init(kattr, linear_sz, max_linear_sz, headroom, tailroom);
if (IS_ERR(data)) {
ret = PTR_ERR(data);
goto free_ctx;
}
rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
- rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
+ rxqueue->xdp_rxq.frag_size = PAGE_SIZE;
xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
- xdp_prepare_buff(&xdp, data, headroom, size, true);
+ xdp_prepare_buff(&xdp, data, headroom, linear_sz, true);
sinfo = xdp_get_shared_info_from_buff(&xdp);
ret = xdp_convert_md_to_buff(ctx, &xdp);
if (ret)
goto free_data;
+ size = linear_sz;
if (unlikely(kattr->test.data_size_in > size)) {
void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
@@ -1368,7 +1366,7 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr)
{
- struct bpf_test_timer t = { NO_PREEMPT };
+ struct bpf_test_timer t = {};
u32 size = kattr->test.data_size_in;
struct bpf_flow_dissector ctx = {};
u32 repeat = kattr->test.repeat;
@@ -1436,7 +1434,7 @@ out:
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr)
{
- struct bpf_test_timer t = { NO_PREEMPT };
+ struct bpf_test_timer t = {};
struct bpf_prog_array *progs = NULL;
struct bpf_sk_lookup_kern ctx = {};
u32 repeat = kattr->test.repeat;
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 0adeafe11a36..c37e52e2f29a 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -37,6 +37,11 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
int err;
if (netif_is_bridge_master(dev)) {
+ struct net_bridge *br = netdev_priv(dev);
+
+ if (event == NETDEV_REGISTER)
+ br_fdb_change_mac_address(br, dev->dev_addr);
+
err = br_vlan_bridge_event(dev, event, ptr);
if (err)
return notifier_from_errno(err);
@@ -74,9 +79,9 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
if (br->dev->addr_assign_type == NET_ADDR_SET)
break;
prechaddr_info = ptr;
- err = dev_pre_changeaddr_notify(br->dev,
- prechaddr_info->dev_addr,
- extack);
+ err = netif_pre_changeaddr_notify(br->dev,
+ prechaddr_info->dev_addr,
+ extack);
if (err)
return notifier_from_errno(err);
break;
@@ -259,6 +264,23 @@ static struct notifier_block br_switchdev_blocking_notifier = {
.notifier_call = br_switchdev_blocking_event,
};
+static int
+br_toggle_fdb_local_vlan_0(struct net_bridge *br, bool on,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (br_opt_get(br, BROPT_FDB_LOCAL_VLAN_0) == on)
+ return 0;
+
+ err = br_fdb_toggle_local_vlan_0(br, on, extack);
+ if (err)
+ return err;
+
+ br_opt_toggle(br, BROPT_FDB_LOCAL_VLAN_0, on);
+ return 0;
+}
+
/* br_boolopt_toggle - change user-controlled boolean option
*
* @br: bridge device
@@ -287,6 +309,9 @@ int br_boolopt_toggle(struct net_bridge *br, enum br_boolopt_id opt, bool on,
case BR_BOOLOPT_MDB_OFFLOAD_FAIL_NOTIFICATION:
br_opt_toggle(br, BROPT_MDB_OFFLOAD_FAIL_NOTIFICATION, on);
break;
+ case BR_BOOLOPT_FDB_LOCAL_VLAN_0:
+ err = br_toggle_fdb_local_vlan_0(br, on, extack);
+ break;
default:
/* shouldn't be called with unsupported options */
WARN_ON(1);
@@ -307,6 +332,8 @@ int br_boolopt_get(const struct net_bridge *br, enum br_boolopt_id opt)
return br_opt_get(br, BROPT_MST_ENABLED);
case BR_BOOLOPT_MDB_OFFLOAD_FAIL_NOTIFICATION:
return br_opt_get(br, BROPT_MDB_OFFLOAD_FAIL_NOTIFICATION);
+ case BR_BOOLOPT_FDB_LOCAL_VLAN_0:
+ return br_opt_get(br, BROPT_FDB_LOCAL_VLAN_0);
default:
/* shouldn't be called with unsupported options */
WARN_ON(1);
@@ -324,6 +351,13 @@ int br_boolopt_multi_toggle(struct net_bridge *br,
int err = 0;
int opt_id;
+ opt_id = find_next_bit(&bitmap, BITS_PER_LONG, BR_BOOLOPT_MAX);
+ if (opt_id != BITS_PER_LONG) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unknown boolean option %d",
+ opt_id);
+ return -EINVAL;
+ }
+
for_each_set_bit(opt_id, &bitmap, BR_BOOLOPT_MAX) {
bool on = !!(bm->optval & BIT(opt_id));
@@ -484,3 +518,4 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(BR_VERSION);
MODULE_ALIAS_RTNL_LINK("bridge");
MODULE_DESCRIPTION("Ethernet bridge driver");
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
diff --git a/net/bridge/br_cfm.c b/net/bridge/br_cfm.c
index a3c755d0a09d..c2c1c7d44c61 100644
--- a/net/bridge/br_cfm.c
+++ b/net/bridge/br_cfm.c
@@ -134,7 +134,7 @@ static void ccm_rx_timer_start(struct br_cfm_peer_mep *peer_mep)
* of the configured CC 'expected_interval'
* in order to detect CCM defect after 3.25 interval.
*/
- queue_delayed_work(system_wq, &peer_mep->ccm_rx_dwork,
+ queue_delayed_work(system_percpu_wq, &peer_mep->ccm_rx_dwork,
usecs_to_jiffies(interval_us / 4));
}
@@ -285,7 +285,7 @@ static void ccm_tx_work_expired(struct work_struct *work)
ccm_frame_tx(skb);
interval_us = interval_to_us(mep->cc_config.exp_interval);
- queue_delayed_work(system_wq, &mep->ccm_tx_dwork,
+ queue_delayed_work(system_percpu_wq, &mep->ccm_tx_dwork,
usecs_to_jiffies(interval_us));
}
@@ -809,7 +809,7 @@ int br_cfm_cc_ccm_tx(struct net_bridge *br, const u32 instance,
* to send first frame immediately
*/
mep->ccm_tx_end = jiffies + usecs_to_jiffies(tx_info->period * 1000000);
- queue_delayed_work(system_wq, &mep->ccm_tx_dwork, 0);
+ queue_delayed_work(system_percpu_wq, &mep->ccm_tx_dwork, 0);
save:
mep->cc_ccm_tx_info = *tx_info;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 902694c0ce64..58d22e2b85fc 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -459,6 +459,9 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
struct net_bridge_fdb_entry *f;
struct net_bridge *br = p->br;
struct net_bridge_vlan *v;
+ bool local_vlan_0;
+
+ local_vlan_0 = br_opt_get(br, BROPT_FDB_LOCAL_VLAN_0);
spin_lock_bh(&br->hash_lock);
vg = nbp_vlan_group(p);
@@ -468,11 +471,11 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
/* delete old one */
fdb_delete_local(br, p, f);
- /* if this port has no vlan information
- * configured, we can safely be done at
- * this point.
+ /* if this port has no vlan information configured, or
+ * local entries are only kept on VLAN 0, we can safely
+ * be done at this point.
*/
- if (!vg || !vg->num_vlans)
+ if (!vg || !vg->num_vlans || local_vlan_0)
goto insert;
}
}
@@ -481,7 +484,7 @@ insert:
/* insert new address, may fail if invalid address or dup. */
fdb_add_local(br, p, newaddr, 0);
- if (!vg || !vg->num_vlans)
+ if (!vg || !vg->num_vlans || local_vlan_0)
goto done;
/* Now add entries for every VLAN configured on the port.
@@ -500,6 +503,9 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
struct net_bridge_vlan_group *vg;
struct net_bridge_fdb_entry *f;
struct net_bridge_vlan *v;
+ bool local_vlan_0;
+
+ local_vlan_0 = br_opt_get(br, BROPT_FDB_LOCAL_VLAN_0);
spin_lock_bh(&br->hash_lock);
@@ -511,7 +517,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
fdb_add_local(br, NULL, newaddr, 0);
vg = br_vlan_group(br);
- if (!vg || !vg->num_vlans)
+ if (!vg || !vg->num_vlans || local_vlan_0)
goto out;
/* Now remove and add entries for every VLAN configured on the
* bridge. This function runs under RTNL so the bitmap will not
@@ -576,6 +582,102 @@ void br_fdb_cleanup(struct work_struct *work)
mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
}
+static void br_fdb_delete_locals_per_vlan_port(struct net_bridge *br,
+ struct net_bridge_port *p)
+{
+ struct net_bridge_vlan_group *vg;
+ struct net_bridge_vlan *v;
+ struct net_device *dev;
+
+ if (p) {
+ vg = nbp_vlan_group(p);
+ dev = p->dev;
+ } else {
+ vg = br_vlan_group(br);
+ dev = br->dev;
+ }
+
+ list_for_each_entry(v, &vg->vlan_list, vlist)
+ br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
+}
+
+static void br_fdb_delete_locals_per_vlan(struct net_bridge *br)
+{
+ struct net_bridge_port *p;
+
+ ASSERT_RTNL();
+
+ list_for_each_entry(p, &br->port_list, list)
+ br_fdb_delete_locals_per_vlan_port(br, p);
+
+ br_fdb_delete_locals_per_vlan_port(br, NULL);
+}
+
+static int br_fdb_insert_locals_per_vlan_port(struct net_bridge *br,
+ struct net_bridge_port *p,
+ struct netlink_ext_ack *extack)
+{
+ struct net_bridge_vlan_group *vg;
+ struct net_bridge_vlan *v;
+ struct net_device *dev;
+ int err;
+
+ if (p) {
+ vg = nbp_vlan_group(p);
+ dev = p->dev;
+ } else {
+ vg = br_vlan_group(br);
+ dev = br->dev;
+ }
+
+ list_for_each_entry(v, &vg->vlan_list, vlist) {
+ if (!br_vlan_should_use(v))
+ continue;
+
+ err = br_fdb_add_local(br, p, dev->dev_addr, v->vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int br_fdb_insert_locals_per_vlan(struct net_bridge *br,
+ struct netlink_ext_ack *extack)
+{
+ struct net_bridge_port *p;
+ int err;
+
+ ASSERT_RTNL();
+
+ list_for_each_entry(p, &br->port_list, list) {
+ err = br_fdb_insert_locals_per_vlan_port(br, p, extack);
+ if (err)
+ goto rollback;
+ }
+
+ err = br_fdb_insert_locals_per_vlan_port(br, NULL, extack);
+ if (err)
+ goto rollback;
+
+ return 0;
+
+rollback:
+ NL_SET_ERR_MSG_MOD(extack, "fdb_local_vlan_0 toggle: FDB entry insertion failed");
+ br_fdb_delete_locals_per_vlan(br);
+ return err;
+}
+
+int br_fdb_toggle_local_vlan_0(struct net_bridge *br, bool on,
+ struct netlink_ext_ack *extack)
+{
+ if (!on)
+ return br_fdb_insert_locals_per_vlan(br, extack);
+
+ br_fdb_delete_locals_per_vlan(br);
+ return 0;
+}
+
static bool __fdb_flush_matches(const struct net_bridge *br,
const struct net_bridge_fdb_entry *f,
const struct net_bridge_fdb_flush_desc *desc)
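br_fdb_insert_locals_per_vlan() above uses an add-everything-or-roll-back shape: each port (and the bridge itself) gets its per-VLAN local entries, and a failure part-way through deletes whatever was already inserted. A compact standalone sketch of that shape, with invented item/add_one/del_one names, is:

#include <stdio.h>

struct item {
        int id;
        int added;
};

static int add_one(struct item *it)
{
        if (it->id == 2)        /* pretend the third item fails */
                return -1;
        it->added = 1;
        return 0;
}

static void del_one(struct item *it)
{
        it->added = 0;
}

static int add_all(struct item *items, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (add_one(&items[i]))
                        goto rollback;
        }
        return 0;

rollback:
        while (i--)             /* undo only what was added */
                del_one(&items[i]);
        return -1;
}

int main(void)
{
        struct item items[4] = { {0}, {1}, {2}, {3} };

        printf("%d\n", add_all(items, 4)); /* -1; items 0 and 1 rolled back */
        return 0;
}

The kernel version rolls back by calling br_fdb_delete_locals_per_vlan() for everything rather than tracking exactly which entries were added, relying on br_fdb_find_delete_local() simply skipping entries that are not present.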
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 29097e984b4f..870bdf2e082c 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -148,7 +148,8 @@ void br_forward(const struct net_bridge_port *to,
goto out;
/* redirect to backup link if the destination port is down */
- if (rcu_access_pointer(to->backup_port) && !netif_carrier_ok(to->dev)) {
+ if (rcu_access_pointer(to->backup_port) &&
+ (!netif_carrier_ok(to->dev) || !netif_running(to->dev))) {
struct net_bridge_port *backup_port;
backup_port = rcu_dereference(to->backup_port);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 2450690f98cf..98c5b9c3145f 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -668,7 +668,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
/* Ask for permission to use this MAC address now, even if we
* don't end up choosing it below.
*/
- err = dev_pre_changeaddr_notify(br->dev, dev->dev_addr, extack);
+ err = netif_pre_changeaddr_notify(br->dev, dev->dev_addr,
+ extack);
if (err)
goto err6;
}
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5f6ac9bf1527..67b4c905e49a 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -202,6 +202,14 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
break;
case BR_PKT_UNICAST:
dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
+ if (unlikely(!dst && vid &&
+ br_opt_get(br, BROPT_FDB_LOCAL_VLAN_0))) {
+ dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, 0);
+ if (dst &&
+ (!test_bit(BR_FDB_LOCAL, &dst->flags) ||
+ test_bit(BR_FDB_ADDED_BY_USER, &dst->flags)))
+ dst = NULL;
+ }
break;
default:
break;
diff --git a/net/bridge/br_mrp.c b/net/bridge/br_mrp.c
index fd2de35ffb3c..3c36fa24bc05 100644
--- a/net/bridge/br_mrp.c
+++ b/net/bridge/br_mrp.c
@@ -341,7 +341,7 @@ static void br_mrp_test_work_expired(struct work_struct *work)
out:
rcu_read_unlock();
- queue_delayed_work(system_wq, &mrp->test_work,
+ queue_delayed_work(system_percpu_wq, &mrp->test_work,
usecs_to_jiffies(mrp->test_interval));
}
@@ -418,7 +418,7 @@ static void br_mrp_in_test_work_expired(struct work_struct *work)
out:
rcu_read_unlock();
- queue_delayed_work(system_wq, &mrp->in_test_work,
+ queue_delayed_work(system_percpu_wq, &mrp->in_test_work,
usecs_to_jiffies(mrp->in_test_interval));
}
@@ -725,7 +725,7 @@ int br_mrp_start_test(struct net_bridge *br,
mrp->test_max_miss = test->max_miss;
mrp->test_monitor = test->monitor;
mrp->test_count_miss = 0;
- queue_delayed_work(system_wq, &mrp->test_work,
+ queue_delayed_work(system_percpu_wq, &mrp->test_work,
usecs_to_jiffies(test->interval));
return 0;
@@ -865,7 +865,7 @@ int br_mrp_start_in_test(struct net_bridge *br,
mrp->in_test_end = jiffies + usecs_to_jiffies(in_test->period);
mrp->in_test_max_miss = in_test->max_miss;
mrp->in_test_count_miss = 0;
- queue_delayed_work(system_wq, &mrp->in_test_work,
+ queue_delayed_work(system_percpu_wq, &mrp->in_test_work,
usecs_to_jiffies(in_test->interval));
return 0;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 0224ef3dfec0..22d12e545966 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -2015,10 +2015,19 @@ void br_multicast_port_ctx_init(struct net_bridge_port *port,
void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
{
+ struct net_bridge *br = pmctx->port->br;
+ bool del = false;
+
#if IS_ENABLED(CONFIG_IPV6)
timer_delete_sync(&pmctx->ip6_mc_router_timer);
#endif
timer_delete_sync(&pmctx->ip4_mc_router_timer);
+
+ spin_lock_bh(&br->multicast_lock);
+ del |= br_ip6_multicast_rport_del(pmctx);
+ del |= br_ip4_multicast_rport_del(pmctx);
+ br_multicast_rport_del_notify(pmctx, del);
+ spin_unlock_bh(&br->multicast_lock);
}
int br_multicast_add_port(struct net_bridge_port *port)
@@ -4040,8 +4049,7 @@ int br_multicast_rcv(struct net_bridge_mcast **brmctx,
}
static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
- struct bridge_mcast_own_query *query,
- struct bridge_mcast_querier *querier)
+ struct bridge_mcast_own_query *query)
{
spin_lock(&brmctx->br->multicast_lock);
if (br_multicast_ctx_vlan_disabled(brmctx))
@@ -4060,8 +4068,7 @@ static void br_ip4_multicast_query_expired(struct timer_list *t)
struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t,
ip4_own_query.timer);
- br_multicast_query_expired(brmctx, &brmctx->ip4_own_query,
- &brmctx->ip4_querier);
+ br_multicast_query_expired(brmctx, &brmctx->ip4_own_query);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -4070,8 +4077,7 @@ static void br_ip6_multicast_query_expired(struct timer_list *t)
struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t,
ip6_own_query.timer);
- br_multicast_query_expired(brmctx, &brmctx->ip6_own_query,
- &brmctx->ip6_querier);
+ br_multicast_query_expired(brmctx, &brmctx->ip6_own_query);
}
#endif
@@ -4809,6 +4815,14 @@ void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
}
+ if (intvl_jiffies > BR_MULTICAST_QUERY_INTVL_MAX) {
+ br_info(brmctx->br,
+ "trying to set multicast query interval above maximum, setting to %lu (%ums)\n",
+ jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MAX),
+ jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MAX));
+ intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MAX;
+ }
+
brmctx->multicast_query_interval = intvl_jiffies;
}
@@ -4825,6 +4839,14 @@ void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
}
+ if (intvl_jiffies > BR_MULTICAST_STARTUP_QUERY_INTVL_MAX) {
+ br_info(brmctx->br,
+ "trying to set multicast startup query interval above maximum, setting to %lu (%ums)\n",
+ jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX),
+ jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX));
+ intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MAX;
+ }
+
brmctx->multicast_startup_query_interval = intvl_jiffies;
}
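Both interval setters now clamp at an upper bound as well as the existing lower bound (1 s minimum, 24 h maximum, per the new BR_MULTICAST_*_INTVL_MAX defines). A trivial standalone sketch of that clamp-and-warn behaviour, in milliseconds rather than jiffies:

#include <stdio.h>

static unsigned long clamp_interval_ms(unsigned long val_ms)
{
        const unsigned long lo = 1000UL;        /* 1 second */
        const unsigned long hi = 86400000UL;    /* 24 hours */

        if (val_ms < lo) {
                fprintf(stderr, "interval below minimum, using %lu ms\n", lo);
                return lo;
        }
        if (val_ms > hi) {
                fprintf(stderr, "interval above maximum, using %lu ms\n", hi);
                return hi;
        }
        return val_ms;
}

int main(void)
{
        printf("%lu\n", clamp_interval_ms(7 * 86400000UL)); /* clamped to 86400000 */
        return 0;
}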
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 94cbe967d1c1..083e2fe96441 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -626,9 +626,6 @@ static unsigned int br_nf_local_in(void *priv,
break;
}
- ct = container_of(nfct, struct nf_conn, ct_general);
- WARN_ON_ONCE(!nf_ct_is_confirmed(ct));
-
return ret;
}
#endif
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 6e337937d0d7..4e2d53b27221 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -479,7 +479,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
hdr->__ifi_pad = 0;
hdr->ifi_type = dev->type;
hdr->ifi_index = dev->ifindex;
- hdr->ifi_flags = dev_get_flags(dev);
+ hdr->ifi_flags = netif_get_flags(dev);
hdr->ifi_change = 0;
if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index b159aae594c0..16be5d250402 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -31,6 +31,8 @@
#define BR_MULTICAST_DEFAULT_HASH_MAX 4096
#define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000)
#define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN BR_MULTICAST_QUERY_INTVL_MIN
+#define BR_MULTICAST_QUERY_INTVL_MAX msecs_to_jiffies(86400000) /* 24 hours */
+#define BR_MULTICAST_STARTUP_QUERY_INTVL_MAX BR_MULTICAST_QUERY_INTVL_MAX
#define BR_HWDOM_MAX BITS_PER_LONG
@@ -485,6 +487,7 @@ enum net_bridge_opts {
BROPT_MCAST_VLAN_SNOOPING_ENABLED,
BROPT_MST_ENABLED,
BROPT_MDB_OFFLOAD_FAIL_NOTIFICATION,
+ BROPT_FDB_LOCAL_VLAN_0,
};
struct net_bridge {
@@ -841,6 +844,8 @@ void br_fdb_find_delete_local(struct net_bridge *br,
void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr);
void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
void br_fdb_cleanup(struct work_struct *work);
+int br_fdb_toggle_local_vlan_0(struct net_bridge *br, bool on,
+ struct netlink_ext_ack *extack);
void br_fdb_delete_by_port(struct net_bridge *br,
const struct net_bridge_port *p, u16 vid, int do_all);
struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index 95d7355a0407..fe3f7bbe86ee 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -17,6 +17,9 @@ static bool nbp_switchdev_can_offload_tx_fwd(const struct net_bridge_port *p,
if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
return false;
+ if (br_multicast_igmp_type(skb))
+ return false;
+
return (p->flags & BR_TX_FWD_OFFLOAD) &&
(p->hwdom != BR_INPUT_SKB_CB(skb)->src_hwdom);
}
@@ -834,7 +837,7 @@ int br_switchdev_port_offload(struct net_bridge_port *p,
struct netdev_phys_item_id ppid;
int err;
- err = dev_get_port_parent_id(dev, &ppid, false);
+ err = netif_get_port_parent_id(dev, &ppid, false);
if (err)
return err;
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index c1176a5e02c4..cb4855ed9500 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -1026,7 +1026,7 @@ static ssize_t brforward_read(struct file *filp, struct kobject *kobj,
static const struct bin_attribute bridge_forward = {
.attr = { .name = SYSFS_BRIDGE_FDB,
.mode = 0444, },
- .read_new = brforward_read,
+ .read = brforward_read,
};
/*
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 939a3aa78d5c..ce72b837ff8e 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -331,10 +331,12 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
/* Add the dev mac and count the vlan only if it's usable */
if (br_vlan_should_use(v)) {
- err = br_fdb_add_local(br, p, dev->dev_addr, v->vid);
- if (err) {
- br_err(br, "failed insert local address into bridge forwarding table\n");
- goto out_filt;
+ if (!br_opt_get(br, BROPT_FDB_LOCAL_VLAN_0)) {
+ err = br_fdb_add_local(br, p, dev->dev_addr, v->vid);
+ if (err) {
+ br_err(br, "failed insert local address into bridge forwarding table\n");
+ goto out_filt;
+ }
}
vg->num_vlans++;
}
@@ -1455,7 +1457,7 @@ void br_vlan_fill_forward_path_pvid(struct net_bridge *br,
if (!br_opt_get(br, BROPT_VLAN_ENABLED))
return;
- vg = br_vlan_group(br);
+ vg = br_vlan_group_rcu(br);
if (idx >= 0 &&
ctx->vlan[idx].proto == br->vlan_proto) {
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index f16bbbbb9481..4fd5a6ea26b4 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -42,8 +42,9 @@ config NF_CONNTRACK_BRIDGE
# old sockopt interface and eval loop
config BRIDGE_NF_EBTABLES_LEGACY
tristate "Legacy EBTABLES support"
- depends on BRIDGE && NETFILTER_XTABLES
- default n
+ depends on BRIDGE && NETFILTER_XTABLES_LEGACY
+ depends on NETFILTER_XTABLES
+ default n
help
Legacy ebtables packet/frame classifier.
This is not needed if you are using ebtables over nftables
@@ -65,7 +66,7 @@ if BRIDGE_NF_EBTABLES
#
config BRIDGE_EBT_BROUTE
tristate "ebt: broute table support"
- select BRIDGE_NF_EBTABLES_LEGACY
+ depends on BRIDGE_NF_EBTABLES_LEGACY
help
The ebtables broute table is used to define rules that decide between
bridging and routing frames, giving Linux the functionality of a
@@ -76,7 +77,7 @@ config BRIDGE_EBT_BROUTE
config BRIDGE_EBT_T_FILTER
tristate "ebt: filter table support"
- select BRIDGE_NF_EBTABLES_LEGACY
+ depends on BRIDGE_NF_EBTABLES_LEGACY
help
The ebtables filter table is used to define frame filtering rules at
local input, forwarding and local output. See the man page for
@@ -86,7 +87,7 @@ config BRIDGE_EBT_T_FILTER
config BRIDGE_EBT_T_NAT
tristate "ebt: nat table support"
- select BRIDGE_NF_EBTABLES_LEGACY
+ depends on BRIDGE_NF_EBTABLES_LEGACY
help
The ebtables nat table is used to define rules that alter the MAC
source address (MAC SNAT) or the MAC destination address (MAC DNAT).
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 3e67d4aff419..5697e3949a36 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -920,8 +920,8 @@ static int translate_table(struct net *net, const char *name,
* if an error occurs
*/
newinfo->chainstack =
- vmalloc(array_size(nr_cpu_ids,
- sizeof(*(newinfo->chainstack))));
+ vmalloc_array(nr_cpu_ids,
+ sizeof(*(newinfo->chainstack)));
if (!newinfo->chainstack)
return -ENOMEM;
for_each_possible_cpu(i) {
@@ -938,7 +938,7 @@ static int translate_table(struct net *net, const char *name,
}
}
- cl_s = vmalloc(array_size(udc_cnt, sizeof(*cl_s)));
+ cl_s = vmalloc_array(udc_cnt, sizeof(*cl_s));
if (!cl_s)
return -ENOMEM;
i = 0; /* the i'th udc */
@@ -1018,8 +1018,8 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
* the check on the size is done later, when we have the lock
*/
if (repl->num_counters) {
- unsigned long size = repl->num_counters * sizeof(*counterstmp);
- counterstmp = vmalloc(size);
+ counterstmp = vmalloc_array(repl->num_counters,
+ sizeof(*counterstmp));
if (!counterstmp)
return -ENOMEM;
}
@@ -1386,7 +1386,7 @@ static int do_update_counters(struct net *net, const char *name,
if (num_counters == 0)
return -EINVAL;
- tmp = vmalloc(array_size(num_counters, sizeof(*tmp)));
+ tmp = vmalloc_array(num_counters, sizeof(*tmp));
if (!tmp)
return -ENOMEM;
@@ -1526,7 +1526,7 @@ static int copy_counters_to_user(struct ebt_table *t,
if (num_counters != nentries)
return -EINVAL;
- counterstmp = vmalloc(array_size(nentries, sizeof(*counterstmp)));
+ counterstmp = vmalloc_array(nentries, sizeof(*counterstmp));
if (!counterstmp)
return -ENOMEM;
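The vmalloc(array_size(...)) and open-coded multiplication sites above become vmalloc_array(), which keeps the overflow-checked multiplication inside the allocator. A minimal userspace sketch of the same idea, using only the C standard library:

#include <stdint.h>
#include <stdlib.h>

/* Illustrative only: refuse the allocation when n * size would overflow,
 * instead of letting the multiplication wrap and under-allocate.
 */
static void *alloc_array(size_t n, size_t size)
{
        if (size && n > SIZE_MAX / size)
                return NULL;
        return malloc(n * size);
}

Since the overflow case reports failure the same way as an ordinary allocation failure, the existing NULL-pointer error paths in ebtables.c cover it without further changes.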
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
index 5adced1e7d0c..b7af36bbd306 100644
--- a/net/bridge/netfilter/nft_meta_bridge.c
+++ b/net/bridge/netfilter/nft_meta_bridge.c
@@ -59,6 +59,13 @@ static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
nft_reg_store_be16(dest, htons(p_proto));
return;
}
+ case NFT_META_BRI_IIFHWADDR:
+ br_dev = nft_meta_get_bridge(in);
+ if (!br_dev)
+ goto err;
+
+ memcpy(dest, br_dev->dev_addr, ETH_ALEN);
+ return;
default:
return nft_meta_get_eval(expr, regs, pkt);
}
@@ -86,6 +93,9 @@ static int nft_meta_bridge_get_init(const struct nft_ctx *ctx,
case NFT_META_BRI_IIFVPROTO:
len = sizeof(u16);
break;
+ case NFT_META_BRI_IIFHWADDR:
+ len = ETH_ALEN;
+ break;
default:
return nft_meta_get_init(ctx, expr, tb);
}
@@ -175,6 +185,7 @@ static int nft_meta_bridge_set_validate(const struct nft_ctx *ctx,
switch (priv->key) {
case NFT_META_BRI_BROUTE:
+ case NFT_META_BRI_IIFHWADDR:
hooks = 1 << NF_BR_PRE_ROUTING;
break;
default:
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 20139fa1be1f..2aa1e7d46eb2 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -257,9 +257,7 @@ int cfctrl_linkup_request(struct cflayer *layer,
cfpkt_add_body(pkt, &tmp16, 2);
tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs);
cfpkt_add_body(pkt, &tmp16, 2);
- memset(utility_name, 0, sizeof(utility_name));
- strscpy(utility_name, param->u.utility.name,
- UTILITY_NAME_LENGTH);
+ strscpy_pad(utility_name, param->u.utility.name);
cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH);
tmp8 = param->u.utility.paramlen;
cfpkt_add_body(pkt, &tmp8, 1);
@@ -351,17 +349,154 @@ int cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
return found;
}
+static int cfctrl_link_setup(struct cfctrl *cfctrl, struct cfpkt *pkt, u8 cmdrsp)
+{
+ u8 len;
+ u8 linkid = 0;
+ enum cfctrl_srv serv;
+ enum cfctrl_srv servtype;
+ u8 endpoint;
+ u8 physlinkid;
+ u8 prio;
+ u8 tmp;
+ u8 *cp;
+ int i;
+ struct cfctrl_link_param linkparam;
+ struct cfctrl_request_info rsp, *req;
+
+ memset(&linkparam, 0, sizeof(linkparam));
+
+ tmp = cfpkt_extr_head_u8(pkt);
+
+ serv = tmp & CFCTRL_SRV_MASK;
+ linkparam.linktype = serv;
+
+ servtype = tmp >> 4;
+ linkparam.chtype = servtype;
+
+ tmp = cfpkt_extr_head_u8(pkt);
+ physlinkid = tmp & 0x07;
+ prio = tmp >> 3;
+
+ linkparam.priority = prio;
+ linkparam.phyid = physlinkid;
+ endpoint = cfpkt_extr_head_u8(pkt);
+ linkparam.endpoint = endpoint & 0x03;
+
+ switch (serv) {
+ case CFCTRL_SRV_VEI:
+ case CFCTRL_SRV_DBG:
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ linkid = cfpkt_extr_head_u8(pkt);
+ break;
+ case CFCTRL_SRV_VIDEO:
+ tmp = cfpkt_extr_head_u8(pkt);
+ linkparam.u.video.connid = tmp;
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ linkid = cfpkt_extr_head_u8(pkt);
+ break;
+
+ case CFCTRL_SRV_DATAGRAM:
+ linkparam.u.datagram.connid = cfpkt_extr_head_u32(pkt);
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ linkid = cfpkt_extr_head_u8(pkt);
+ break;
+ case CFCTRL_SRV_RFM:
+ /* Construct a frame, convert
+ * DatagramConnectionID
+ * to network format long and copy it out...
+ */
+ linkparam.u.rfm.connid = cfpkt_extr_head_u32(pkt);
+ cp = (u8 *) linkparam.u.rfm.volume;
+ for (tmp = cfpkt_extr_head_u8(pkt);
+ cfpkt_more(pkt) && tmp != '\0';
+ tmp = cfpkt_extr_head_u8(pkt))
+ *cp++ = tmp;
+ *cp = '\0';
+
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ linkid = cfpkt_extr_head_u8(pkt);
+
+ break;
+ case CFCTRL_SRV_UTIL:
+ /* Construct a frame, convert
+ * DatagramConnectionID
+ * to network format long and copy it out...
+ */
+ /* Fifosize KB */
+ linkparam.u.utility.fifosize_kb = cfpkt_extr_head_u16(pkt);
+ /* Fifosize bufs */
+ linkparam.u.utility.fifosize_bufs = cfpkt_extr_head_u16(pkt);
+ /* name */
+ cp = (u8 *) linkparam.u.utility.name;
+ caif_assert(sizeof(linkparam.u.utility.name)
+ >= UTILITY_NAME_LENGTH);
+ for (i = 0; i < UTILITY_NAME_LENGTH && cfpkt_more(pkt); i++) {
+ tmp = cfpkt_extr_head_u8(pkt);
+ *cp++ = tmp;
+ }
+ /* Length */
+ len = cfpkt_extr_head_u8(pkt);
+ linkparam.u.utility.paramlen = len;
+ /* Param Data */
+ cp = linkparam.u.utility.params;
+ while (cfpkt_more(pkt) && len--) {
+ tmp = cfpkt_extr_head_u8(pkt);
+ *cp++ = tmp;
+ }
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ linkid = cfpkt_extr_head_u8(pkt);
+ /* Length */
+ len = cfpkt_extr_head_u8(pkt);
+ /* Param Data */
+ cfpkt_extr_head(pkt, NULL, len);
+ break;
+ default:
+ pr_warn("Request setup, invalid type (%d)\n", serv);
+ return -1;
+ }
+
+ rsp.cmd = CFCTRL_CMD_LINK_SETUP;
+ rsp.param = linkparam;
+ spin_lock_bh(&cfctrl->info_list_lock);
+ req = cfctrl_remove_req(cfctrl, &rsp);
+
+ if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
+ cfpkt_erroneous(pkt)) {
+ pr_err("Invalid O/E bit or parse error "
+ "on CAIF control channel\n");
+ cfctrl->res.reject_rsp(cfctrl->serv.layer.up, 0,
+ req ? req->client_layer : NULL);
+ } else {
+ cfctrl->res.linksetup_rsp(cfctrl->serv.layer.up, linkid,
+ serv, physlinkid,
+ req ? req->client_layer : NULL);
+ }
+
+ kfree(req);
+
+ spin_unlock_bh(&cfctrl->info_list_lock);
+
+ return 0;
+}
+
static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
{
u8 cmdrsp;
u8 cmd;
- int ret = -1;
- u8 len;
- u8 param[255];
+ int ret = 0;
u8 linkid = 0;
struct cfctrl *cfctrl = container_obj(layer);
- struct cfctrl_request_info rsp, *req;
-
cmdrsp = cfpkt_extr_head_u8(pkt);
cmd = cmdrsp & CFCTRL_CMD_MASK;
@@ -374,150 +509,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
switch (cmd) {
case CFCTRL_CMD_LINK_SETUP:
- {
- enum cfctrl_srv serv;
- enum cfctrl_srv servtype;
- u8 endpoint;
- u8 physlinkid;
- u8 prio;
- u8 tmp;
- u8 *cp;
- int i;
- struct cfctrl_link_param linkparam;
- memset(&linkparam, 0, sizeof(linkparam));
-
- tmp = cfpkt_extr_head_u8(pkt);
-
- serv = tmp & CFCTRL_SRV_MASK;
- linkparam.linktype = serv;
-
- servtype = tmp >> 4;
- linkparam.chtype = servtype;
-
- tmp = cfpkt_extr_head_u8(pkt);
- physlinkid = tmp & 0x07;
- prio = tmp >> 3;
-
- linkparam.priority = prio;
- linkparam.phyid = physlinkid;
- endpoint = cfpkt_extr_head_u8(pkt);
- linkparam.endpoint = endpoint & 0x03;
-
- switch (serv) {
- case CFCTRL_SRV_VEI:
- case CFCTRL_SRV_DBG:
- if (CFCTRL_ERR_BIT & cmdrsp)
- break;
- /* Link ID */
- linkid = cfpkt_extr_head_u8(pkt);
- break;
- case CFCTRL_SRV_VIDEO:
- tmp = cfpkt_extr_head_u8(pkt);
- linkparam.u.video.connid = tmp;
- if (CFCTRL_ERR_BIT & cmdrsp)
- break;
- /* Link ID */
- linkid = cfpkt_extr_head_u8(pkt);
- break;
-
- case CFCTRL_SRV_DATAGRAM:
- linkparam.u.datagram.connid =
- cfpkt_extr_head_u32(pkt);
- if (CFCTRL_ERR_BIT & cmdrsp)
- break;
- /* Link ID */
- linkid = cfpkt_extr_head_u8(pkt);
- break;
- case CFCTRL_SRV_RFM:
- /* Construct a frame, convert
- * DatagramConnectionID
- * to network format long and copy it out...
- */
- linkparam.u.rfm.connid =
- cfpkt_extr_head_u32(pkt);
- cp = (u8 *) linkparam.u.rfm.volume;
- for (tmp = cfpkt_extr_head_u8(pkt);
- cfpkt_more(pkt) && tmp != '\0';
- tmp = cfpkt_extr_head_u8(pkt))
- *cp++ = tmp;
- *cp = '\0';
-
- if (CFCTRL_ERR_BIT & cmdrsp)
- break;
- /* Link ID */
- linkid = cfpkt_extr_head_u8(pkt);
-
- break;
- case CFCTRL_SRV_UTIL:
- /* Construct a frame, convert
- * DatagramConnectionID
- * to network format long and copy it out...
- */
- /* Fifosize KB */
- linkparam.u.utility.fifosize_kb =
- cfpkt_extr_head_u16(pkt);
- /* Fifosize bufs */
- linkparam.u.utility.fifosize_bufs =
- cfpkt_extr_head_u16(pkt);
- /* name */
- cp = (u8 *) linkparam.u.utility.name;
- caif_assert(sizeof(linkparam.u.utility.name)
- >= UTILITY_NAME_LENGTH);
- for (i = 0;
- i < UTILITY_NAME_LENGTH
- && cfpkt_more(pkt); i++) {
- tmp = cfpkt_extr_head_u8(pkt);
- *cp++ = tmp;
- }
- /* Length */
- len = cfpkt_extr_head_u8(pkt);
- linkparam.u.utility.paramlen = len;
- /* Param Data */
- cp = linkparam.u.utility.params;
- while (cfpkt_more(pkt) && len--) {
- tmp = cfpkt_extr_head_u8(pkt);
- *cp++ = tmp;
- }
- if (CFCTRL_ERR_BIT & cmdrsp)
- break;
- /* Link ID */
- linkid = cfpkt_extr_head_u8(pkt);
- /* Length */
- len = cfpkt_extr_head_u8(pkt);
- /* Param Data */
- cfpkt_extr_head(pkt, &param, len);
- break;
- default:
- pr_warn("Request setup, invalid type (%d)\n",
- serv);
- goto error;
- }
-
- rsp.cmd = cmd;
- rsp.param = linkparam;
- spin_lock_bh(&cfctrl->info_list_lock);
- req = cfctrl_remove_req(cfctrl, &rsp);
-
- if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
- cfpkt_erroneous(pkt)) {
- pr_err("Invalid O/E bit or parse error "
- "on CAIF control channel\n");
- cfctrl->res.reject_rsp(cfctrl->serv.layer.up,
- 0,
- req ? req->client_layer
- : NULL);
- } else {
- cfctrl->res.linksetup_rsp(cfctrl->serv.
- layer.up, linkid,
- serv, physlinkid,
- req ? req->
- client_layer : NULL);
- }
-
- kfree(req);
-
- spin_unlock_bh(&cfctrl->info_list_lock);
- }
+ ret = cfctrl_link_setup(cfctrl, pkt, cmdrsp);
break;
case CFCTRL_CMD_LINK_DESTROY:
linkid = cfpkt_extr_head_u8(pkt);
@@ -544,9 +536,9 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
break;
default:
pr_err("Unrecognized Control Frame\n");
+ ret = -1;
goto error;
}
- ret = 0;
error:
cfpkt_destroy(pkt);
return ret;
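
The refactored cfctrl_link_setup() above unpacks the link-setup response header with plain masks and shifts: the service type sits in the low nibble of the first byte and the channel type in the high nibble, while the second byte carries the physical link id in its low three bits and the priority in the remaining bits. A minimal, self-contained sketch of that unpacking (field widths inferred from the masks and shifts above; the CFCTRL_SRV_MASK value is an assumption):

#include <stdio.h>
#include <stdint.h>

#define CFCTRL_SRV_MASK 0x0f	/* assumed: low nibble, as implied by the >> 4 above */

int main(void)
{
	uint8_t byte0 = 0x25;	/* example wire byte: chtype 2, service 5 */
	uint8_t byte1 = 0x1a;	/* example wire byte: priority 3, phy id 2 */

	unsigned int serv   = byte0 & CFCTRL_SRV_MASK;	/* low nibble   */
	unsigned int chtype = byte0 >> 4;		/* high nibble  */
	unsigned int phyid  = byte1 & 0x07;		/* low 3 bits   */
	unsigned int prio   = byte1 >> 3;		/* upper 5 bits */

	printf("serv=%u chtype=%u phyid=%u prio=%u\n", serv, chtype, phyid, prio);
	return 0;
}
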
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 4aab7033c933..770173d8db42 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -221,7 +221,7 @@ int can_send(struct sk_buff *skb, int loop)
}
/* Make sure the CAN frame can pass the selected CAN netdevice. */
- if (unlikely(skb->len > skb->dev->mtu)) {
+ if (unlikely(skb->len > READ_ONCE(skb->dev->mtu))) {
err = -EMSGSIZE;
goto inval_skb;
}
@@ -683,7 +683,7 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n",
dev->type, skb->len);
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_CAN_RX_INVALID_FRAME);
return NET_RX_DROP;
}
@@ -698,7 +698,7 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n",
dev->type, skb->len);
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_CANFD_RX_INVALID_FRAME);
return NET_RX_DROP;
}
@@ -713,7 +713,7 @@ static int canxl_rcv(struct sk_buff *skb, struct net_device *dev,
pr_warn_once("PF_CAN: dropped non conform CAN XL skbuff: dev type %d, len %d\n",
dev->type, skb->len);
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_CANXL_RX_INVALID_FRAME);
return NET_RX_DROP;
}
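
can_send() now reads the device MTU with READ_ONCE() because the transmit path holds no lock that would stop a concurrent MTU change; the same annotation is applied in isotp and raw below. A hedged sketch of the pattern (kernel context assumed, not a complete module):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: lockless, tear-free read of a field another context may update. */
static bool skb_fits_dev_mtu(const struct sk_buff *skb)
{
	unsigned int mtu = READ_ONCE(skb->dev->mtu);

	return skb->len <= mtu;
}
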
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 6bc1cc4c94c5..5e690a2377e4 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -359,6 +359,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
unsigned int datalen = head->nframes * op->cfsiz;
int err;
unsigned int *pflags;
+ enum skb_drop_reason reason;
skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
if (!skb)
@@ -413,11 +414,11 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
addr->can_family = AF_CAN;
addr->can_ifindex = op->rx_ifindex;
- err = sock_queue_rcv_skb(sk, skb);
+ err = sock_queue_rcv_skb_reason(sk, skb, &reason);
if (err < 0) {
struct bcm_sock *bo = bcm_sk(sk);
- kfree_skb(skb);
+ sk_skb_reason_drop(sk, skb, reason);
/* don't care about overflows in this statistic */
bo->dropped_usr_msgs++;
}
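
bcm_send_to_user() now queues with sock_queue_rcv_skb_reason() and frees failed skbs via sk_skb_reason_drop(), so the reason reported by the queueing helper reaches drop monitoring; isotp, j1939 and raw receive the same conversion below. A minimal sketch of the pattern (kernel context assumed):

#include <net/sock.h>

/* Sketch: queue an skb to a socket, or drop it with the reported reason. */
static void queue_or_drop(struct sock *sk, struct sk_buff *skb)
{
	enum skb_drop_reason reason;

	if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0)
		sk_skb_reason_drop(sk, skb, reason);
}
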
diff --git a/net/can/isotp.c b/net/can/isotp.c
index 1efa377f002e..74ee1e52249b 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -278,6 +278,7 @@ static int isotp_send_fc(struct sock *sk, int ae, u8 flowstatus)
static void isotp_rcv_skb(struct sk_buff *skb, struct sock *sk)
{
struct sockaddr_can *addr = (struct sockaddr_can *)skb->cb;
+ enum skb_drop_reason reason;
BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
@@ -285,8 +286,8 @@ static void isotp_rcv_skb(struct sk_buff *skb, struct sock *sk)
addr->can_family = AF_CAN;
addr->can_ifindex = skb->dev->ifindex;
- if (sock_queue_rcv_skb(sk, skb) < 0)
- kfree_skb(skb);
+ if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0)
+ sk_skb_reason_drop(sk, skb, reason);
}
static u8 padlen(u8 datalen)
@@ -1312,7 +1313,7 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
err = -ENODEV;
goto out;
}
- if (dev->mtu < so->ll.mtu) {
+ if (READ_ONCE(dev->mtu) < so->ll.mtu) {
dev_put(dev);
err = -EINVAL;
goto out;
diff --git a/net/can/j1939/bus.c b/net/can/j1939/bus.c
index 39844f14eed8..797719cb227e 100644
--- a/net/can/j1939/bus.c
+++ b/net/can/j1939/bus.c
@@ -290,8 +290,11 @@ int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa)
if (!ecu)
ecu = j1939_ecu_create_locked(priv, name);
err = PTR_ERR_OR_ZERO(ecu);
- if (err)
+ if (err) {
+ if (j1939_address_is_unicast(sa))
+ priv->ents[sa].nusers--;
goto done;
+ }
ecu->nusers++;
/* TODO: do we care if ecu->addr != sa? */
diff --git a/net/can/j1939/j1939-priv.h b/net/can/j1939/j1939-priv.h
index 31a93cae5111..81f58924b4ac 100644
--- a/net/can/j1939/j1939-priv.h
+++ b/net/can/j1939/j1939-priv.h
@@ -212,6 +212,7 @@ void j1939_priv_get(struct j1939_priv *priv);
/* notify/alert all j1939 sockets bound to ifindex */
void j1939_sk_netdev_event_netdown(struct j1939_priv *priv);
+void j1939_sk_netdev_event_unregister(struct j1939_priv *priv);
int j1939_cancel_active_session(struct j1939_priv *priv, struct sock *sk);
void j1939_tp_init(struct j1939_priv *priv);
diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
index 7e8a20f2fc42..a93af55df5fd 100644
--- a/net/can/j1939/main.c
+++ b/net/can/j1939/main.c
@@ -377,6 +377,11 @@ static int j1939_netdev_notify(struct notifier_block *nb,
j1939_sk_netdev_event_netdown(priv);
j1939_ecu_unmap_all(priv);
break;
+ case NETDEV_UNREGISTER:
+ j1939_cancel_active_session(priv, NULL);
+ j1939_sk_netdev_event_netdown(priv);
+ j1939_sk_netdev_event_unregister(priv);
+ break;
}
j1939_priv_put(priv);
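
The j1939 notifier now handles NETDEV_UNREGISTER in addition to NETDEV_DOWN, since an unregistering device also requires bound sockets to drop their references to the disappearing priv. For orientation, a hedged sketch of the usual netdevice-notifier shape (names are hypothetical):

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int example_netdev_notify(struct notifier_block *nb,
				 unsigned long msg, void *data)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(data);

	pr_debug("netdev event %lu on %s\n", msg, ndev->name);

	switch (msg) {
	case NETDEV_DOWN:		/* link went down, keep per-device state */
		break;
	case NETDEV_UNREGISTER:		/* device going away: drop all references */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_notify,
};
/* registered once via register_netdevice_notifier(&example_nb) */
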
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index 6fefe7a68761..88e7160d4248 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -311,6 +311,7 @@ static void j1939_sk_recv_one(struct j1939_sock *jsk, struct sk_buff *oskb)
{
const struct j1939_sk_buff_cb *oskcb = j1939_skb_to_cb(oskb);
struct j1939_sk_buff_cb *skcb;
+ enum skb_drop_reason reason;
struct sk_buff *skb;
if (oskb->sk == &jsk->sk)
@@ -331,8 +332,8 @@ static void j1939_sk_recv_one(struct j1939_sock *jsk, struct sk_buff *oskb)
if (skb->sk)
skcb->msg_flags |= MSG_DONTROUTE;
- if (sock_queue_rcv_skb(&jsk->sk, skb) < 0)
- kfree_skb(skb);
+ if (sock_queue_rcv_skb_reason(&jsk->sk, skb, &reason) < 0)
+ sk_skb_reason_drop(&jsk->sk, skb, reason);
}
bool j1939_sk_recv_match(struct j1939_priv *priv, struct j1939_sk_buff_cb *skcb)
@@ -520,6 +521,9 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
if (ret) {
j1939_netdev_stop(priv);
+ jsk->priv = NULL;
+ synchronize_rcu();
+ j1939_priv_put(priv);
goto out_release_sock;
}
@@ -1299,6 +1303,55 @@ void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
read_unlock_bh(&priv->j1939_socks_lock);
}
+void j1939_sk_netdev_event_unregister(struct j1939_priv *priv)
+{
+ struct sock *sk;
+ struct j1939_sock *jsk;
+ bool wait_rcu = false;
+
+rescan: /* The caller is holding a ref on this "priv" via j1939_priv_get_by_ndev(). */
+ read_lock_bh(&priv->j1939_socks_lock);
+ list_for_each_entry(jsk, &priv->j1939_socks, list) {
+ /* Skip if j1939_jsk_add() is not called on this socket. */
+ if (!(jsk->state & J1939_SOCK_BOUND))
+ continue;
+ sk = &jsk->sk;
+ sock_hold(sk);
+ read_unlock_bh(&priv->j1939_socks_lock);
+ /* Re-check, with the socket lock held, that j1939_jsk_del() has not run
+ * on this socket yet; both j1939_sk_bind() and j1939_sk_release() call
+ * j1939_jsk_del() with the socket lock held.
+ */
+ lock_sock(sk);
+ if (jsk->state & J1939_SOCK_BOUND) {
+ /* Neither j1939_sk_bind() nor j1939_sk_release() has called
+ * j1939_jsk_del(), so unbind this socket here, as if j1939_sk_bind()
+ * had dropped its old references without taking new ones.
+ */
+ j1939_jsk_del(priv, jsk);
+ j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa);
+ j1939_netdev_stop(priv);
+ /* Call j1939_priv_put() now and prevent j1939_sk_sock_destruct() from
+ * calling the corresponding j1939_priv_put().
+ *
+ * j1939_sk_sock_destruct() is supposed to call j1939_priv_put() after
+ * an RCU grace period. But since the caller is holding a ref on this
+ * "priv", we can defer synchronize_rcu() until immediately before
+ * the caller calls j1939_priv_put().
+ */
+ j1939_priv_put(priv);
+ jsk->priv = NULL;
+ wait_rcu = true;
+ }
+ release_sock(sk);
+ sock_put(sk);
+ goto rescan;
+ }
+ read_unlock_bh(&priv->j1939_socks_lock);
+ if (wait_rcu)
+ synchronize_rcu();
+}
+
static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
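
j1939_sk_netdev_event_unregister() above cannot take the socket lock while holding the read-side list lock, so it pins each bound socket, drops the list lock, re-validates under lock_sock() and restarts the walk. A hedged sketch of that general shape (the ex_* types and the unbind step are hypothetical, not the j1939 structures):

#include <net/sock.h>

struct ex_sock {
	struct sock sk;
	struct list_head list;
	int bound;
};

struct ex_priv {
	rwlock_t socks_lock;
	struct list_head socks;
};

static void ex_unbind_all(struct ex_priv *priv)
{
	struct ex_sock *esk;

rescan:
	read_lock_bh(&priv->socks_lock);
	list_for_each_entry(esk, &priv->socks, list) {
		if (!esk->bound)
			continue;
		sock_hold(&esk->sk);			/* pin across the unlock */
		read_unlock_bh(&priv->socks_lock);

		lock_sock(&esk->sk);			/* may sleep; list lock dropped */
		if (esk->bound)				/* re-check under the socket lock */
			esk->bound = 0;			/* stand-in for the real unbind */
		release_sock(&esk->sk);
		sock_put(&esk->sk);
		goto rescan;				/* list may have changed meanwhile */
	}
	read_unlock_bh(&priv->socks_lock);
}
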
diff --git a/net/can/raw.c b/net/can/raw.c
index 020f21430b1d..a53853f5e9af 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -75,31 +75,31 @@ MODULE_ALIAS("can-proto-1");
*/
struct uniqframe {
- int skbcnt;
const struct sk_buff *skb;
+ int skbcnt;
unsigned int join_rx_count;
};
struct raw_sock {
struct sock sk;
- int bound;
- int ifindex;
struct net_device *dev;
netdevice_tracker dev_tracker;
struct list_head notifier;
- int loopback;
- int recv_own_msgs;
- int fd_frames;
- int xl_frames;
+ int ifindex;
+ unsigned int bound:1;
+ unsigned int loopback:1;
+ unsigned int recv_own_msgs:1;
+ unsigned int fd_frames:1;
+ unsigned int xl_frames:1;
+ unsigned int join_filters:1;
struct can_raw_vcid_options raw_vcid_opts;
canid_t tx_vcid_shifted;
canid_t rx_vcid_shifted;
canid_t rx_vcid_mask_shifted;
- int join_filters;
+ can_err_mask_t err_mask;
int count; /* number of active filters */
struct can_filter dfilter; /* default/single filter */
struct can_filter *filter; /* pointer to filter(s) */
- can_err_mask_t err_mask;
struct uniqframe __percpu *uniq;
};
@@ -129,6 +129,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
{
struct sock *sk = (struct sock *)data;
struct raw_sock *ro = raw_sk(sk);
+ enum skb_drop_reason reason;
struct sockaddr_can *addr;
struct sk_buff *skb;
unsigned int *pflags;
@@ -205,8 +206,8 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
if (oskb->sk == sk)
*pflags |= MSG_CONFIRM;
- if (sock_queue_rcv_skb(sk, skb) < 0)
- kfree_skb(skb);
+ if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0)
+ sk_skb_reason_drop(sk, skb, reason);
}
static int raw_enable_filters(struct net *net, struct net_device *dev,
@@ -559,8 +560,8 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
struct can_filter sfilter; /* single filter */
struct net_device *dev = NULL;
can_err_mask_t err_mask = 0;
- int fd_frames;
int count = 0;
+ int flag;
int err = 0;
if (level != SOL_CAN_RAW)
@@ -681,44 +682,48 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
break;
case CAN_RAW_LOOPBACK:
- if (optlen != sizeof(ro->loopback))
+ if (optlen != sizeof(flag))
return -EINVAL;
- if (copy_from_sockptr(&ro->loopback, optval, optlen))
+ if (copy_from_sockptr(&flag, optval, optlen))
return -EFAULT;
+ ro->loopback = !!flag;
break;
case CAN_RAW_RECV_OWN_MSGS:
- if (optlen != sizeof(ro->recv_own_msgs))
+ if (optlen != sizeof(flag))
return -EINVAL;
- if (copy_from_sockptr(&ro->recv_own_msgs, optval, optlen))
+ if (copy_from_sockptr(&flag, optval, optlen))
return -EFAULT;
+ ro->recv_own_msgs = !!flag;
break;
case CAN_RAW_FD_FRAMES:
- if (optlen != sizeof(fd_frames))
+ if (optlen != sizeof(flag))
return -EINVAL;
- if (copy_from_sockptr(&fd_frames, optval, optlen))
+ if (copy_from_sockptr(&flag, optval, optlen))
return -EFAULT;
/* Enabling CAN XL includes CAN FD */
- if (ro->xl_frames && !fd_frames)
+ if (ro->xl_frames && !flag)
return -EINVAL;
- ro->fd_frames = fd_frames;
+ ro->fd_frames = !!flag;
break;
case CAN_RAW_XL_FRAMES:
- if (optlen != sizeof(ro->xl_frames))
+ if (optlen != sizeof(flag))
return -EINVAL;
- if (copy_from_sockptr(&ro->xl_frames, optval, optlen))
+ if (copy_from_sockptr(&flag, optval, optlen))
return -EFAULT;
+ ro->xl_frames = !!flag;
+
/* Enabling CAN XL includes CAN FD */
if (ro->xl_frames)
ro->fd_frames = ro->xl_frames;
@@ -738,12 +743,13 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
break;
case CAN_RAW_JOIN_FILTERS:
- if (optlen != sizeof(ro->join_filters))
+ if (optlen != sizeof(flag))
return -EINVAL;
- if (copy_from_sockptr(&ro->join_filters, optval, optlen))
+ if (copy_from_sockptr(&flag, optval, optlen))
return -EFAULT;
+ ro->join_filters = !!flag;
break;
default:
@@ -757,6 +763,7 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
{
struct sock *sk = sock->sk;
struct raw_sock *ro = raw_sk(sk);
+ int flag;
int len;
void *val;
@@ -805,25 +812,29 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
case CAN_RAW_LOOPBACK:
if (len > sizeof(int))
len = sizeof(int);
- val = &ro->loopback;
+ flag = ro->loopback;
+ val = &flag;
break;
case CAN_RAW_RECV_OWN_MSGS:
if (len > sizeof(int))
len = sizeof(int);
- val = &ro->recv_own_msgs;
+ flag = ro->recv_own_msgs;
+ val = &flag;
break;
case CAN_RAW_FD_FRAMES:
if (len > sizeof(int))
len = sizeof(int);
- val = &ro->fd_frames;
+ flag = ro->fd_frames;
+ val = &flag;
break;
case CAN_RAW_XL_FRAMES:
if (len > sizeof(int))
len = sizeof(int);
- val = &ro->xl_frames;
+ flag = ro->xl_frames;
+ val = &flag;
break;
case CAN_RAW_XL_VCID_OPTS: {
@@ -848,7 +859,8 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
case CAN_RAW_JOIN_FILTERS:
if (len > sizeof(int))
len = sizeof(int);
- val = &ro->join_filters;
+ flag = ro->join_filters;
+ val = &flag;
break;
default:
@@ -949,7 +961,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
err = -EINVAL;
/* check for valid CAN (CC/FD/XL) frame content */
- txmtu = raw_check_txframe(ro, skb, dev->mtu);
+ txmtu = raw_check_txframe(ro, skb, READ_ONCE(dev->mtu));
if (!txmtu)
goto free_skb;
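
The raw_sock boolean options above become one-bit bitfields, so raw_setsockopt() normalizes the user-supplied int with !! before storing it and raw_getsockopt() copies the bit into a local int instead of exposing a pointer into the struct. A small runnable sketch of that normalization (struct and values are illustrative only):

#include <stdio.h>

struct opts {
	unsigned int loopback:1;
	unsigned int fd_frames:1;
};

int main(void)
{
	struct opts o = { 0 };
	int user_val = 42;		/* any non-zero int means "enable" */

	o.loopback = !!user_val;	/* collapse to 0/1 before the 1-bit store */

	int out = o.loopback;		/* widen back to int for getsockopt */
	printf("loopback=%d fd_frames=%d out=%d\n", o.loopback, o.fd_frames, out);
	return 0;
}
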
diff --git a/net/ceph/Kconfig b/net/ceph/Kconfig
index 0aa21fcbf6ec..ea60e3ef0834 100644
--- a/net/ceph/Kconfig
+++ b/net/ceph/Kconfig
@@ -6,8 +6,7 @@ config CEPH_LIB
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_GCM
- select CRYPTO_HMAC
- select CRYPTO_SHA256
+ select CRYPTO_LIB_SHA256
select CRYPTO
select KEYS
default n
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index d1b5705dc0c6..f8181acaf870 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -252,7 +252,8 @@ int __init ceph_msgr_init(void)
* The number of active work items is limited by the number of
* connections, so leave @max_active at default.
*/
- ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
+ ceph_msgr_wq = alloc_workqueue("ceph-msgr",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (ceph_msgr_wq)
return 0;
@@ -1524,7 +1525,7 @@ static void con_fault_finish(struct ceph_connection *con)
* in case we faulted due to authentication, invalidate our
* current tickets so that we can get new ones.
*/
- if (con->v1.auth_retry) {
+ if (!ceph_msgr2(from_msgr(con->msgr)) && con->v1.auth_retry) {
dout("auth_retry %d, invalidating\n", con->v1.auth_retry);
if (con->ops->invalidate_authorizer)
con->ops->invalidate_authorizer(con);
@@ -1714,9 +1715,10 @@ static void clear_standby(struct ceph_connection *con)
{
/* come back from STANDBY? */
if (con->state == CEPH_CON_S_STANDBY) {
- dout("clear_standby %p and ++connect_seq\n", con);
+ dout("clear_standby %p\n", con);
con->state = CEPH_CON_S_PREOPEN;
- con->v1.connect_seq++;
+ if (!ceph_msgr2(from_msgr(con->msgr)))
+ con->v1.connect_seq++;
WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_WRITE_PENDING));
WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING));
}
@@ -1792,9 +1794,9 @@ void ceph_msg_revoke(struct ceph_msg *msg)
WARN_ON(con->state != CEPH_CON_S_OPEN);
dout("%s con %p msg %p was sending\n", __func__, con, msg);
if (ceph_msgr2(from_msgr(con->msgr)))
- ceph_con_v2_revoke(con);
+ ceph_con_v2_revoke(con, msg);
else
- ceph_con_v1_revoke(con);
+ ceph_con_v1_revoke(con, msg);
ceph_msg_put(con->out_msg);
con->out_msg = NULL;
} else {
@@ -2109,11 +2111,13 @@ int ceph_con_in_msg_alloc(struct ceph_connection *con,
return ret;
}
-void ceph_con_get_out_msg(struct ceph_connection *con)
+struct ceph_msg *ceph_con_get_out_msg(struct ceph_connection *con)
{
struct ceph_msg *msg;
- BUG_ON(list_empty(&con->out_queue));
+ if (list_empty(&con->out_queue))
+ return NULL;
+
msg = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
WARN_ON(msg->con != con);
@@ -2140,7 +2144,7 @@ void ceph_con_get_out_msg(struct ceph_connection *con)
* message or in case of a fault.
*/
WARN_ON(con->out_msg);
- con->out_msg = ceph_msg_get(msg);
+ return con->out_msg = ceph_msg_get(msg);
}
/*
diff --git a/net/ceph/messenger_v1.c b/net/ceph/messenger_v1.c
index 0cb61c76b9b8..c9e002d96319 100644
--- a/net/ceph/messenger_v1.c
+++ b/net/ceph/messenger_v1.c
@@ -169,10 +169,9 @@ static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
* Prepare footer for currently outgoing message, and finish things
* off. Assumes out_kvec* are already valid.. we just add on to the end.
*/
-static void prepare_write_message_footer(struct ceph_connection *con)
+static void prepare_write_message_footer(struct ceph_connection *con,
+ struct ceph_msg *m)
{
- struct ceph_msg *m = con->out_msg;
-
m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
dout("prepare_write_message_footer %p\n", con);
@@ -192,9 +191,9 @@ static void prepare_write_message_footer(struct ceph_connection *con)
/*
* Prepare headers for the next outgoing message.
*/
-static void prepare_write_message(struct ceph_connection *con)
+static void prepare_write_message(struct ceph_connection *con,
+ struct ceph_msg *m)
{
- struct ceph_msg *m;
u32 crc;
con_out_kvec_reset(con);
@@ -210,9 +209,6 @@ static void prepare_write_message(struct ceph_connection *con)
&con->v1.out_temp_ack);
}
- ceph_con_get_out_msg(con);
- m = con->out_msg;
-
dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n",
m, con->out_seq, le16_to_cpu(m->hdr.type),
le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
@@ -231,31 +227,31 @@ static void prepare_write_message(struct ceph_connection *con)
/* fill in hdr crc and finalize hdr */
crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
- con->out_msg->hdr.crc = cpu_to_le32(crc);
- memcpy(&con->v1.out_hdr, &con->out_msg->hdr, sizeof(con->v1.out_hdr));
+ m->hdr.crc = cpu_to_le32(crc);
+ memcpy(&con->v1.out_hdr, &m->hdr, sizeof(con->v1.out_hdr));
/* fill in front and middle crc, footer */
crc = crc32c(0, m->front.iov_base, m->front.iov_len);
- con->out_msg->footer.front_crc = cpu_to_le32(crc);
+ m->footer.front_crc = cpu_to_le32(crc);
if (m->middle) {
crc = crc32c(0, m->middle->vec.iov_base,
m->middle->vec.iov_len);
- con->out_msg->footer.middle_crc = cpu_to_le32(crc);
+ m->footer.middle_crc = cpu_to_le32(crc);
} else
- con->out_msg->footer.middle_crc = 0;
+ m->footer.middle_crc = 0;
dout("%s front_crc %u middle_crc %u\n", __func__,
- le32_to_cpu(con->out_msg->footer.front_crc),
- le32_to_cpu(con->out_msg->footer.middle_crc));
- con->out_msg->footer.flags = 0;
+ le32_to_cpu(m->footer.front_crc),
+ le32_to_cpu(m->footer.middle_crc));
+ m->footer.flags = 0;
/* is there a data payload? */
- con->out_msg->footer.data_crc = 0;
+ m->footer.data_crc = 0;
if (m->data_length) {
- prepare_message_data(con->out_msg, m->data_length);
+ prepare_message_data(m, m->data_length);
con->v1.out_more = 1; /* data + footer will follow */
} else {
/* no, queue up footer too and be done */
- prepare_write_message_footer(con);
+ prepare_write_message_footer(con, m);
}
ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
@@ -462,9 +458,9 @@ out:
* 0 -> socket full, but more to do
* <0 -> error
*/
-static int write_partial_message_data(struct ceph_connection *con)
+static int write_partial_message_data(struct ceph_connection *con,
+ struct ceph_msg *msg)
{
- struct ceph_msg *msg = con->out_msg;
struct ceph_msg_data_cursor *cursor = &msg->cursor;
bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
u32 crc;
@@ -516,7 +512,7 @@ static int write_partial_message_data(struct ceph_connection *con)
else
msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
con_out_kvec_reset(con);
- prepare_write_message_footer(con);
+ prepare_write_message_footer(con, msg);
return 1; /* must return > 0 to indicate success */
}
@@ -1472,6 +1468,7 @@ bad_tag:
*/
int ceph_con_v1_try_write(struct ceph_connection *con)
{
+ struct ceph_msg *msg;
int ret = 1;
dout("try_write start %p state %d\n", con, con->state);
@@ -1518,14 +1515,15 @@ more:
}
/* msg pages? */
- if (con->out_msg) {
+ msg = con->out_msg;
+ if (msg) {
if (con->v1.out_msg_done) {
- ceph_msg_put(con->out_msg);
+ ceph_msg_put(msg);
con->out_msg = NULL; /* we're done with this one */
goto do_next;
}
- ret = write_partial_message_data(con);
+ ret = write_partial_message_data(con, msg);
if (ret == 1)
goto more; /* we need to send the footer, too! */
if (ret == 0)
@@ -1545,8 +1543,8 @@ do_next:
goto more;
}
/* is anything else pending? */
- if (!list_empty(&con->out_queue)) {
- prepare_write_message(con);
+ if ((msg = ceph_con_get_out_msg(con)) != NULL) {
+ prepare_write_message(con, msg);
goto more;
}
if (con->in_seq > con->in_seq_acked) {
@@ -1564,10 +1562,8 @@ out:
return ret;
}
-void ceph_con_v1_revoke(struct ceph_connection *con)
+void ceph_con_v1_revoke(struct ceph_connection *con, struct ceph_msg *msg)
{
- struct ceph_msg *msg = con->out_msg;
-
WARN_ON(con->v1.out_skip);
/* footer */
if (con->v1.out_msg_done) {
diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c
index bd608ffa0627..9e39378eda00 100644
--- a/net/ceph/messenger_v2.c
+++ b/net/ceph/messenger_v2.c
@@ -709,7 +709,7 @@ static int setup_crypto(struct ceph_connection *con,
dout("%s con %p con_mode %d session_key_len %d con_secret_len %d\n",
__func__, con, con->v2.con_mode, session_key_len, con_secret_len);
- WARN_ON(con->v2.hmac_tfm || con->v2.gcm_tfm || con->v2.gcm_req);
+ WARN_ON(con->v2.hmac_key_set || con->v2.gcm_tfm || con->v2.gcm_req);
if (con->v2.con_mode != CEPH_CON_MODE_CRC &&
con->v2.con_mode != CEPH_CON_MODE_SECURE) {
@@ -723,22 +723,8 @@ static int setup_crypto(struct ceph_connection *con,
return 0; /* auth_none */
}
- noio_flag = memalloc_noio_save();
- con->v2.hmac_tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
- memalloc_noio_restore(noio_flag);
- if (IS_ERR(con->v2.hmac_tfm)) {
- ret = PTR_ERR(con->v2.hmac_tfm);
- con->v2.hmac_tfm = NULL;
- pr_err("failed to allocate hmac tfm context: %d\n", ret);
- return ret;
- }
-
- ret = crypto_shash_setkey(con->v2.hmac_tfm, session_key,
- session_key_len);
- if (ret) {
- pr_err("failed to set hmac key: %d\n", ret);
- return ret;
- }
+ hmac_sha256_preparekey(&con->v2.hmac_key, session_key, session_key_len);
+ con->v2.hmac_key_set = true;
if (con->v2.con_mode == CEPH_CON_MODE_CRC) {
WARN_ON(con_secret_len);
@@ -793,38 +779,26 @@ static int setup_crypto(struct ceph_connection *con,
return 0; /* auth_x, secure mode */
}
-static int hmac_sha256(struct ceph_connection *con, const struct kvec *kvecs,
- int kvec_cnt, u8 *hmac)
+static void ceph_hmac_sha256(struct ceph_connection *con,
+ const struct kvec *kvecs, int kvec_cnt,
+ u8 hmac[SHA256_DIGEST_SIZE])
{
- SHASH_DESC_ON_STACK(desc, con->v2.hmac_tfm); /* tfm arg is ignored */
- int ret;
+ struct hmac_sha256_ctx ctx;
int i;
- dout("%s con %p hmac_tfm %p kvec_cnt %d\n", __func__, con,
- con->v2.hmac_tfm, kvec_cnt);
+ dout("%s con %p hmac_key_set %d kvec_cnt %d\n", __func__, con,
+ con->v2.hmac_key_set, kvec_cnt);
- if (!con->v2.hmac_tfm) {
+ if (!con->v2.hmac_key_set) {
memset(hmac, 0, SHA256_DIGEST_SIZE);
- return 0; /* auth_none */
+ return; /* auth_none */
}
- desc->tfm = con->v2.hmac_tfm;
- ret = crypto_shash_init(desc);
- if (ret)
- goto out;
-
- for (i = 0; i < kvec_cnt; i++) {
- ret = crypto_shash_update(desc, kvecs[i].iov_base,
- kvecs[i].iov_len);
- if (ret)
- goto out;
- }
-
- ret = crypto_shash_final(desc, hmac);
-
-out:
- shash_desc_zero(desc);
- return ret; /* auth_x, both plain and secure modes */
+ /* auth_x, both plain and secure modes */
+ hmac_sha256_init(&ctx, &con->v2.hmac_key);
+ for (i = 0; i < kvec_cnt; i++)
+ hmac_sha256_update(&ctx, kvecs[i].iov_base, kvecs[i].iov_len);
+ hmac_sha256_final(&ctx, hmac);
}
static void gcm_inc_nonce(struct ceph_gcm_nonce *nonce)
@@ -1455,17 +1429,14 @@ static int prepare_auth_request_more(struct ceph_connection *con,
static int prepare_auth_signature(struct ceph_connection *con)
{
void *buf;
- int ret;
buf = alloc_conn_buf(con, head_onwire_len(SHA256_DIGEST_SIZE,
con_secure(con)));
if (!buf)
return -ENOMEM;
- ret = hmac_sha256(con, con->v2.in_sign_kvecs, con->v2.in_sign_kvec_cnt,
- CTRL_BODY(buf));
- if (ret)
- return ret;
+ ceph_hmac_sha256(con, con->v2.in_sign_kvecs, con->v2.in_sign_kvec_cnt,
+ CTRL_BODY(buf));
return prepare_control(con, FRAME_TAG_AUTH_SIGNATURE, buf,
SHA256_DIGEST_SIZE);
@@ -1589,10 +1560,11 @@ static int prepare_ack(struct ceph_connection *con)
return prepare_control(con, FRAME_TAG_ACK, con->v2.out_buf, 8);
}
-static void prepare_epilogue_plain(struct ceph_connection *con, bool aborted)
+static void prepare_epilogue_plain(struct ceph_connection *con,
+ struct ceph_msg *msg, bool aborted)
{
dout("%s con %p msg %p aborted %d crcs %u %u %u\n", __func__, con,
- con->out_msg, aborted, con->v2.out_epil.front_crc,
+ msg, aborted, con->v2.out_epil.front_crc,
con->v2.out_epil.middle_crc, con->v2.out_epil.data_crc);
encode_epilogue_plain(con, aborted);
@@ -1603,10 +1575,9 @@ static void prepare_epilogue_plain(struct ceph_connection *con, bool aborted)
* For "used" empty segments, crc is -1. For unused (trailing)
* segments, crc is 0.
*/
-static void prepare_message_plain(struct ceph_connection *con)
+static void prepare_message_plain(struct ceph_connection *con,
+ struct ceph_msg *msg)
{
- struct ceph_msg *msg = con->out_msg;
-
prepare_head_plain(con, con->v2.out_buf,
sizeof(struct ceph_msg_header2), NULL, 0, false);
@@ -1647,7 +1618,7 @@ static void prepare_message_plain(struct ceph_connection *con)
con->v2.out_state = OUT_S_QUEUE_DATA;
} else {
con->v2.out_epil.data_crc = 0;
- prepare_epilogue_plain(con, false);
+ prepare_epilogue_plain(con, msg, false);
con->v2.out_state = OUT_S_FINISH_MESSAGE;
}
}
@@ -1659,7 +1630,8 @@ static void prepare_message_plain(struct ceph_connection *con)
* allocate pages for the entire tail of the message (currently up
* to ~32M) and two sgs arrays (up to ~256K each)...
*/
-static int prepare_message_secure(struct ceph_connection *con)
+static int prepare_message_secure(struct ceph_connection *con,
+ struct ceph_msg *msg)
{
void *zerop = page_address(ceph_zero_page);
struct sg_table enc_sgt = {};
@@ -1674,7 +1646,7 @@ static int prepare_message_secure(struct ceph_connection *con)
if (ret)
return ret;
- tail_len = tail_onwire_len(con->out_msg, true);
+ tail_len = tail_onwire_len(msg, true);
if (!tail_len) {
/*
* Empty message: once the head is written,
@@ -1685,7 +1657,7 @@ static int prepare_message_secure(struct ceph_connection *con)
}
encode_epilogue_secure(con, false);
- ret = setup_message_sgs(&sgt, con->out_msg, zerop, zerop, zerop,
+ ret = setup_message_sgs(&sgt, msg, zerop, zerop, zerop,
&con->v2.out_epil, NULL, 0, false);
if (ret)
goto out;
@@ -1714,7 +1686,7 @@ static int prepare_message_secure(struct ceph_connection *con)
goto out;
dout("%s con %p msg %p sg_cnt %d enc_page_cnt %d\n", __func__, con,
- con->out_msg, sgt.orig_nents, enc_page_cnt);
+ msg, sgt.orig_nents, enc_page_cnt);
con->v2.out_state = OUT_S_QUEUE_ENC_PAGE;
out:
@@ -1723,19 +1695,19 @@ out:
return ret;
}
-static int prepare_message(struct ceph_connection *con)
+static int prepare_message(struct ceph_connection *con, struct ceph_msg *msg)
{
int lens[] = {
sizeof(struct ceph_msg_header2),
- front_len(con->out_msg),
- middle_len(con->out_msg),
- data_len(con->out_msg)
+ front_len(msg),
+ middle_len(msg),
+ data_len(msg)
};
struct ceph_frame_desc desc;
int ret;
dout("%s con %p msg %p logical %d+%d+%d+%d\n", __func__, con,
- con->out_msg, lens[0], lens[1], lens[2], lens[3]);
+ msg, lens[0], lens[1], lens[2], lens[3]);
if (con->in_seq > con->in_seq_acked) {
dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
@@ -1746,15 +1718,15 @@ static int prepare_message(struct ceph_connection *con)
reset_out_kvecs(con);
init_frame_desc(&desc, FRAME_TAG_MESSAGE, lens, 4);
encode_preamble(&desc, con->v2.out_buf);
- fill_header2(CTRL_BODY(con->v2.out_buf), &con->out_msg->hdr,
+ fill_header2(CTRL_BODY(con->v2.out_buf), &msg->hdr,
con->in_seq_acked);
if (con_secure(con)) {
- ret = prepare_message_secure(con);
+ ret = prepare_message_secure(con, msg);
if (ret)
return ret;
} else {
- prepare_message_plain(con);
+ prepare_message_plain(con, msg);
}
ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
@@ -2460,10 +2432,8 @@ static int process_auth_signature(struct ceph_connection *con,
return -EINVAL;
}
- ret = hmac_sha256(con, con->v2.out_sign_kvecs,
- con->v2.out_sign_kvec_cnt, hmac);
- if (ret)
- return ret;
+ ceph_hmac_sha256(con, con->v2.out_sign_kvecs, con->v2.out_sign_kvec_cnt,
+ hmac);
ceph_decode_need(&p, end, SHA256_DIGEST_SIZE, bad);
if (crypto_memneq(p, hmac, SHA256_DIGEST_SIZE)) {
@@ -3184,20 +3154,20 @@ int ceph_con_v2_try_read(struct ceph_connection *con)
}
}
-static void queue_data(struct ceph_connection *con)
+static void queue_data(struct ceph_connection *con, struct ceph_msg *msg)
{
struct bio_vec bv;
con->v2.out_epil.data_crc = -1;
- ceph_msg_data_cursor_init(&con->v2.out_cursor, con->out_msg,
- data_len(con->out_msg));
+ ceph_msg_data_cursor_init(&con->v2.out_cursor, msg,
+ data_len(msg));
get_bvec_at(&con->v2.out_cursor, &bv);
set_out_bvec(con, &bv, true);
con->v2.out_state = OUT_S_QUEUE_DATA_CONT;
}
-static void queue_data_cont(struct ceph_connection *con)
+static void queue_data_cont(struct ceph_connection *con, struct ceph_msg *msg)
{
struct bio_vec bv;
@@ -3218,7 +3188,7 @@ static void queue_data_cont(struct ceph_connection *con)
* we are done.
*/
reset_out_kvecs(con);
- prepare_epilogue_plain(con, false);
+ prepare_epilogue_plain(con, msg, false);
con->v2.out_state = OUT_S_FINISH_MESSAGE;
}
@@ -3250,7 +3220,7 @@ static void queue_enc_page(struct ceph_connection *con)
con->v2.out_state = OUT_S_FINISH_MESSAGE;
}
-static void queue_zeros(struct ceph_connection *con)
+static void queue_zeros(struct ceph_connection *con, struct ceph_msg *msg)
{
dout("%s con %p out_zero %d\n", __func__, con, con->v2.out_zero);
@@ -3267,7 +3237,7 @@ static void queue_zeros(struct ceph_connection *con)
* Once it's written, we are done patching up for the revoke.
*/
reset_out_kvecs(con);
- prepare_epilogue_plain(con, true);
+ prepare_epilogue_plain(con, msg, true);
con->v2.out_state = OUT_S_FINISH_MESSAGE;
}
@@ -3294,6 +3264,7 @@ static void finish_message(struct ceph_connection *con)
static int populate_out_iter(struct ceph_connection *con)
{
+ struct ceph_msg *msg;
int ret;
dout("%s con %p state %d out_state %d\n", __func__, con, con->state,
@@ -3309,18 +3280,18 @@ static int populate_out_iter(struct ceph_connection *con)
switch (con->v2.out_state) {
case OUT_S_QUEUE_DATA:
WARN_ON(!con->out_msg);
- queue_data(con);
+ queue_data(con, con->out_msg);
goto populated;
case OUT_S_QUEUE_DATA_CONT:
WARN_ON(!con->out_msg);
- queue_data_cont(con);
+ queue_data_cont(con, con->out_msg);
goto populated;
case OUT_S_QUEUE_ENC_PAGE:
queue_enc_page(con);
goto populated;
case OUT_S_QUEUE_ZEROS:
WARN_ON(con->out_msg); /* revoked */
- queue_zeros(con);
+ queue_zeros(con, con->out_msg);
goto populated;
case OUT_S_FINISH_MESSAGE:
finish_message(con);
@@ -3339,9 +3310,8 @@ static int populate_out_iter(struct ceph_connection *con)
pr_err("prepare_keepalive2 failed: %d\n", ret);
return ret;
}
- } else if (!list_empty(&con->out_queue)) {
- ceph_con_get_out_msg(con);
- ret = prepare_message(con);
+ } else if ((msg = ceph_con_get_out_msg(con)) != NULL) {
+ ret = prepare_message(con, msg);
if (ret) {
pr_err("prepare_message failed: %d\n", ret);
return ret;
@@ -3453,17 +3423,18 @@ static u32 crc32c_zeros(u32 crc, int zero_len)
return crc;
}
-static void prepare_zero_front(struct ceph_connection *con, int resid)
+static void prepare_zero_front(struct ceph_connection *con,
+ struct ceph_msg *msg, int resid)
{
int sent;
- WARN_ON(!resid || resid > front_len(con->out_msg));
- sent = front_len(con->out_msg) - resid;
+ WARN_ON(!resid || resid > front_len(msg));
+ sent = front_len(msg) - resid;
dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
if (sent) {
con->v2.out_epil.front_crc =
- crc32c(-1, con->out_msg->front.iov_base, sent);
+ crc32c(-1, msg->front.iov_base, sent);
con->v2.out_epil.front_crc =
crc32c_zeros(con->v2.out_epil.front_crc, resid);
} else {
@@ -3474,17 +3445,18 @@ static void prepare_zero_front(struct ceph_connection *con, int resid)
out_zero_add(con, resid);
}
-static void prepare_zero_middle(struct ceph_connection *con, int resid)
+static void prepare_zero_middle(struct ceph_connection *con,
+ struct ceph_msg *msg, int resid)
{
int sent;
- WARN_ON(!resid || resid > middle_len(con->out_msg));
- sent = middle_len(con->out_msg) - resid;
+ WARN_ON(!resid || resid > middle_len(msg));
+ sent = middle_len(msg) - resid;
dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
if (sent) {
con->v2.out_epil.middle_crc =
- crc32c(-1, con->out_msg->middle->vec.iov_base, sent);
+ crc32c(-1, msg->middle->vec.iov_base, sent);
con->v2.out_epil.middle_crc =
crc32c_zeros(con->v2.out_epil.middle_crc, resid);
} else {
@@ -3495,61 +3467,64 @@ static void prepare_zero_middle(struct ceph_connection *con, int resid)
out_zero_add(con, resid);
}
-static void prepare_zero_data(struct ceph_connection *con)
+static void prepare_zero_data(struct ceph_connection *con,
+ struct ceph_msg *msg)
{
dout("%s con %p\n", __func__, con);
- con->v2.out_epil.data_crc = crc32c_zeros(-1, data_len(con->out_msg));
- out_zero_add(con, data_len(con->out_msg));
+ con->v2.out_epil.data_crc = crc32c_zeros(-1, data_len(msg));
+ out_zero_add(con, data_len(msg));
}
-static void revoke_at_queue_data(struct ceph_connection *con)
+static void revoke_at_queue_data(struct ceph_connection *con,
+ struct ceph_msg *msg)
{
int boundary;
int resid;
- WARN_ON(!data_len(con->out_msg));
+ WARN_ON(!data_len(msg));
WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
resid = iov_iter_count(&con->v2.out_iter);
- boundary = front_len(con->out_msg) + middle_len(con->out_msg);
+ boundary = front_len(msg) + middle_len(msg);
if (resid > boundary) {
resid -= boundary;
WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
dout("%s con %p was sending head\n", __func__, con);
- if (front_len(con->out_msg))
- prepare_zero_front(con, front_len(con->out_msg));
- if (middle_len(con->out_msg))
- prepare_zero_middle(con, middle_len(con->out_msg));
- prepare_zero_data(con);
+ if (front_len(msg))
+ prepare_zero_front(con, msg, front_len(msg));
+ if (middle_len(msg))
+ prepare_zero_middle(con, msg, middle_len(msg));
+ prepare_zero_data(con, msg);
WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
con->v2.out_state = OUT_S_QUEUE_ZEROS;
return;
}
- boundary = middle_len(con->out_msg);
+ boundary = middle_len(msg);
if (resid > boundary) {
resid -= boundary;
dout("%s con %p was sending front\n", __func__, con);
- prepare_zero_front(con, resid);
- if (middle_len(con->out_msg))
- prepare_zero_middle(con, middle_len(con->out_msg));
- prepare_zero_data(con);
- queue_zeros(con);
+ prepare_zero_front(con, msg, resid);
+ if (middle_len(msg))
+ prepare_zero_middle(con, msg, middle_len(msg));
+ prepare_zero_data(con, msg);
+ queue_zeros(con, msg);
return;
}
WARN_ON(!resid);
dout("%s con %p was sending middle\n", __func__, con);
- prepare_zero_middle(con, resid);
- prepare_zero_data(con);
- queue_zeros(con);
+ prepare_zero_middle(con, msg, resid);
+ prepare_zero_data(con, msg);
+ queue_zeros(con, msg);
}
-static void revoke_at_queue_data_cont(struct ceph_connection *con)
+static void revoke_at_queue_data_cont(struct ceph_connection *con,
+ struct ceph_msg *msg)
{
int sent, resid; /* current piece of data */
- WARN_ON(!data_len(con->out_msg));
+ WARN_ON(!data_len(msg));
WARN_ON(!iov_iter_is_bvec(&con->v2.out_iter));
resid = iov_iter_count(&con->v2.out_iter);
WARN_ON(!resid || resid > con->v2.out_bvec.bv_len);
@@ -3568,10 +3543,11 @@ static void revoke_at_queue_data_cont(struct ceph_connection *con)
con->v2.out_iter.count -= resid;
out_zero_add(con, con->v2.out_cursor.total_resid);
- queue_zeros(con);
+ queue_zeros(con, msg);
}
-static void revoke_at_finish_message(struct ceph_connection *con)
+static void revoke_at_finish_message(struct ceph_connection *con,
+ struct ceph_msg *msg)
{
int boundary;
int resid;
@@ -3579,39 +3555,39 @@ static void revoke_at_finish_message(struct ceph_connection *con)
WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
resid = iov_iter_count(&con->v2.out_iter);
- if (!front_len(con->out_msg) && !middle_len(con->out_msg) &&
- !data_len(con->out_msg)) {
+ if (!front_len(msg) && !middle_len(msg) &&
+ !data_len(msg)) {
WARN_ON(!resid || resid > MESSAGE_HEAD_PLAIN_LEN);
dout("%s con %p was sending head (empty message) - noop\n",
__func__, con);
return;
}
- boundary = front_len(con->out_msg) + middle_len(con->out_msg) +
+ boundary = front_len(msg) + middle_len(msg) +
CEPH_EPILOGUE_PLAIN_LEN;
if (resid > boundary) {
resid -= boundary;
WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
dout("%s con %p was sending head\n", __func__, con);
- if (front_len(con->out_msg))
- prepare_zero_front(con, front_len(con->out_msg));
- if (middle_len(con->out_msg))
- prepare_zero_middle(con, middle_len(con->out_msg));
+ if (front_len(msg))
+ prepare_zero_front(con, msg, front_len(msg));
+ if (middle_len(msg))
+ prepare_zero_middle(con, msg, middle_len(msg));
con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
con->v2.out_state = OUT_S_QUEUE_ZEROS;
return;
}
- boundary = middle_len(con->out_msg) + CEPH_EPILOGUE_PLAIN_LEN;
+ boundary = middle_len(msg) + CEPH_EPILOGUE_PLAIN_LEN;
if (resid > boundary) {
resid -= boundary;
dout("%s con %p was sending front\n", __func__, con);
- prepare_zero_front(con, resid);
- if (middle_len(con->out_msg))
- prepare_zero_middle(con, middle_len(con->out_msg));
+ prepare_zero_front(con, msg, resid);
+ if (middle_len(msg))
+ prepare_zero_middle(con, msg, middle_len(msg));
con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
- queue_zeros(con);
+ queue_zeros(con, msg);
return;
}
@@ -3619,9 +3595,9 @@ static void revoke_at_finish_message(struct ceph_connection *con)
if (resid > boundary) {
resid -= boundary;
dout("%s con %p was sending middle\n", __func__, con);
- prepare_zero_middle(con, resid);
+ prepare_zero_middle(con, msg, resid);
con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
- queue_zeros(con);
+ queue_zeros(con, msg);
return;
}
@@ -3629,7 +3605,7 @@ static void revoke_at_finish_message(struct ceph_connection *con)
dout("%s con %p was sending epilogue - noop\n", __func__, con);
}
-void ceph_con_v2_revoke(struct ceph_connection *con)
+void ceph_con_v2_revoke(struct ceph_connection *con, struct ceph_msg *msg)
{
WARN_ON(con->v2.out_zero);
@@ -3642,13 +3618,13 @@ void ceph_con_v2_revoke(struct ceph_connection *con)
switch (con->v2.out_state) {
case OUT_S_QUEUE_DATA:
- revoke_at_queue_data(con);
+ revoke_at_queue_data(con, msg);
break;
case OUT_S_QUEUE_DATA_CONT:
- revoke_at_queue_data_cont(con);
+ revoke_at_queue_data_cont(con, msg);
break;
case OUT_S_FINISH_MESSAGE:
- revoke_at_finish_message(con);
+ revoke_at_finish_message(con, msg);
break;
default:
WARN(1, "bad out_state %d", con->v2.out_state);
@@ -3814,10 +3790,8 @@ void ceph_con_v2_reset_protocol(struct ceph_connection *con)
memzero_explicit(&con->v2.in_gcm_nonce, CEPH_GCM_IV_LEN);
memzero_explicit(&con->v2.out_gcm_nonce, CEPH_GCM_IV_LEN);
- if (con->v2.hmac_tfm) {
- crypto_free_shash(con->v2.hmac_tfm);
- con->v2.hmac_tfm = NULL;
- }
+ memzero_explicit(&con->v2.hmac_key, sizeof(con->v2.hmac_key));
+ con->v2.hmac_key_set = false;
if (con->v2.gcm_req) {
aead_request_free(con->v2.gcm_req);
con->v2.gcm_req = NULL;
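
setup_crypto() now prepares the HMAC-SHA256 key once with the library interface instead of allocating a crypto_shash transform, and ceph_hmac_sha256() streams each kvec through an on-stack context that cannot fail. A hedged sketch of that usage, assuming the hmac_sha256_* helpers and a struct hmac_sha256_key as declared in <crypto/sha2.h>:

#include <crypto/sha2.h>
#include <linux/uio.h>

/* Sketch: MAC an array of kvecs with a previously prepared key. */
static void mac_kvecs(const struct hmac_sha256_key *key,
		      const struct kvec *kvecs, int cnt,
		      u8 out[SHA256_DIGEST_SIZE])
{
	struct hmac_sha256_ctx ctx;
	int i;

	hmac_sha256_init(&ctx, key);		/* no allocation, no error path */
	for (i = 0; i < cnt; i++)
		hmac_sha256_update(&ctx, kvecs[i].iov_base, kvecs[i].iov_len);
	hmac_sha256_final(&ctx, out);
}
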
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index ab66b599ac47..c227ececa925 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -314,7 +314,7 @@ static void __schedule_delayed(struct ceph_mon_client *monc)
delay = CEPH_MONC_PING_INTERVAL;
dout("__schedule_delayed after %lu\n", delay);
- mod_delayed_work(system_wq, &monc->delayed_work,
+ mod_delayed_work(system_percpu_wq, &monc->delayed_work,
round_jiffies_relative(delay));
}
diff --git a/net/core/Makefile b/net/core/Makefile
index b2a76ce33932..9ef2099c5426 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_NETDEV_ADDR_LIST_TEST) += dev_addr_lists_test.o
obj-y += net-sysfs.o
obj-y += hotdata.o
obj-y += netdev_rx_queue.o
+obj-y += netdev_queues.o
obj-$(CONFIG_PAGE_POOL) += page_pool.o page_pool_user.o
obj-$(CONFIG_PROC_FS) += net-procfs.o
obj-$(CONFIG_NET_PKTGEN) += pktgen.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 94cc4705e91d..c285c6465923 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -345,7 +345,7 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
spin_unlock_bh(&sk_queue->lock);
}
- atomic_inc(&sk->sk_drops);
+ sk_drops_inc(sk);
return err;
}
EXPORT_SYMBOL(__sk_queue_drop_skb);
@@ -618,6 +618,20 @@ fault:
}
EXPORT_SYMBOL(skb_copy_datagram_from_iter);
+int skb_copy_datagram_from_iter_full(struct sk_buff *skb, int offset,
+ struct iov_iter *from, int len)
+{
+ struct iov_iter_state state;
+ int ret;
+
+ iov_iter_save_state(from, &state);
+ ret = skb_copy_datagram_from_iter(skb, offset, from, len);
+ if (ret)
+ iov_iter_restore(from, &state);
+ return ret;
+}
+EXPORT_SYMBOL(skb_copy_datagram_from_iter_full);
+
int zerocopy_fill_skb_from_iter(struct sk_buff *skb,
struct iov_iter *from, size_t length)
{
@@ -906,21 +920,22 @@ fault:
EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
/**
- * datagram_poll - generic datagram poll
+ * datagram_poll_queue - same as datagram_poll, but on a specific receive
+ * queue
* @file: file struct
* @sock: socket
* @wait: poll table
+ * @rcv_queue: receive queue to poll
*
- * Datagram poll: Again totally generic. This also handles
- * sequenced packet sockets providing the socket receive queue
- * is only ever holding data ready to receive.
+ * Performs polling on the given receive queue, handling shutdown, error,
+ * and connection state. This is useful for protocols that deliver
+ * userspace-bound packets through a custom queue instead of
+ * sk->sk_receive_queue.
*
- * Note: when you *don't* use this routine for this protocol,
- * and you use a different write policy from sock_writeable()
- * then please supply your own write_space callback.
+ * Return: poll bitmask indicating the socket's current state
*/
-__poll_t datagram_poll(struct file *file, struct socket *sock,
- poll_table *wait)
+__poll_t datagram_poll_queue(struct file *file, struct socket *sock,
+ poll_table *wait, struct sk_buff_head *rcv_queue)
{
struct sock *sk = sock->sk;
__poll_t mask;
@@ -942,7 +957,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
mask |= EPOLLHUP;
/* readable? */
- if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+ if (!skb_queue_empty_lockless(rcv_queue))
mask |= EPOLLIN | EPOLLRDNORM;
/* Connection-based need to check for termination and startup */
@@ -964,4 +979,27 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
return mask;
}
+EXPORT_SYMBOL(datagram_poll_queue);
+
+/**
+ * datagram_poll - generic datagram poll
+ * @file: file struct
+ * @sock: socket
+ * @wait: poll table
+ *
+ * Datagram poll: Again totally generic. This also handles
+ * sequenced packet sockets providing the socket receive queue
+ * is only ever holding data ready to receive.
+ *
+ * Note: when you *don't* use this routine for this protocol,
+ * and you use a different write policy from sock_writeable()
+ * then please supply your own write_space callback.
+ *
+ * Return: poll bitmask indicating the socket's current state
+ */
+__poll_t datagram_poll(struct file *file, struct socket *sock, poll_table *wait)
+{
+ return datagram_poll_queue(file, sock, wait,
+ &sock->sk->sk_receive_queue);
+}
EXPORT_SYMBOL(datagram_poll);
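
With datagram_poll() reduced to a wrapper, a protocol that parks userspace-bound packets on its own queue can poll that queue directly. A hedged usage sketch (the example_sock type and its queue field are hypothetical):

#include <linux/poll.h>
#include <linux/skbuff.h>
#include <net/sock.h>

struct example_sock {				/* hypothetical protocol socket */
	struct sock sk;
	struct sk_buff_head rx_userspace_queue;
};

static __poll_t example_poll(struct file *file, struct socket *sock,
			     poll_table *wait)
{
	struct example_sock *esk = (struct example_sock *)sock->sk;

	return datagram_poll_queue(file, sock, wait,
				   &esk->rx_userspace_queue);
}
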
diff --git a/net/core/dev.c b/net/core/dev.c
index be97c440ecd5..2acfa44927da 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1267,33 +1267,32 @@ struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
- * __dev_get_by_flags - find any device with given flags
- * @net: the applicable net namespace
- * @if_flags: IFF_* values
- * @mask: bitmask of bits in if_flags to check
+ * netdev_get_by_flags_rcu - find any device with given flags
+ * @net: the applicable net namespace
+ * @tracker: tracking object for the acquired reference
+ * @if_flags: IFF_* values
+ * @mask: bitmask of bits in if_flags to check
+ *
+ * Search for any interface with the given flags.
*
- * Search for any interface with the given flags. Returns NULL if a device
- * is not found or a pointer to the device. Must be called inside
- * rtnl_lock(), and result refcount is unchanged.
+ * Context: rcu_read_lock() must be held.
+ * Returns: NULL if a device is not found or a pointer to the device.
*/
-
-struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
- unsigned short mask)
+struct net_device *netdev_get_by_flags_rcu(struct net *net, netdevice_tracker *tracker,
+ unsigned short if_flags, unsigned short mask)
{
- struct net_device *dev, *ret;
-
- ASSERT_RTNL();
+ struct net_device *dev;
- ret = NULL;
- for_each_netdev(net, dev) {
- if (((dev->flags ^ if_flags) & mask) == 0) {
- ret = dev;
- break;
+ for_each_netdev_rcu(net, dev) {
+ if (((READ_ONCE(dev->flags) ^ if_flags) & mask) == 0) {
+ netdev_hold(dev, tracker, GFP_ATOMIC);
+ return dev;
}
}
- return ret;
+
+ return NULL;
}
-EXPORT_SYMBOL(__dev_get_by_flags);
+EXPORT_IPV6_MOD(netdev_get_by_flags_rcu);
/**
* dev_valid_name - check if name is okay for network device
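
netdev_get_by_flags_rcu() replaces the RTNL-only lookup with an RCU walk that hands back a tracked reference, so callers pair it with netdev_put() instead of relying on rtnl_lock(). A hedged sketch of a caller:

#include <linux/netdevice.h>

/* Sketch: find any UP interface in @net; may return NULL. */
static struct net_device *find_up_dev(struct net *net,
				      netdevice_tracker *tracker)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = netdev_get_by_flags_rcu(net, tracker, IFF_UP, IFF_UP);
	rcu_read_unlock();

	/* caller later releases with netdev_put(dev, tracker) when non-NULL */
	return dev;
}
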
@@ -1769,7 +1768,7 @@ static void __dev_close(struct net_device *dev)
list_del(&single);
}
-void dev_close_many(struct list_head *head, bool unlink)
+void netif_close_many(struct list_head *head, bool unlink)
{
struct net_device *dev, *tmp;
@@ -1787,7 +1786,7 @@ void dev_close_many(struct list_head *head, bool unlink)
list_del_init(&dev->close_list);
}
}
-EXPORT_SYMBOL(dev_close_many);
+EXPORT_SYMBOL_NS_GPL(netif_close_many, "NETDEV_INTERNAL");
void netif_close(struct net_device *dev)
{
@@ -1795,7 +1794,7 @@ void netif_close(struct net_device *dev)
LIST_HEAD(single);
list_add(&dev->close_list, &single);
- dev_close_many(&single, true);
+ netif_close_many(&single, true);
list_del(&single);
}
}
@@ -3179,7 +3178,6 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
if (dev->reg_state == NETREG_REGISTERED ||
dev->reg_state == NETREG_UNREGISTERING) {
- ASSERT_RTNL();
netdev_ops_assert_locked(dev);
rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
@@ -3229,7 +3227,6 @@ int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
return -EINVAL;
if (dev->reg_state == NETREG_REGISTERED) {
- ASSERT_RTNL();
netdev_ops_assert_locked(dev);
rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
@@ -3771,8 +3768,14 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
features &= ~dev->gso_partial_features;
- /* Make sure to clear the IPv4 ID mangling feature if the
- * IPv4 header has the potential to be fragmented.
+ /* Make sure to clear the IPv4 ID mangling feature if the IPv4 header
+ * has the potential to be fragmented so that TSO does not generate
+ * segments with the same ID. For encapsulated packets, the ID mangling
+ * feature is guaranteed not to use the same ID for the outer IPv4
+ * headers of the generated segments if the headers have the potential
+ * to be fragmented, so there is no need to clear the IPv4 ID mangling
+ * feature (see the section about NETIF_F_TSO_MANGLEID in
+ * segmentation-offloads.rst).
*/
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
struct iphdr *iph = skb->encapsulation ?
@@ -3782,6 +3785,18 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
features &= ~NETIF_F_TSO_MANGLEID;
}
+ /* NETIF_F_IPV6_CSUM does not support IPv6 extension headers,
+ * so neither does TSO that depends on it.
+ */
+ if (features & NETIF_F_IPV6_CSUM &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 ||
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
+ vlan_get_protocol(skb) == htons(ETH_P_IPV6))) &&
+ skb_transport_header_was_set(skb) &&
+ skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
+ !ipv6_has_hopopt_jumbo(skb))
+ features &= ~(NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4);
+
return features;
}
@@ -3898,6 +3913,38 @@ sw_checksum:
}
EXPORT_SYMBOL(skb_csum_hwoffload_help);
+/* Checks if this SKB belongs to an HW offloaded socket
+ * and whether any SW fallbacks are required based on dev.
+ * Check decrypted mark in case skb_orphan() cleared socket.
+ */
+static struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
+ struct net_device *dev)
+{
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+ struct sk_buff *(*sk_validate)(struct sock *sk, struct net_device *dev,
+ struct sk_buff *skb);
+ struct sock *sk = skb->sk;
+
+ sk_validate = NULL;
+ if (sk) {
+ if (sk_fullsock(sk))
+ sk_validate = sk->sk_validate_xmit_skb;
+ else if (sk_is_inet(sk) && sk->sk_state == TCP_TIME_WAIT)
+ sk_validate = inet_twsk(sk)->tw_validate_xmit_skb;
+ }
+
+ if (sk_validate) {
+ skb = sk_validate(sk, dev, skb);
+ } else if (unlikely(skb_is_decrypted(skb))) {
+ pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
+ kfree_skb(skb);
+ skb = NULL;
+ }
+#endif
+
+ return skb;
+}
+
static struct sk_buff *validate_xmit_unreadable_skb(struct sk_buff *skb,
struct net_device *dev)
{
@@ -4028,7 +4075,10 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
unsigned int hdr_len;
/* mac layer + network layer */
- hdr_len = skb_transport_offset(skb);
+ if (!skb->encapsulation)
+ hdr_len = skb_transport_offset(skb);
+ else
+ hdr_len = skb_inner_transport_offset(skb);
/* + transport layer */
if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
@@ -4798,7 +4848,7 @@ static inline void ____napi_schedule(struct softnet_data *sd,
if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
/* Paired with smp_mb__before_atomic() in
- * napi_enable()/dev_set_threaded().
+ * napi_enable()/netif_set_threaded().
* Use READ_ONCE() to guarantee a complete
* read on napi->thread. Only call
* wake_up_process() when it's not NULL.
@@ -4837,9 +4887,40 @@ static u32 rfs_slot(u32 hash, const struct rps_dev_flow_table *flow_table)
return hash_32(hash, flow_table->log);
}
+#ifdef CONFIG_RFS_ACCEL
+/**
+ * rps_flow_is_active - check whether the flow is recently active.
+ * @rflow: Specific flow to check activity.
+ * @flow_table: per-queue flowtable that @rflow belongs to.
+ * @cpu: CPU saved in @rflow.
+ *
+ * If the CPU has processed many packets since the flow's last activity
+ * (beyond 10 times the table size), the flow is considered stale.
+ *
+ * Return: true if flow was recently active.
+ */
+static bool rps_flow_is_active(struct rps_dev_flow *rflow,
+ struct rps_dev_flow_table *flow_table,
+ unsigned int cpu)
+{
+ unsigned int flow_last_active;
+ unsigned int sd_input_head;
+
+ if (cpu >= nr_cpu_ids)
+ return false;
+
+ sd_input_head = READ_ONCE(per_cpu(softnet_data, cpu).input_queue_head);
+ flow_last_active = READ_ONCE(rflow->last_qtail);
+
+ return (int)(sd_input_head - flow_last_active) <
+ (int)(10 << flow_table->log);
+}
+#endif
+
static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
- struct rps_dev_flow *rflow, u16 next_cpu)
+ struct rps_dev_flow *rflow, u16 next_cpu, u32 hash,
+ u32 flow_id)
{
if (next_cpu < nr_cpu_ids) {
u32 head;
@@ -4847,8 +4928,9 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct netdev_rx_queue *rxqueue;
struct rps_dev_flow_table *flow_table;
struct rps_dev_flow *old_rflow;
+ struct rps_dev_flow *tmp_rflow;
+ unsigned int tmp_cpu;
u16 rxq_index;
- u32 flow_id;
int rc;
/* Should we steer this flow to a different hardware queue? */
@@ -4863,14 +4945,29 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
flow_table = rcu_dereference(rxqueue->rps_flow_table);
if (!flow_table)
goto out;
- flow_id = rfs_slot(skb_get_hash(skb), flow_table);
+
+ tmp_rflow = &flow_table->flows[flow_id];
+ tmp_cpu = READ_ONCE(tmp_rflow->cpu);
+
+ if (READ_ONCE(tmp_rflow->filter) != RPS_NO_FILTER) {
+ if (rps_flow_is_active(tmp_rflow, flow_table,
+ tmp_cpu)) {
+ if (hash != READ_ONCE(tmp_rflow->hash) ||
+ next_cpu == tmp_cpu)
+ goto out;
+ }
+ }
+
rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
rxq_index, flow_id);
if (rc < 0)
goto out;
+
old_rflow = rflow;
- rflow = &flow_table->flows[flow_id];
+ rflow = tmp_rflow;
WRITE_ONCE(rflow->filter, rc);
+ WRITE_ONCE(rflow->hash, hash);
+
if (old_rflow->filter == rc)
WRITE_ONCE(old_rflow->filter, RPS_NO_FILTER);
out:
@@ -4896,6 +4993,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow_table *flow_table;
struct rps_map *map;
int cpu = -1;
+ u32 flow_id;
u32 tcpu;
u32 hash;
@@ -4942,7 +5040,8 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
/* OK, now we know there is a match,
* we can look at the local (per receive queue) flow table
*/
- rflow = &flow_table->flows[rfs_slot(hash, flow_table)];
+ flow_id = rfs_slot(hash, flow_table);
+ rflow = &flow_table->flows[flow_id];
tcpu = rflow->cpu;
/*
@@ -4961,7 +5060,8 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
((int)(READ_ONCE(per_cpu(softnet_data, tcpu).input_queue_head) -
rflow->last_qtail)) >= 0)) {
tcpu = next_cpu;
- rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
+ rflow = set_rps_cpu(dev, skb, rflow, next_cpu, hash,
+ flow_id);
}
if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
@@ -5005,17 +5105,16 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
struct rps_dev_flow_table *flow_table;
struct rps_dev_flow *rflow;
bool expire = true;
- unsigned int cpu;
rcu_read_lock();
flow_table = rcu_dereference(rxqueue->rps_flow_table);
if (flow_table && flow_id < (1UL << flow_table->log)) {
+ unsigned int cpu;
+
rflow = &flow_table->flows[flow_id];
cpu = READ_ONCE(rflow->cpu);
- if (READ_ONCE(rflow->filter) == filter_id && cpu < nr_cpu_ids &&
- ((int)(READ_ONCE(per_cpu(softnet_data, cpu).input_queue_head) -
- READ_ONCE(rflow->last_qtail)) <
- (int)(10 << flow_table->log)))
+ if (READ_ONCE(rflow->filter) == filter_id &&
+ rps_flow_is_active(rflow, flow_table, cpu))
expire = false;
}
rcu_read_unlock();
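
Illustrative sketch, not part of the patch: rps_may_expire_flow() is the driver-facing side of the same staleness test. A hypothetical aRFS-capable driver could periodically walk its installed hardware filters and drop the ones whose flow the stack reports as idle; every foo_* name below is invented for the example.

static void foo_arfs_expire_filters(struct foo_rx_ring *ring)
{
        struct foo_arfs_filter *f, *tmp;

        list_for_each_entry_safe(f, tmp, &ring->arfs_filters, list) {
                if (!rps_may_expire_flow(ring->netdev, ring->queue_index,
                                         f->flow_id, f->filter_id))
                        continue;

                /* The stack considers the flow stale: undo the HW steering. */
                foo_hw_del_steering_rule(ring, f);
                list_del(&f->list);
                kfree(f);
        }
}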
@@ -5081,8 +5180,9 @@ static void napi_schedule_rps(struct softnet_data *sd)
__napi_schedule_irqoff(&mysd->backlog);
}
-void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu)
+void kick_defer_list_purge(unsigned int cpu)
{
+ struct softnet_data *sd = &per_cpu(softnet_data, cpu);
unsigned long flags;
if (use_backlog_threads()) {
@@ -5187,7 +5287,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
backlog_unlock_irq_restore(sd, &flags);
cpu_backlog_drop:
- atomic_inc(&sd->dropped);
+ numa_drop_add(&sd->drop_counters, 1);
bad_dev:
dev_core_stats_rx_dropped_inc(skb->dev);
kfree_skb_reason(skb, reason);
@@ -5749,6 +5849,7 @@ static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
struct packet_type **ppt_prev)
{
+ enum skb_drop_reason drop_reason = SKB_DROP_REASON_UNHANDLED_PROTO;
struct packet_type *ptype, *pt_prev;
rx_handler_func_t *rx_handler;
struct sk_buff *skb = *pskb;
@@ -5840,8 +5941,10 @@ skip_taps:
#endif
skb_reset_redirect(skb);
skip_classify:
- if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
+ if (pfmemalloc && !skb_pfmemalloc_protocol(skb)) {
+ drop_reason = SKB_DROP_REASON_PFMEMALLOC;
goto drop;
+ }
if (skb_vlan_tag_present(skb)) {
if (pt_prev) {
@@ -5939,8 +6042,6 @@ check_vlan_id:
}
if (pt_prev) {
- if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
- goto drop;
*ppt_prev = pt_prev;
} else {
drop:
@@ -5948,7 +6049,8 @@ drop:
dev_core_stats_rx_dropped_inc(skb->dev);
else
dev_core_stats_rx_nohandler_inc(skb->dev);
- kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO);
+
+ kfree_skb_reason(skb, drop_reason);
/* Jamal, now you will not able to escape explaining
* me how you were going to use this. :-)
*/
@@ -6576,8 +6678,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
* it, we need to bound somehow the time packets are kept in
* the GRO layer.
*/
- gro_flush(&n->gro, !!timeout);
- gro_normal_list(&n->gro);
+ gro_flush_normal(&n->gro, !!timeout);
if (unlikely(!list_empty(&n->poll_list))) {
/* If n->poll_list is not empty, we need to mask irqs */
@@ -6615,24 +6716,24 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
}
EXPORT_SYMBOL(napi_complete_done);
-static void skb_defer_free_flush(struct softnet_data *sd)
+static void skb_defer_free_flush(void)
{
+ struct llist_node *free_list;
struct sk_buff *skb, *next;
+ struct skb_defer_node *sdn;
+ int node;
- /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
- if (!READ_ONCE(sd->defer_list))
- return;
+ for_each_node(node) {
+ sdn = this_cpu_ptr(net_hotdata.skb_defer_nodes) + node;
- spin_lock(&sd->defer_lock);
- skb = sd->defer_list;
- sd->defer_list = NULL;
- sd->defer_count = 0;
- spin_unlock(&sd->defer_lock);
+ if (llist_empty(&sdn->defer_list))
+ continue;
+ atomic_long_set(&sdn->defer_count, 0);
+ free_list = llist_del_all(&sdn->defer_list);
- while (skb != NULL) {
- next = skb->next;
- napi_consume_skb(skb, 1);
- skb = next;
+ llist_for_each_entry_safe(skb, next, free_list, ll_node) {
+ napi_consume_skb(skb, 1);
+ }
}
}
@@ -6647,8 +6748,7 @@ static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
}
/* Flush too old packets. If HZ < 1000, flush all packets */
- gro_flush(&napi->gro, HZ >= 1000);
- gro_normal_list(&napi->gro);
+ gro_flush_normal(&napi->gro, HZ >= 1000);
clear_bit(NAPI_STATE_SCHED, &napi->state);
}
@@ -6761,7 +6861,7 @@ count:
if (work > 0)
__NET_ADD_STATS(dev_net(napi->dev),
LINUX_MIB_BUSYPOLLRXPACKETS, work);
- skb_defer_free_flush(this_cpu_ptr(&softnet_data));
+ skb_defer_free_flush();
bpf_net_ctx_clear(bpf_net_ctx);
local_bh_enable();
@@ -6926,22 +7026,89 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-int dev_set_threaded(struct net_device *dev, bool threaded)
+static void napi_stop_kthread(struct napi_struct *napi)
+{
+ unsigned long val, new;
+
+ /* Wait until the napi STATE_THREADED is unset. */
+ while (true) {
+ val = READ_ONCE(napi->state);
+
+ /* If the napi kthread owns this napi or the napi is idle,
+ * STATE_THREADED can be unset here.
+ */
+ if ((val & NAPIF_STATE_SCHED_THREADED) ||
+ !(val & NAPIF_STATE_SCHED)) {
+ new = val & (~NAPIF_STATE_THREADED);
+ } else {
+ msleep(20);
+ continue;
+ }
+
+ if (try_cmpxchg(&napi->state, &val, new))
+ break;
+ }
+
+ /* Once STATE_THREADED is unset, wait for SCHED_THREADED to be unset by
+ * the kthread.
+ */
+ while (true) {
+ if (!test_bit(NAPI_STATE_SCHED_THREADED, &napi->state))
+ break;
+
+ msleep(20);
+ }
+
+ kthread_stop(napi->thread);
+ napi->thread = NULL;
+}
+
+int napi_set_threaded(struct napi_struct *napi,
+ enum netdev_napi_threaded threaded)
+{
+ if (threaded) {
+ if (!napi->thread) {
+ int err = napi_kthread_create(napi);
+
+ if (err)
+ return err;
+ }
+ }
+
+ if (napi->config)
+ napi->config->threaded = threaded;
+
+ /* Setting/unsetting threaded mode on a napi might not immediately
+ * take effect, if the current napi instance is actively being
+ * polled. In this case, the switch between threaded mode and
+ * softirq mode will happen in the next round of napi_schedule().
+ * This should not cause hiccups/stalls to the live traffic.
+ */
+ if (!threaded && napi->thread) {
+ napi_stop_kthread(napi);
+ } else {
+ /* Make sure kthread is created before THREADED bit is set. */
+ smp_mb__before_atomic();
+ assign_bit(NAPI_STATE_THREADED, &napi->state, threaded);
+ }
+
+ return 0;
+}
+
+int netif_set_threaded(struct net_device *dev,
+ enum netdev_napi_threaded threaded)
{
struct napi_struct *napi;
- int err = 0;
+ int i, err = 0;
netdev_assert_locked_or_invisible(dev);
- if (dev->threaded == threaded)
- return 0;
-
if (threaded) {
list_for_each_entry(napi, &dev->napi_list, dev_list) {
if (!napi->thread) {
err = napi_kthread_create(napi);
if (err) {
- threaded = false;
+ threaded = NETDEV_NAPI_THREADED_DISABLED;
break;
}
}
@@ -6950,23 +7117,33 @@ int dev_set_threaded(struct net_device *dev, bool threaded)
WRITE_ONCE(dev->threaded, threaded);
- /* Make sure kthread is created before THREADED bit
- * is set.
- */
- smp_mb__before_atomic();
-
- /* Setting/unsetting threaded mode on a napi might not immediately
- * take effect, if the current napi instance is actively being
- * polled. In this case, the switch between threaded mode and
- * softirq mode will happen in the next round of napi_schedule().
- * This should not cause hiccups/stalls to the live traffic.
- */
+ /* The error should not occur as the kthreads are already created. */
list_for_each_entry(napi, &dev->napi_list, dev_list)
- assign_bit(NAPI_STATE_THREADED, &napi->state, threaded);
+ WARN_ON_ONCE(napi_set_threaded(napi, threaded));
+
+ /* Override the config for all NAPIs even if currently not listed */
+ for (i = 0; i < dev->num_napi_configs; i++)
+ dev->napi_config[i].threaded = threaded;
return err;
}
-EXPORT_SYMBOL(dev_set_threaded);
+
+/**
+ * netif_threaded_enable() - enable threaded NAPIs
+ * @dev: net_device instance
+ *
+ * Enable threaded mode for the NAPI instances of the device. This may be useful
+ * for devices where multiple NAPI instances get scheduled by a single
+ * interrupt. Threaded NAPI allows moving the NAPI processing to cores other
+ * than the core where the IRQ is mapped.
+ *
+ * This function should be called before @dev is registered.
+ */
+void netif_threaded_enable(struct net_device *dev)
+{
+ WARN_ON_ONCE(netif_set_threaded(dev, NETDEV_NAPI_THREADED_ENABLED));
+}
+EXPORT_SYMBOL(netif_threaded_enable);
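
Illustrative sketch, not part of the patch: the kernel-doc above says netif_threaded_enable() should run before the device is registered, so the natural call site is driver probe. The foo_* driver below is hypothetical.

static int foo_probe(struct foo_hw *hw)
{
        struct net_device *netdev;
        int err;

        netdev = alloc_etherdev(sizeof(struct foo_priv));
        if (!netdev)
                return -ENOMEM;

        /* This (hypothetical) hardware schedules several NAPIs from one
         * interrupt, so default to threaded NAPI before registration.
         */
        netif_threaded_enable(netdev);

        err = register_netdev(netdev);
        if (err)
                free_netdev(netdev);
        return err;
}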
/**
* netif_queue_set_napi - Associate queue with the napi
@@ -7182,6 +7359,8 @@ static void napi_restore_config(struct napi_struct *n)
napi_hash_add(n);
n->config->napi_id = n->napi_id;
}
+
+ WARN_ON_ONCE(napi_set_threaded(n, n->config->threaded));
}
static void napi_save_config(struct napi_struct *n)
@@ -7278,8 +7457,9 @@ void netif_napi_add_weight_locked(struct net_device *dev,
* Clear dev->threaded if kthread creation failed so that
* threaded mode will not be enabled in napi_enable().
*/
- if (dev->threaded && napi_kthread_create(napi))
- dev->threaded = false;
+ if (napi_get_threaded_config(dev, napi))
+ if (napi_kthread_create(napi))
+ dev->threaded = NETDEV_NAPI_THREADED_DISABLED;
netif_napi_set_irq_locked(napi, -1);
}
EXPORT_SYMBOL(netif_napi_add_weight_locked);
@@ -7448,8 +7628,7 @@ static int __napi_poll(struct napi_struct *n, bool *repoll)
}
/* Flush too old packets. If HZ < 1000, flush all packets */
- gro_flush(&n->gro, HZ >= 1000);
- gro_normal_list(&n->gro);
+ gro_flush_normal(&n->gro, HZ >= 1000);
/* Some drivers may have called napi_schedule
* prior to exhausting their budget.
@@ -7541,7 +7720,7 @@ static void napi_threaded_poll_loop(struct napi_struct *napi)
local_irq_disable();
net_rps_action_and_irq_enable(sd);
}
- skb_defer_free_flush(sd);
+ skb_defer_free_flush();
bpf_net_ctx_clear(bpf_net_ctx);
local_bh_enable();
@@ -7583,7 +7762,7 @@ start:
for (;;) {
struct napi_struct *n;
- skb_defer_free_flush(sd);
+ skb_defer_free_flush();
if (list_empty(&list)) {
if (list_empty(&repoll)) {
@@ -9387,12 +9566,12 @@ void dev_set_rx_mode(struct net_device *dev)
}
/**
- * dev_get_flags - get flags reported to userspace
- * @dev: device
+ * netif_get_flags() - get flags reported to userspace
+ * @dev: device
*
- * Get the combination of flag bits exported through APIs to userspace.
+ * Get the combination of flag bits exported through APIs to userspace.
*/
-unsigned int dev_get_flags(const struct net_device *dev)
+unsigned int netif_get_flags(const struct net_device *dev)
{
unsigned int flags;
@@ -9415,7 +9594,7 @@ unsigned int dev_get_flags(const struct net_device *dev)
return flags;
}
-EXPORT_SYMBOL(dev_get_flags);
+EXPORT_SYMBOL(netif_get_flags);
int __dev_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack)
@@ -9527,7 +9706,7 @@ int netif_change_flags(struct net_device *dev, unsigned int flags,
return ret;
}
-int __dev_set_mtu(struct net_device *dev, int new_mtu)
+int __netif_set_mtu(struct net_device *dev, int new_mtu)
{
const struct net_device_ops *ops = dev->netdev_ops;
@@ -9538,7 +9717,7 @@ int __dev_set_mtu(struct net_device *dev, int new_mtu)
WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
-EXPORT_SYMBOL(__dev_set_mtu);
+EXPORT_SYMBOL_NS_GPL(__netif_set_mtu, "NETDEV_INTERNAL");
int dev_validate_mtu(struct net_device *dev, int new_mtu,
struct netlink_ext_ack *extack)
@@ -9557,18 +9736,22 @@ int dev_validate_mtu(struct net_device *dev, int new_mtu,
}
/**
- * netif_set_mtu_ext - Change maximum transfer unit
- * @dev: device
- * @new_mtu: new transfer unit
- * @extack: netlink extended ack
+ * netif_set_mtu_ext() - Change maximum transfer unit
+ * @dev: device
+ * @new_mtu: new transfer unit
+ * @extack: netlink extended ack
+ *
+ * Change the maximum transfer size of the network device.
*
- * Change the maximum transfer size of the network device.
+ * Return: 0 on success, -errno on failure.
*/
int netif_set_mtu_ext(struct net_device *dev, int new_mtu,
struct netlink_ext_ack *extack)
{
int err, orig_mtu;
+ netdev_ops_assert_locked(dev);
+
if (new_mtu == dev->mtu)
return 0;
@@ -9585,7 +9768,7 @@ int netif_set_mtu_ext(struct net_device *dev, int new_mtu,
return err;
orig_mtu = dev->mtu;
- err = __dev_set_mtu(dev, new_mtu);
+ err = __netif_set_mtu(dev, new_mtu);
if (!err) {
err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
@@ -9595,7 +9778,7 @@ int netif_set_mtu_ext(struct net_device *dev, int new_mtu,
/* setting mtu back and notifying everyone again,
* so that they have a chance to revert changes.
*/
- __dev_set_mtu(dev, orig_mtu);
+ __netif_set_mtu(dev, orig_mtu);
call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
new_mtu);
}
@@ -9649,13 +9832,15 @@ void netif_set_group(struct net_device *dev, int new_group)
}
/**
- * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
- * @dev: device
- * @addr: new address
- * @extack: netlink extended ack
+ * netif_pre_changeaddr_notify() - Call NETDEV_PRE_CHANGEADDR.
+ * @dev: device
+ * @addr: new address
+ * @extack: netlink extended ack
+ *
+ * Return: 0 on success, -errno on failure.
*/
-int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
- struct netlink_ext_ack *extack)
+int netif_pre_changeaddr_notify(struct net_device *dev, const char *addr,
+ struct netlink_ext_ack *extack)
{
struct netdev_notifier_pre_changeaddr_info info = {
.info.dev = dev,
@@ -9667,7 +9852,7 @@ int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
return notifier_to_errno(rc);
}
-EXPORT_SYMBOL(dev_pre_changeaddr_notify);
+EXPORT_SYMBOL_NS_GPL(netif_pre_changeaddr_notify, "NETDEV_INTERNAL");
int netif_set_mac_address(struct net_device *dev, struct sockaddr_storage *ss,
struct netlink_ext_ack *extack)
@@ -9681,7 +9866,7 @@ int netif_set_mac_address(struct net_device *dev, struct sockaddr_storage *ss,
return -EINVAL;
if (!netif_device_present(dev))
return -ENODEV;
- err = dev_pre_changeaddr_notify(dev, ss->__data, extack);
+ err = netif_pre_changeaddr_notify(dev, ss->__data, extack);
if (err)
return err;
if (memcmp(dev->dev_addr, ss->__data, dev->addr_len)) {
@@ -9698,7 +9883,7 @@ int netif_set_mac_address(struct net_device *dev, struct sockaddr_storage *ss,
DECLARE_RWSEM(dev_addr_sem);
/* "sa" is a true struct sockaddr with limited "sa_data" member. */
-int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
+int netif_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
{
size_t size = sizeof(sa->sa_data_min);
struct net_device *dev;
@@ -9724,7 +9909,7 @@ unlock:
up_read(&dev_addr_sem);
return ret;
}
-EXPORT_SYMBOL(dev_get_mac_address);
+EXPORT_SYMBOL_NS_GPL(netif_get_mac_address, "NETDEV_INTERNAL");
int netif_change_carrier(struct net_device *dev, bool new_carrier)
{
@@ -9777,16 +9962,17 @@ int dev_get_phys_port_name(struct net_device *dev,
}
/**
- * dev_get_port_parent_id - Get the device's port parent identifier
- * @dev: network device
- * @ppid: pointer to a storage for the port's parent identifier
- * @recurse: allow/disallow recursion to lower devices
+ * netif_get_port_parent_id() - Get the device's port parent identifier
+ * @dev: network device
+ * @ppid: pointer to storage for the port's parent identifier
+ * @recurse: allow/disallow recursion to lower devices
+ *
+ * Get the device's port parent identifier.
*
- * Get the devices's port parent identifier
+ * Return: 0 on success, -errno on failure.
*/
-int dev_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid,
- bool recurse)
+int netif_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid, bool recurse)
{
const struct net_device_ops *ops = dev->netdev_ops;
struct netdev_phys_item_id first = { };
@@ -9805,7 +9991,7 @@ int dev_get_port_parent_id(struct net_device *dev,
return err;
netdev_for_each_lower_dev(dev, lower_dev, iter) {
- err = dev_get_port_parent_id(lower_dev, ppid, true);
+ err = netif_get_port_parent_id(lower_dev, ppid, true);
if (err)
break;
if (!first.id_len)
@@ -9816,7 +10002,7 @@ int dev_get_port_parent_id(struct net_device *dev,
return err;
}
-EXPORT_SYMBOL(dev_get_port_parent_id);
+EXPORT_SYMBOL(netif_get_port_parent_id);
/**
* netdev_port_same_parent_id - Indicate if two network devices have
@@ -9829,8 +10015,8 @@ bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
struct netdev_phys_item_id a_id = { };
struct netdev_phys_item_id b_id = { };
- if (dev_get_port_parent_id(a, &a_id, true) ||
- dev_get_port_parent_id(b, &b_id, true))
+ if (netif_get_port_parent_id(a, &a_id, true) ||
+ netif_get_port_parent_id(b, &b_id, true))
return false;
return netdev_phys_item_id_same(&a_id, &b_id);
@@ -10364,7 +10550,8 @@ int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
goto unlock;
}
- bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
+ bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog,
+ attr->link_create.attach_type);
link->dev = dev;
link->flags = attr->link_create.flags;
@@ -10730,12 +10917,14 @@ sync_lower:
* *before* calling udp_tunnel_get_rx_info,
* but *after* calling udp_tunnel_drop_rx_info.
*/
+ udp_tunnel_nic_lock(dev);
if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
dev->features = features;
udp_tunnel_get_rx_info(dev);
} else {
udp_tunnel_drop_rx_info(dev);
}
+ udp_tunnel_nic_unlock(dev);
}
if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
@@ -11715,7 +11904,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev->priv_len = sizeof_priv;
- ref_tracker_dir_init(&dev->refcnt_tracker, 128, name);
+ ref_tracker_dir_init(&dev->refcnt_tracker, 128, "netdev");
#ifdef CONFIG_PCPU_DEV_REFCNT
dev->pcpu_refcnt = alloc_percpu(int);
if (!dev->pcpu_refcnt)
@@ -11789,6 +11978,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
goto free_all;
dev->cfg_pending = dev->cfg;
+ dev->num_napi_configs = maxqs;
napi_config_sz = array_size(maxqs, sizeof(*dev->napi_config));
dev->napi_config = kvzalloc(napi_config_sz, GFP_KERNEL_ACCOUNT);
if (!dev->napi_config)
@@ -11937,21 +12127,8 @@ static void netdev_rss_contexts_free(struct net_device *dev)
mutex_lock(&dev->ethtool->rss_lock);
xa_for_each(&dev->ethtool->rss_ctx, context, ctx) {
- struct ethtool_rxfh_param rxfh;
-
- rxfh.indir = ethtool_rxfh_context_indir(ctx);
- rxfh.key = ethtool_rxfh_context_key(ctx);
- rxfh.hfunc = ctx->hfunc;
- rxfh.input_xfrm = ctx->input_xfrm;
- rxfh.rss_context = context;
- rxfh.rss_delete = true;
-
xa_erase(&dev->ethtool->rss_ctx, context);
- if (dev->ethtool_ops->create_rxfh_context)
- dev->ethtool_ops->remove_rxfh_context(dev, ctx,
- context, NULL);
- else
- dev->ethtool_ops->set_rxfh(dev, &rxfh, NULL);
+ dev->ethtool_ops->remove_rxfh_context(dev, ctx, context, NULL);
kfree(ctx);
}
xa_destroy(&dev->ethtool->rss_ctx);
@@ -11999,6 +12176,35 @@ static void dev_memory_provider_uninstall(struct net_device *dev)
}
}
+/* devices must be UP and netdev_lock()'d */
+static void netif_close_many_and_unlock(struct list_head *close_head)
+{
+ struct net_device *dev, *tmp;
+
+ netif_close_many(close_head, false);
+
+ /* ... now unlock them */
+ list_for_each_entry_safe(dev, tmp, close_head, close_list) {
+ netdev_unlock(dev);
+ list_del_init(&dev->close_list);
+ }
+}
+
+static void netif_close_many_and_unlock_cond(struct list_head *close_head)
+{
+#ifdef CONFIG_LOCKDEP
+ /* We can only track up to MAX_LOCK_DEPTH locks per task.
+ *
+ * Reserve half the available slots for additional locks possibly
+ * taken by notifiers and (soft)irqs.
+ */
+ unsigned int limit = MAX_LOCK_DEPTH / 2;
+
+ if (lockdep_depth(current) > limit)
+ netif_close_many_and_unlock(close_head);
+#endif
+}
+
void unregister_netdevice_many_notify(struct list_head *head,
u32 portid, const struct nlmsghdr *nlh)
{
@@ -12031,20 +12237,21 @@ void unregister_netdevice_many_notify(struct list_head *head,
/* If device is running, close it first. Start with ops locked... */
list_for_each_entry(dev, head, unreg_list) {
+ if (!(dev->flags & IFF_UP))
+ continue;
if (netdev_need_ops_lock(dev)) {
list_add_tail(&dev->close_list, &close_head);
netdev_lock(dev);
}
+ netif_close_many_and_unlock_cond(&close_head);
}
- dev_close_many(&close_head, true);
- /* ... now unlock them and go over the rest. */
+ netif_close_many_and_unlock(&close_head);
+ /* ... now go over the rest. */
list_for_each_entry(dev, head, unreg_list) {
- if (netdev_need_ops_lock(dev))
- netdev_unlock(dev);
- else
+ if (!netdev_need_ops_lock(dev))
list_add_tail(&dev->close_list, &close_head);
}
- dev_close_many(&close_head, true);
+ netif_close_many(&close_head, true);
list_for_each_entry(dev, head, unreg_list) {
/* And unlink it from device chain. */
@@ -12819,7 +13026,6 @@ static int __init net_dev_init(void)
sd->cpu = i;
#endif
INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
- spin_lock_init(&sd->defer_lock);
gro_init(&sd->backlog.gro);
sd->backlog.poll = process_backlog;
@@ -12829,6 +13035,11 @@ static int __init net_dev_init(void)
if (net_page_pool_create(i))
goto out;
}
+ net_hotdata.skb_defer_nodes =
+ __alloc_percpu(sizeof(struct skb_defer_node) * nr_node_ids,
+ __alignof__(struct skb_defer_node));
+ if (!net_hotdata.skb_defer_nodes)
+ goto out;
if (use_backlog_threads())
smpboot_register_percpu_thread(&backlog_threads);
diff --git a/net/core/dev.h b/net/core/dev.h
index e93f36b7ddf3..900880e8b5b4 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -315,6 +315,28 @@ static inline void napi_set_irq_suspend_timeout(struct napi_struct *n,
WRITE_ONCE(n->irq_suspend_timeout, timeout);
}
+static inline enum netdev_napi_threaded napi_get_threaded(struct napi_struct *n)
+{
+ if (test_bit(NAPI_STATE_THREADED, &n->state))
+ return NETDEV_NAPI_THREADED_ENABLED;
+
+ return NETDEV_NAPI_THREADED_DISABLED;
+}
+
+static inline enum netdev_napi_threaded
+napi_get_threaded_config(struct net_device *dev, struct napi_struct *n)
+{
+ if (n->config)
+ return n->config->threaded;
+ return dev->threaded;
+}
+
+int napi_set_threaded(struct napi_struct *n,
+ enum netdev_napi_threaded threaded);
+
+int netif_set_threaded(struct net_device *dev,
+ enum netdev_napi_threaded threaded);
+
int rps_cpumask_housekeeping(struct cpumask *mask);
#if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
@@ -335,7 +357,7 @@ static inline void napi_assert_will_not_race(const struct napi_struct *napi)
WARN_ON(READ_ONCE(napi->list_owner) != -1);
}
-void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
+void kick_defer_list_purge(unsigned int cpu);
#define XMIT_RECURSION_LIMIT 8
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 90716bd736f3..76c91f224886 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -603,7 +603,7 @@ int dev_addr_add(struct net_device *dev, const unsigned char *addr,
ASSERT_RTNL();
- err = dev_pre_changeaddr_notify(dev, addr, NULL);
+ err = netif_pre_changeaddr_notify(dev, addr, NULL);
if (err)
return err;
err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
diff --git a/net/core/dev_api.c b/net/core/dev_api.c
index 1bf0153195f2..f28852078aa6 100644
--- a/net/core/dev_api.c
+++ b/net/core/dev_api.c
@@ -367,3 +367,16 @@ void netdev_state_change(struct net_device *dev)
netdev_unlock_ops(dev);
}
EXPORT_SYMBOL(netdev_state_change);
+
+int dev_set_threaded(struct net_device *dev,
+ enum netdev_napi_threaded threaded)
+{
+ int ret;
+
+ netdev_lock(dev);
+ ret = netif_set_threaded(dev, threaded);
+ netdev_unlock(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(dev_set_threaded);
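
Illustrative sketch, not part of the patch: with the split above, netif_set_threaded() expects the caller to hold the per-device instance lock, while the exported dev_set_threaded() wrapper takes it itself. A hypothetical out-of-line caller would therefore use the wrapper:

static int foo_set_napi_threaded(struct net_device *dev, bool enable)
{
        /* dev_set_threaded() handles netdev_lock()/netdev_unlock() itself. */
        return dev_set_threaded(dev, enable ?
                                NETDEV_NAPI_THREADED_ENABLED :
                                NETDEV_NAPI_THREADED_DISABLED);
}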
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 616479e71466..ad54b12d4b4c 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -147,7 +147,7 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm
switch (cmd) {
case SIOCGIFFLAGS: /* Get interface flags */
- ifr->ifr_flags = (short) dev_get_flags(dev);
+ ifr->ifr_flags = (short)netif_get_flags(dev);
return 0;
case SIOCGIFMETRIC: /* Get the metric on the interface
@@ -464,8 +464,15 @@ int generic_hwtstamp_get_lower(struct net_device *dev,
if (!netif_device_present(dev))
return -ENODEV;
- if (ops->ndo_hwtstamp_get)
- return dev_get_hwtstamp_phylib(dev, kernel_cfg);
+ if (ops->ndo_hwtstamp_get) {
+ int err;
+
+ netdev_lock_ops(dev);
+ err = dev_get_hwtstamp_phylib(dev, kernel_cfg);
+ netdev_unlock_ops(dev);
+
+ return err;
+ }
/* Legacy path: unconverted lower driver */
return generic_hwtstamp_ioctl_lower(dev, SIOCGHWTSTAMP, kernel_cfg);
@@ -481,8 +488,15 @@ int generic_hwtstamp_set_lower(struct net_device *dev,
if (!netif_device_present(dev))
return -ENODEV;
- if (ops->ndo_hwtstamp_set)
- return dev_set_hwtstamp_phylib(dev, kernel_cfg, extack);
+ if (ops->ndo_hwtstamp_set) {
+ int err;
+
+ netdev_lock_ops(dev);
+ err = dev_set_hwtstamp_phylib(dev, kernel_cfg, extack);
+ netdev_unlock_ops(dev);
+
+ return err;
+ }
/* Legacy path: unconverted lower driver */
return generic_hwtstamp_ioctl_lower(dev, SIOCSHWTSTAMP, kernel_cfg);
@@ -728,7 +742,8 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
switch (cmd) {
case SIOCGIFHWADDR:
dev_load(net, ifr->ifr_name);
- ret = dev_get_mac_address(&ifr->ifr_hwaddr, net, ifr->ifr_name);
+ ret = netif_get_mac_address(&ifr->ifr_hwaddr, net,
+ ifr->ifr_name);
if (colon)
*colon = ':';
return ret;
diff --git a/net/core/devmem.c b/net/core/devmem.c
index b3a62ca0df65..d9de31a6cc7f 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -70,14 +70,13 @@ void __net_devmem_dmabuf_binding_free(struct work_struct *wq)
gen_pool_destroy(binding->chunk_pool);
dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
- DMA_FROM_DEVICE);
+ binding->direction);
dma_buf_detach(binding->dmabuf, binding->attachment);
dma_buf_put(binding->dmabuf);
xa_destroy(&binding->bound_rxqs);
kvfree(binding->tx_vec);
kfree(binding);
}
-EXPORT_SYMBOL(__net_devmem_dmabuf_binding_free);
struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
@@ -177,6 +176,7 @@ err_close_rxq:
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
+ struct device *dma_dev,
enum dma_data_direction direction,
unsigned int dmabuf_fd, struct netdev_nl_sock *priv,
struct netlink_ext_ack *extack)
@@ -189,6 +189,11 @@ net_devmem_bind_dmabuf(struct net_device *dev,
unsigned long virtual;
int err;
+ if (!dma_dev) {
+ NL_SET_ERR_MSG(extack, "Device doesn't support DMA");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
dmabuf = dma_buf_get(dmabuf_fd);
if (IS_ERR(dmabuf))
return ERR_CAST(dmabuf);
@@ -208,8 +213,9 @@ net_devmem_bind_dmabuf(struct net_device *dev,
mutex_init(&binding->lock);
binding->dmabuf = dmabuf;
+ binding->direction = direction;
- binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
+ binding->attachment = dma_buf_attach(binding->dmabuf, dma_dev);
if (IS_ERR(binding->attachment)) {
err = PTR_ERR(binding->attachment);
NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
@@ -312,7 +318,7 @@ err_tx_vec:
kvfree(binding->tx_vec);
err_unmap:
dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
- DMA_FROM_DEVICE);
+ direction);
err_detach:
dma_buf_detach(dmabuf, binding->attachment);
err_free_binding:
diff --git a/net/core/devmem.h b/net/core/devmem.h
index 0a3b28ba5c13..101150d761af 100644
--- a/net/core/devmem.h
+++ b/net/core/devmem.h
@@ -56,6 +56,9 @@ struct net_devmem_dmabuf_binding {
*/
u32 id;
+ /* DMA direction, FROM_DEVICE for Rx binding, TO_DEVICE for Tx. */
+ enum dma_data_direction direction;
+
/* Array of net_iov pointers for this binding, sorted by virtual
* address. This array is convenient to map the virtual addresses to
* net_iovs in the TX path.
@@ -82,6 +85,7 @@ struct dmabuf_genpool_chunk_owner {
void __net_devmem_dmabuf_binding_free(struct work_struct *wq);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
+ struct device *dma_dev,
enum dma_data_direction direction,
unsigned int dmabuf_fd, struct netdev_nl_sock *priv,
struct netlink_ext_ack *extack);
@@ -165,12 +169,9 @@ static inline void net_devmem_put_net_iov(struct net_iov *niov)
{
}
-static inline void __net_devmem_dmabuf_binding_free(struct work_struct *wq)
-{
-}
-
static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
+ struct device *dma_dev,
enum dma_data_direction direction,
unsigned int dmabuf_fd,
struct netdev_nl_sock *priv,
diff --git a/net/core/dst.c b/net/core/dst.c
index 795ca07e28a4..e9d35f49c9e7 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -145,12 +145,12 @@ void dst_dev_put(struct dst_entry *dst)
{
struct net_device *dev = dst->dev;
- dst->obsolete = DST_OBSOLETE_DEAD;
+ WRITE_ONCE(dst->obsolete, DST_OBSOLETE_DEAD);
if (dst->ops->ifdown)
dst->ops->ifdown(dst, dev);
- dst->input = dst_discard;
- dst->output = dst_discard_out;
- dst->dev = blackhole_netdev;
+ WRITE_ONCE(dst->input, dst_discard);
+ WRITE_ONCE(dst->output, dst_discard_out);
+ rcu_assign_pointer(dst->dev_rcu, blackhole_netdev);
netdev_ref_replace(dev, blackhole_netdev, &dst->dev_tracker,
GFP_ATOMIC);
}
@@ -263,7 +263,7 @@ unsigned int dst_blackhole_mtu(const struct dst_entry *dst)
{
unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
- return mtu ? : dst->dev->mtu;
+ return mtu ? : dst_dev(dst)->mtu;
}
EXPORT_SYMBOL_GPL(dst_blackhole_mtu);
diff --git a/net/core/dst_cache.c b/net/core/dst_cache.c
index 93a04d18e505..9ab4902324e1 100644
--- a/net/core/dst_cache.c
+++ b/net/core/dst_cache.c
@@ -52,7 +52,7 @@ static struct dst_entry *dst_cache_per_cpu_get(struct dst_cache *dst_cache,
if (unlikely(!time_after(idst->refresh_ts,
READ_ONCE(dst_cache->reset_ts)) ||
- (dst->obsolete && !dst->ops->check(dst, idst->cookie)))) {
+ (READ_ONCE(dst->obsolete) && !dst->ops->check(dst, idst->cookie)))) {
dst_cache_per_cpu_dst_set(idst, NULL, 0);
dst_release(dst);
goto fail;
diff --git a/net/core/filter.c b/net/core/filter.c
index 7a72f766aacf..76628df1fc82 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -122,6 +122,7 @@ EXPORT_SYMBOL_GPL(copy_bpf_fprog_from_user);
* @sk: sock associated with &sk_buff
* @skb: buffer to filter
* @cap: limit on how short the eBPF program may trim the packet
+ * @reason: record drop reason on errors (negative return value)
*
* Run the eBPF program and then cut skb->data to correct size returned by
* the program. If pkt_len is 0 we toss packet. If skb->len is smaller
@@ -130,7 +131,8 @@ EXPORT_SYMBOL_GPL(copy_bpf_fprog_from_user);
* be accepted or -EPERM if the packet should be tossed.
*
*/
-int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb,
+ unsigned int cap, enum skb_drop_reason *reason)
{
int err;
struct sk_filter *filter;
@@ -142,15 +144,20 @@ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
*/
if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
+ *reason = SKB_DROP_REASON_PFMEMALLOC;
return -ENOMEM;
}
err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
- if (err)
+ if (err) {
+ *reason = SKB_DROP_REASON_SOCKET_FILTER;
return err;
+ }
err = security_sock_rcv_skb(sk, skb);
- if (err)
+ if (err) {
+ *reason = SKB_DROP_REASON_SECURITY_HOOK;
return err;
+ }
rcu_read_lock();
filter = rcu_dereference(sk->sk_filter);
@@ -162,6 +169,8 @@ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
skb->sk = save_sk;
err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
+ if (err)
+ *reason = SKB_DROP_REASON_SOCKET_FILTER;
}
rcu_read_unlock();
@@ -2272,6 +2281,7 @@ static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(dst))
goto out_drop;
+ skb_dst_drop(skb);
skb_dst_set(skb, dst);
} else if (nh->nh_family != AF_INET6) {
goto out_drop;
@@ -2364,7 +2374,7 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
struct flowi4 fl4 = {
.flowi4_flags = FLOWI_FLAG_ANYSRC,
.flowi4_mark = skb->mark,
- .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h)),
+ .flowi4_dscp = ip4h_dscp(ip4h),
.flowi4_oif = dev->ifindex,
.flowi4_proto = ip4h->protocol,
.daddr = ip4h->daddr,
@@ -2380,6 +2390,7 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
goto out_drop;
}
+ skb_dst_drop(skb);
skb_dst_set(skb, &rt->dst);
}
@@ -4144,34 +4155,45 @@ static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
return 0;
}
-static void bpf_xdp_shrink_data_zc(struct xdp_buff *xdp, int shrink,
- enum xdp_mem_type mem_type, bool release)
+static struct xdp_buff *bpf_xdp_shrink_data_zc(struct xdp_buff *xdp, int shrink,
+ bool tail, bool release)
{
- struct xdp_buff *zc_frag = xsk_buff_get_tail(xdp);
+ struct xdp_buff *zc_frag = tail ? xsk_buff_get_tail(xdp) :
+ xsk_buff_get_head(xdp);
if (release) {
- xsk_buff_del_tail(zc_frag);
- __xdp_return(0, mem_type, false, zc_frag);
+ xsk_buff_del_frag(zc_frag);
} else {
- zc_frag->data_end -= shrink;
+ if (tail)
+ zc_frag->data_end -= shrink;
+ else
+ zc_frag->data += shrink;
}
+
+ return zc_frag;
}
static bool bpf_xdp_shrink_data(struct xdp_buff *xdp, skb_frag_t *frag,
- int shrink)
+ int shrink, bool tail)
{
enum xdp_mem_type mem_type = xdp->rxq->mem.type;
bool release = skb_frag_size(frag) == shrink;
+ netmem_ref netmem = skb_frag_netmem(frag);
+ struct xdp_buff *zc_frag = NULL;
if (mem_type == MEM_TYPE_XSK_BUFF_POOL) {
- bpf_xdp_shrink_data_zc(xdp, shrink, mem_type, release);
- goto out;
+ netmem = 0;
+ zc_frag = bpf_xdp_shrink_data_zc(xdp, shrink, tail, release);
}
- if (release)
- __xdp_return(skb_frag_netmem(frag), mem_type, false, NULL);
+ if (release) {
+ __xdp_return(netmem, mem_type, false, zc_frag);
+ } else {
+ if (!tail)
+ skb_frag_off_add(frag, shrink);
+ skb_frag_size_sub(frag, shrink);
+ }
-out:
return release;
}
@@ -4189,18 +4211,15 @@ static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset)
len_free += shrink;
offset -= shrink;
- if (bpf_xdp_shrink_data(xdp, frag, shrink)) {
+ if (bpf_xdp_shrink_data(xdp, frag, shrink, true))
n_frags_free++;
- } else {
- skb_frag_size_sub(frag, shrink);
- break;
- }
}
sinfo->nr_frags -= n_frags_free;
sinfo->xdp_frags_size -= len_free;
if (unlikely(!sinfo->nr_frags)) {
xdp_buff_clear_frags_flag(xdp);
+ xdp_buff_clear_frag_pfmemalloc(xdp);
xdp->data_end -= offset;
}
@@ -6011,7 +6030,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
fl4.flowi4_iif = params->ifindex;
fl4.flowi4_oif = 0;
}
- fl4.flowi4_tos = params->tos & INET_DSCP_MASK;
+ fl4.flowi4_dscp = inet_dsfield_to_dscp(params->tos);
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
fl4.flowi4_flags = 0;
@@ -6758,7 +6777,6 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
int dif, int sdif, u8 family, u8 proto)
{
- struct inet_hashinfo *hinfo = net->ipv4.tcp_death_row.hashinfo;
bool refcounted = false;
struct sock *sk = NULL;
@@ -6767,7 +6785,7 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
__be32 dst4 = tuple->ipv4.daddr;
if (proto == IPPROTO_TCP)
- sk = __inet_lookup(net, hinfo, NULL, 0,
+ sk = __inet_lookup(net, NULL, 0,
src4, tuple->ipv4.sport,
dst4, tuple->ipv4.dport,
dif, sdif, &refcounted);
@@ -6781,7 +6799,7 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
if (proto == IPPROTO_TCP)
- sk = __inet6_lookup(net, hinfo, NULL, 0,
+ sk = __inet6_lookup(net, NULL, 0,
src6, tuple->ipv6.sport,
dst6, ntohs(tuple->ipv6.dport),
dif, sdif, &refcounted);
@@ -7422,6 +7440,8 @@ u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
offsetof(struct xdp_sock, FIELD)); \
} while (0)
+ BTF_TYPE_EMIT(struct bpf_xdp_sock);
+
switch (si->off) {
case offsetof(struct bpf_xdp_sock, queue_id):
BPF_XDP_SOCK_GET(queue_id);
@@ -8690,7 +8710,7 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
if (size != sizeof(__u64))
return false;
break;
- case offsetof(struct __sk_buff, sk):
+ case bpf_ctx_range_ptr(struct __sk_buff, sk):
if (type == BPF_WRITE || size != sizeof(__u64))
return false;
info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
@@ -9268,20 +9288,24 @@ static bool sock_addr_is_valid_access(int off, int size,
return false;
}
break;
- case offsetof(struct bpf_sock_addr, sk):
+ case bpf_ctx_range_ptr(struct bpf_sock_addr, sk):
if (type != BPF_READ)
return false;
if (size != sizeof(__u64))
return false;
info->reg_type = PTR_TO_SOCKET;
break;
- default:
- if (type == BPF_READ) {
- if (size != size_default)
- return false;
- } else {
+ case bpf_ctx_range(struct bpf_sock_addr, user_family):
+ case bpf_ctx_range(struct bpf_sock_addr, family):
+ case bpf_ctx_range(struct bpf_sock_addr, type):
+ case bpf_ctx_range(struct bpf_sock_addr, protocol):
+ if (type != BPF_READ)
return false;
- }
+ if (size != size_default)
+ return false;
+ break;
+ default:
+ return false;
}
return true;
@@ -9318,17 +9342,17 @@ static bool sock_ops_is_valid_access(int off, int size,
if (size != sizeof(__u64))
return false;
break;
- case offsetof(struct bpf_sock_ops, sk):
+ case bpf_ctx_range_ptr(struct bpf_sock_ops, sk):
if (size != sizeof(__u64))
return false;
info->reg_type = PTR_TO_SOCKET_OR_NULL;
break;
- case offsetof(struct bpf_sock_ops, skb_data):
+ case bpf_ctx_range_ptr(struct bpf_sock_ops, skb_data):
if (size != sizeof(__u64))
return false;
info->reg_type = PTR_TO_PACKET;
break;
- case offsetof(struct bpf_sock_ops, skb_data_end):
+ case bpf_ctx_range_ptr(struct bpf_sock_ops, skb_data_end):
if (size != sizeof(__u64))
return false;
info->reg_type = PTR_TO_PACKET_END;
@@ -9337,7 +9361,7 @@ static bool sock_ops_is_valid_access(int off, int size,
bpf_ctx_record_field_size(info, size_default);
return bpf_ctx_narrow_access_ok(off, size,
size_default);
- case offsetof(struct bpf_sock_ops, skb_hwtstamp):
+ case bpf_ctx_range(struct bpf_sock_ops, skb_hwtstamp):
if (size != sizeof(__u64))
return false;
break;
@@ -9407,17 +9431,17 @@ static bool sk_msg_is_valid_access(int off, int size,
return false;
switch (off) {
- case offsetof(struct sk_msg_md, data):
+ case bpf_ctx_range_ptr(struct sk_msg_md, data):
info->reg_type = PTR_TO_PACKET;
if (size != sizeof(__u64))
return false;
break;
- case offsetof(struct sk_msg_md, data_end):
+ case bpf_ctx_range_ptr(struct sk_msg_md, data_end):
info->reg_type = PTR_TO_PACKET_END;
if (size != sizeof(__u64))
return false;
break;
- case offsetof(struct sk_msg_md, sk):
+ case bpf_ctx_range_ptr(struct sk_msg_md, sk):
if (size != sizeof(__u64))
return false;
info->reg_type = PTR_TO_SOCKET;
@@ -9449,6 +9473,9 @@ static bool flow_dissector_is_valid_access(int off, int size,
if (off < 0 || off >= sizeof(struct __sk_buff))
return false;
+ if (off % size != 0)
+ return false;
+
if (type == BPF_WRITE)
return false;
@@ -11623,7 +11650,7 @@ static bool sk_lookup_is_valid_access(int off, int size,
return false;
switch (off) {
- case offsetof(struct bpf_sk_lookup, sk):
+ case bpf_ctx_range_ptr(struct bpf_sk_lookup, sk):
info->reg_type = PTR_TO_SOCKET_OR_NULL;
return size == sizeof(__u64);
@@ -11978,6 +12005,16 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return func;
}
+/**
+ * bpf_skb_meta_pointer() - Gets a mutable pointer within the skb metadata area.
+ * @skb: socket buffer carrying the metadata
+ * @offset: offset into the metadata area, must be <= skb_metadata_len()
+ */
+void *bpf_skb_meta_pointer(struct sk_buff *skb, u32 offset)
+{
+ return skb_metadata_end(skb) - skb_metadata_len(skb) + offset;
+}
+
__bpf_kfunc_start_defs();
__bpf_kfunc int bpf_dynptr_from_skb(struct __sk_buff *s, u64 flags,
struct bpf_dynptr *ptr__uninit)
@@ -11995,6 +12032,42 @@ __bpf_kfunc int bpf_dynptr_from_skb(struct __sk_buff *s, u64 flags,
return 0;
}
+/**
+ * bpf_dynptr_from_skb_meta() - Initialize a dynptr to the skb metadata area.
+ * @skb_: socket buffer carrying the metadata
+ * @flags: future use, must be zero
+ * @ptr__uninit: dynptr to initialize
+ *
+ * Set up a dynptr for access to the metadata area allocated earlier from the
+ * XDP context with bpf_xdp_adjust_meta(). Serves as an alternative to
+ * &__sk_buff->data_meta.
+ *
+ * If passed @skb_ is a clone which shares the data with the original, the
+ * dynptr will be read-only. This limitation may be lifted in the future.
+ *
+ * Return:
+ * * %0 - dynptr ready to use
+ * * %-EINVAL - invalid flags, dynptr set to null
+ */
+__bpf_kfunc int bpf_dynptr_from_skb_meta(struct __sk_buff *skb_, u64 flags,
+ struct bpf_dynptr *ptr__uninit)
+{
+ struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)ptr__uninit;
+ struct sk_buff *skb = (struct sk_buff *)skb_;
+
+ if (flags) {
+ bpf_dynptr_set_null(ptr);
+ return -EINVAL;
+ }
+
+ bpf_dynptr_init(ptr, skb, BPF_DYNPTR_TYPE_SKB_META, 0, skb_metadata_len(skb));
+
+ if (skb_cloned(skb))
+ bpf_dynptr_set_rdonly(ptr);
+
+ return 0;
+}
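
Illustrative sketch, not part of the patch: a minimal SCHED_CLS program that reads the first four bytes of the metadata area through the new kfunc. The extern declaration follows the usual libbpf __ksym convention; section, program and variable names are made up, and error handling is kept minimal.

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

extern int bpf_dynptr_from_skb_meta(struct __sk_buff *skb, __u64 flags,
                                    struct bpf_dynptr *ptr) __ksym;

SEC("tc")
int read_meta(struct __sk_buff *skb)
{
        struct bpf_dynptr meta;
        __u32 mark = 0;

        if (bpf_dynptr_from_skb_meta(skb, 0, &meta))
                return TC_ACT_OK;

        /* Metadata was presumably written earlier by an XDP program that
         * made room with bpf_xdp_adjust_meta(); ignore read errors here.
         */
        bpf_dynptr_read(&mark, sizeof(mark), &meta, 0, 0);
        skb->mark = mark;

        return TC_ACT_OK;
}

char LICENSE[] SEC("license") = "GPL";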
+
__bpf_kfunc int bpf_dynptr_from_xdp(struct xdp_md *x, u64 flags,
struct bpf_dynptr *ptr__uninit)
{
@@ -12148,6 +12221,98 @@ __bpf_kfunc int bpf_sock_ops_enable_tx_tstamp(struct bpf_sock_ops_kern *skops,
return 0;
}
+/**
+ * bpf_xdp_pull_data() - Pull in non-linear xdp data.
+ * @x: &xdp_md associated with the XDP buffer
+ * @len: length of data to be made directly accessible in the linear part
+ *
+ * Pull in data in case the XDP buffer associated with @x is non-linear and
+ * not all @len bytes are in the linear data area.
+ *
+ * Direct packet access allows reading and writing linear XDP data through
+ * packet pointers (i.e., &xdp_md->data + offsets). The amount of data which
+ * ends up in the linear part of the xdp_buff depends on the NIC and its
+ * configuration. When a frag-capable XDP program wants to directly access
+ * headers that may be in the non-linear area, call this kfunc to make sure
+ * the data is available in the linear area. Alternatively, use dynptr or
+ * bpf_xdp_{load,store}_bytes() to access data without pulling.
+ *
+ * This kfunc can also be used with bpf_xdp_adjust_head() to decapsulate
+ * headers in the non-linear data area.
+ *
+ * A call to this kfunc may reduce headroom. If there is not enough tailroom
+ * in the linear data area, metadata and data will be shifted down.
+ *
+ * A call to this kfunc may change the buffer geometry.
+ * Therefore, at load time, all checks on pointers previously done by the
+ * verifier are invalidated and must be performed again, if the kfunc is used
+ * in combination with direct packet access.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - invalid len
+ */
+__bpf_kfunc int bpf_xdp_pull_data(struct xdp_md *x, u32 len)
+{
+ struct xdp_buff *xdp = (struct xdp_buff *)x;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ int i, delta, shift, headroom, tailroom, n_frags_free = 0;
+ void *data_hard_end = xdp_data_hard_end(xdp);
+ int data_len = xdp->data_end - xdp->data;
+ void *start;
+
+ if (len <= data_len)
+ return 0;
+
+ if (unlikely(len > xdp_get_buff_len(xdp)))
+ return -EINVAL;
+
+ start = xdp_data_meta_unsupported(xdp) ? xdp->data : xdp->data_meta;
+
+ headroom = start - xdp->data_hard_start - sizeof(struct xdp_frame);
+ tailroom = data_hard_end - xdp->data_end;
+
+ delta = len - data_len;
+ if (unlikely(delta > tailroom + headroom))
+ return -EINVAL;
+
+ shift = delta - tailroom;
+ if (shift > 0) {
+ memmove(start - shift, start, xdp->data_end - start);
+
+ xdp->data_meta -= shift;
+ xdp->data -= shift;
+ xdp->data_end -= shift;
+ }
+
+ for (i = 0; i < sinfo->nr_frags && delta; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+ u32 shrink = min_t(u32, delta, skb_frag_size(frag));
+
+ memcpy(xdp->data_end, skb_frag_address(frag), shrink);
+
+ xdp->data_end += shrink;
+ sinfo->xdp_frags_size -= shrink;
+ delta -= shrink;
+ if (bpf_xdp_shrink_data(xdp, frag, shrink, false))
+ n_frags_free++;
+ }
+
+ if (unlikely(n_frags_free)) {
+ memmove(sinfo->frags, sinfo->frags + n_frags_free,
+ (sinfo->nr_frags - n_frags_free) * sizeof(skb_frag_t));
+
+ sinfo->nr_frags -= n_frags_free;
+
+ if (!sinfo->nr_frags) {
+ xdp_buff_clear_frags_flag(xdp);
+ xdp_buff_clear_frag_pfmemalloc(xdp);
+ }
+ }
+
+ return 0;
+}
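
Illustrative sketch, not part of the patch: an XDP program for a frag-capable hook that pulls the first 64 bytes into the linear area before parsing, as the kernel-doc above recommends. The __ksym declaration and the "xdp.frags" section name follow common libbpf conventions; program and section names are made up.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_pull_data(struct xdp_md *xdp, __u32 len) __ksym;

SEC("xdp.frags")
int pull_headers(struct xdp_md *ctx)
{
        void *data, *data_end;

        /* Make at least 64 bytes of headers directly accessible. */
        if (bpf_xdp_pull_data(ctx, 64))
                return XDP_PASS;

        /* The pull may move data pointers, so re-derive them before use. */
        data = (void *)(long)ctx->data;
        data_end = (void *)(long)ctx->data_end;
        if (data + 64 > data_end)
                return XDP_PASS;

        /* ... parse Ethernet/IP headers in the linear area ... */
        return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";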
+
__bpf_kfunc_end_defs();
int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
@@ -12169,8 +12334,13 @@ BTF_KFUNCS_START(bpf_kfunc_check_set_skb)
BTF_ID_FLAGS(func, bpf_dynptr_from_skb, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(bpf_kfunc_check_set_skb)
+BTF_KFUNCS_START(bpf_kfunc_check_set_skb_meta)
+BTF_ID_FLAGS(func, bpf_dynptr_from_skb_meta, KF_TRUSTED_ARGS)
+BTF_KFUNCS_END(bpf_kfunc_check_set_skb_meta)
+
BTF_KFUNCS_START(bpf_kfunc_check_set_xdp)
BTF_ID_FLAGS(func, bpf_dynptr_from_xdp)
+BTF_ID_FLAGS(func, bpf_xdp_pull_data)
BTF_KFUNCS_END(bpf_kfunc_check_set_xdp)
BTF_KFUNCS_START(bpf_kfunc_check_set_sock_addr)
@@ -12190,6 +12360,11 @@ static const struct btf_kfunc_id_set bpf_kfunc_set_skb = {
.set = &bpf_kfunc_check_set_skb,
};
+static const struct btf_kfunc_id_set bpf_kfunc_set_skb_meta = {
+ .owner = THIS_MODULE,
+ .set = &bpf_kfunc_check_set_skb_meta,
+};
+
static const struct btf_kfunc_id_set bpf_kfunc_set_xdp = {
.owner = THIS_MODULE,
.set = &bpf_kfunc_check_set_xdp,
@@ -12225,6 +12400,8 @@ static int __init bpf_kfunc_init(void)
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_SEG6LOCAL, &bpf_kfunc_set_skb);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_NETFILTER, &bpf_kfunc_set_skb);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_kfunc_set_skb);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_kfunc_set_skb_meta);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_ACT, &bpf_kfunc_set_skb_meta);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &bpf_kfunc_set_xdp);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
&bpf_kfunc_set_sock_addr);
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 7d426a8e29f3..f112156db587 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -90,10 +90,12 @@ static void est_timer(struct timer_list *t)
rate = (b_packets - est->last_packets) << (10 - est->intvl_log);
rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);
+ preempt_disable_nested();
write_seqcount_begin(&est->seq);
est->avbps += brate;
est->avpps += rate;
write_seqcount_end(&est->seq);
+ preempt_enable_nested();
est->last_bytes = b_bytes;
est->last_packets = b_packets;
diff --git a/net/core/gro.c b/net/core/gro.c
index b350e5b69549..76f9c3712422 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <net/psp.h>
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
@@ -376,6 +377,7 @@ static void gro_list_prepare(const struct list_head *head,
diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);
diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
+ diffs |= __psp_skb_coalesce_diff(skb, p, diffs);
}
NAPI_GRO_CB(p)->same_flow = !diffs;
@@ -637,6 +639,8 @@ EXPORT_SYMBOL(gro_receive_skb);
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
+ struct skb_shared_info *shinfo;
+
if (unlikely(skb->pfmemalloc)) {
consume_skb(skb);
return;
@@ -653,8 +657,12 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
skb->encapsulation = 0;
skb->ip_summed = CHECKSUM_NONE;
- skb_shinfo(skb)->gso_type = 0;
- skb_shinfo(skb)->gso_size = 0;
+
+ shinfo = skb_shinfo(skb);
+ shinfo->gso_type = 0;
+ shinfo->gso_size = 0;
+ shinfo->hwtstamps.hwtstamp = 0;
+
if (unlikely(skb->slow_gro)) {
skb_orphan(skb);
skb_ext_reset(skb);
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index ff8e5b64bf6b..fd57b845de33 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -8,11 +8,13 @@
struct gro_cell {
struct sk_buff_head napi_skbs;
struct napi_struct napi;
+ local_lock_t bh_lock;
};
int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
+ bool have_bh_lock = false;
struct gro_cell *cell;
int res;
@@ -25,6 +27,8 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
goto unlock;
}
+ local_lock_nested_bh(&gcells->cells->bh_lock);
+ have_bh_lock = true;
cell = this_cpu_ptr(gcells->cells);
if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(net_hotdata.max_backlog)) {
@@ -42,6 +46,8 @@ drop:
res = NET_RX_SUCCESS;
unlock:
+ if (have_bh_lock)
+ local_unlock_nested_bh(&gcells->cells->bh_lock);
rcu_read_unlock();
return res;
}
@@ -54,6 +60,7 @@ static int gro_cell_poll(struct napi_struct *napi, int budget)
struct sk_buff *skb;
int work_done = 0;
+ __local_lock_nested_bh(&cell->bh_lock);
while (work_done < budget) {
skb = __skb_dequeue(&cell->napi_skbs);
if (!skb)
@@ -64,6 +71,7 @@ static int gro_cell_poll(struct napi_struct *napi, int budget)
if (work_done < budget)
napi_complete_done(napi, work_done);
+ __local_unlock_nested_bh(&cell->bh_lock);
return work_done;
}
@@ -79,6 +87,7 @@ int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
__skb_queue_head_init(&cell->napi_skbs);
+ local_lock_init(&cell->bh_lock);
set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);
diff --git a/net/core/hotdata.c b/net/core/hotdata.c
index 0bc893d5f07b..95d0a4df1006 100644
--- a/net/core/hotdata.c
+++ b/net/core/hotdata.c
@@ -2,7 +2,9 @@
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/list.h>
+#include <net/aligned_data.h>
#include <net/hotdata.h>
+#include <net/ip.h>
#include <net/proto_memory.h>
struct net_hotdata net_hotdata __cacheline_aligned = {
@@ -22,3 +24,6 @@ struct net_hotdata net_hotdata __cacheline_aligned = {
.sysctl_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE
};
EXPORT_SYMBOL(net_hotdata);
+
+struct net_aligned_data net_aligned_data;
+EXPORT_IPV6_MOD(net_aligned_data);
diff --git a/net/core/ieee8021q_helpers.c b/net/core/ieee8021q_helpers.c
index 759a9b9f3f89..669b357b73b2 100644
--- a/net/core/ieee8021q_helpers.c
+++ b/net/core/ieee8021q_helpers.c
@@ -7,6 +7,11 @@
#include <net/dscp.h>
#include <net/ieee8021q.h>
+/* verify that table covers all 8 traffic types */
+#define TT_MAP_SIZE_OK(tbl) \
+ compiletime_assert(ARRAY_SIZE(tbl) == IEEE8021Q_TT_MAX, \
+ #tbl " size mismatch")
+
/* The following arrays map Traffic Types (TT) to traffic classes (TC) for
* different number of queues as shown in the example provided by
* IEEE 802.1Q-2022 in Annex I "I.3 Traffic type to traffic class mapping" and
@@ -101,51 +106,28 @@ int ieee8021q_tt_to_tc(enum ieee8021q_traffic_type tt, unsigned int num_queues)
switch (num_queues) {
case 8:
- compiletime_assert(ARRAY_SIZE(ieee8021q_8queue_tt_tc_map) !=
- IEEE8021Q_TT_MAX - 1,
- "ieee8021q_8queue_tt_tc_map != max - 1");
+ TT_MAP_SIZE_OK(ieee8021q_8queue_tt_tc_map);
return ieee8021q_8queue_tt_tc_map[tt];
case 7:
- compiletime_assert(ARRAY_SIZE(ieee8021q_7queue_tt_tc_map) !=
- IEEE8021Q_TT_MAX - 1,
- "ieee8021q_7queue_tt_tc_map != max - 1");
-
+ TT_MAP_SIZE_OK(ieee8021q_7queue_tt_tc_map);
return ieee8021q_7queue_tt_tc_map[tt];
case 6:
- compiletime_assert(ARRAY_SIZE(ieee8021q_6queue_tt_tc_map) !=
- IEEE8021Q_TT_MAX - 1,
- "ieee8021q_6queue_tt_tc_map != max - 1");
-
+ TT_MAP_SIZE_OK(ieee8021q_6queue_tt_tc_map);
return ieee8021q_6queue_tt_tc_map[tt];
case 5:
- compiletime_assert(ARRAY_SIZE(ieee8021q_5queue_tt_tc_map) !=
- IEEE8021Q_TT_MAX - 1,
- "ieee8021q_5queue_tt_tc_map != max - 1");
-
+ TT_MAP_SIZE_OK(ieee8021q_5queue_tt_tc_map);
return ieee8021q_5queue_tt_tc_map[tt];
case 4:
- compiletime_assert(ARRAY_SIZE(ieee8021q_4queue_tt_tc_map) !=
- IEEE8021Q_TT_MAX - 1,
- "ieee8021q_4queue_tt_tc_map != max - 1");
-
+ TT_MAP_SIZE_OK(ieee8021q_4queue_tt_tc_map);
return ieee8021q_4queue_tt_tc_map[tt];
case 3:
- compiletime_assert(ARRAY_SIZE(ieee8021q_3queue_tt_tc_map) !=
- IEEE8021Q_TT_MAX - 1,
- "ieee8021q_3queue_tt_tc_map != max - 1");
-
+ TT_MAP_SIZE_OK(ieee8021q_3queue_tt_tc_map);
return ieee8021q_3queue_tt_tc_map[tt];
case 2:
- compiletime_assert(ARRAY_SIZE(ieee8021q_2queue_tt_tc_map) !=
- IEEE8021Q_TT_MAX - 1,
- "ieee8021q_2queue_tt_tc_map != max - 1");
-
+ TT_MAP_SIZE_OK(ieee8021q_2queue_tt_tc_map);
return ieee8021q_2queue_tt_tc_map[tt];
case 1:
- compiletime_assert(ARRAY_SIZE(ieee8021q_1queue_tt_tc_map) !=
- IEEE8021Q_TT_MAX - 1,
- "ieee8021q_1queue_tt_tc_map != max - 1");
-
+ TT_MAP_SIZE_OK(ieee8021q_1queue_tt_tc_map);
return ieee8021q_1queue_tt_tc_map[tt];
}
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 864f3bbc3a4c..212cde35affa 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -157,9 +157,9 @@ static void linkwatch_schedule_work(int urgent)
* override the existing timer.
*/
if (test_bit(LW_URGENT, &linkwatch_flags))
- mod_delayed_work(system_unbound_wq, &linkwatch_work, 0);
+ mod_delayed_work(system_dfl_wq, &linkwatch_work, 0);
else
- queue_delayed_work(system_unbound_wq, &linkwatch_work, delay);
+ queue_delayed_work(system_dfl_wq, &linkwatch_work, delay);
}
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index ae74634310a3..9f40be0c3e71 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -8,12 +8,12 @@
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/bpf.h>
+#include <net/flow.h>
#include <net/lwtunnel.h>
#include <net/gre.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/ipv6_stubs.h>
-#include <net/inet_dscp.h>
struct bpf_lwt_prog {
struct bpf_prog *prog;
@@ -209,7 +209,7 @@ static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
fl4.flowi4_oif = oif;
fl4.flowi4_mark = skb->mark;
fl4.flowi4_uid = sock_net_uid(net, sk);
- fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph));
+ fl4.flowi4_dscp = ip4h_dscp(iph);
fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
fl4.flowi4_proto = iph->protocol;
fl4.daddr = iph->daddr;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 49dce9a82295..bddfa389effa 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -28,6 +28,7 @@
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/dst.h>
+#include <net/ip.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
@@ -53,8 +54,8 @@ static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
-static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
- struct net_device *dev);
+static void pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
+ bool skip_perm);
#ifdef CONFIG_PROC_FS
static const struct seq_operations neigh_stat_seq_ops;
@@ -153,11 +154,12 @@ static void neigh_update_gc_list(struct neighbour *n)
if (n->dead)
goto out;
- /* remove from the gc list if new state is permanent or if neighbor
- * is externally learned; otherwise entry should be on the gc list
+ /* remove from the gc list if new state is permanent or if neighbor is
+ * externally learned / validated; otherwise entry should be on the gc
+ * list
*/
exempt_from_gc = n->nud_state & NUD_PERMANENT ||
- n->flags & NTF_EXT_LEARNED;
+ n->flags & (NTF_EXT_LEARNED | NTF_EXT_VALIDATED);
on_gc_list = !list_empty(&n->gc_list);
if (exempt_from_gc && on_gc_list) {
@@ -204,6 +206,7 @@ static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0;
+ ndm_flags |= (flags & NEIGH_UPDATE_F_EXT_VALIDATED) ? NTF_EXT_VALIDATED : 0;
if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) {
if (ndm_flags & NTF_EXT_LEARNED)
@@ -221,6 +224,14 @@ static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
*notify = 1;
*managed_update = true;
}
+ if ((old_flags ^ ndm_flags) & NTF_EXT_VALIDATED) {
+ if (ndm_flags & NTF_EXT_VALIDATED)
+ neigh->flags |= NTF_EXT_VALIDATED;
+ else
+ neigh->flags &= ~NTF_EXT_VALIDATED;
+ *notify = 1;
+ *gc_update = true;
+ }
}
bool neigh_remove_one(struct neighbour *n)
@@ -368,6 +379,43 @@ static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net,
}
}
+static void neigh_flush_one(struct neighbour *n)
+{
+ hlist_del_rcu(&n->hash);
+ hlist_del_rcu(&n->dev_list);
+
+ write_lock(&n->lock);
+
+ neigh_del_timer(n);
+ neigh_mark_dead(n);
+
+ if (refcount_read(&n->refcnt) != 1) {
+ /* The most unpleasant situation.
+ * We must destroy neighbour entry,
+ * but someone still uses it.
+ *
+ * The destroy will be delayed until
+ * the last user releases us, but
+ * we must kill timers etc. and move
+ * it to safe state.
+ */
+ __skb_queue_purge(&n->arp_queue);
+ n->arp_queue_len_bytes = 0;
+ WRITE_ONCE(n->output, neigh_blackhole);
+
+ if (n->nud_state & NUD_VALID)
+ n->nud_state = NUD_NOARP;
+ else
+ n->nud_state = NUD_NONE;
+
+ neigh_dbg(2, "neigh %p is stray\n", n);
+ }
+
+ write_unlock(&n->lock);
+
+ neigh_cleanup_and_release(n);
+}
+
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
bool skip_perm)
{
@@ -378,35 +426,29 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
dev_head = neigh_get_dev_table(dev, tbl->family);
hlist_for_each_entry_safe(n, tmp, dev_head, dev_list) {
- if (skip_perm && n->nud_state & NUD_PERMANENT)
+ if (skip_perm &&
+ (n->nud_state & NUD_PERMANENT ||
+ n->flags & NTF_EXT_VALIDATED))
continue;
- hlist_del_rcu(&n->hash);
- hlist_del_rcu(&n->dev_list);
- write_lock(&n->lock);
- neigh_del_timer(n);
- neigh_mark_dead(n);
- if (refcount_read(&n->refcnt) != 1) {
- /* The most unpleasant situation.
- * We must destroy neighbour entry,
- * but someone still uses it.
- *
- * The destroy will be delayed until
- * the last user releases us, but
- * we must kill timers etc. and move
- * it to safe state.
- */
- __skb_queue_purge(&n->arp_queue);
- n->arp_queue_len_bytes = 0;
- WRITE_ONCE(n->output, neigh_blackhole);
- if (n->nud_state & NUD_VALID)
- n->nud_state = NUD_NOARP;
- else
- n->nud_state = NUD_NONE;
- neigh_dbg(2, "neigh %p is stray\n", n);
- }
- write_unlock(&n->lock);
- neigh_cleanup_and_release(n);
+ neigh_flush_one(n);
+ }
+}
+
+static void neigh_flush_table(struct neigh_table *tbl)
+{
+ struct neigh_hash_table *nht;
+ int i;
+
+ nht = rcu_dereference_protected(tbl->nht,
+ lockdep_is_held(&tbl->lock));
+
+ for (i = 0; i < (1 << nht->hash_shift); i++) {
+ struct hlist_node *tmp;
+ struct neighbour *n;
+
+ neigh_for_each_in_bucket_safe(n, tmp, &nht->hash_heads[i])
+ neigh_flush_one(n);
}
}
@@ -422,8 +464,15 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
bool skip_perm)
{
write_lock_bh(&tbl->lock);
- neigh_flush_dev(tbl, dev, skip_perm);
- pneigh_ifdown_and_unlock(tbl, dev);
+ if (likely(dev)) {
+ neigh_flush_dev(tbl, dev, skip_perm);
+ } else {
+ DEBUG_NET_WARN_ON_ONCE(skip_perm);
+ neigh_flush_table(tbl);
+ }
+ write_unlock_bh(&tbl->lock);
+
+ pneigh_ifdown(tbl, dev, skip_perm);
pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
tbl->family);
if (skb_queue_empty_lockless(&tbl->proxy_queue))
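[Annotation: the hunks above pull the per-entry teardown out of neigh_flush_dev() into neigh_flush_one(), add neigh_flush_table() for the dev == NULL case, and let __neigh_ifdown() pick between the two. A minimal userspace sketch of that shape, with toy types and illustrative names (flush_one/flush_dev/flush_table), not the kernel structures:]

#include <stdio.h>
#include <stdlib.h>

struct entry {
	int dev;                /* owning device id */
	int permanent;          /* kept when flushing one device with skip_perm */
	struct entry *next;
};

#define NBUCKETS 4
static struct entry *buckets[NBUCKETS];

/* Single place that tears an entry down, shared by both flush paths. */
static void flush_one(struct entry **pp)
{
	struct entry *e = *pp;

	*pp = e->next;          /* unlink from the bucket */
	printf("flushed entry dev=%d\n", e->dev);
	free(e);
}

/* Flush entries of one device, optionally keeping "permanent" ones. */
static void flush_dev(int dev, int skip_perm)
{
	for (int i = 0; i < NBUCKETS; i++) {
		struct entry **pp = &buckets[i];

		while (*pp) {
			struct entry *e = *pp;

			if (e->dev == dev && !(skip_perm && e->permanent))
				flush_one(pp);
			else
				pp = &e->next;
		}
	}
}

/* Flush everything, regardless of device (the dev == NULL case). */
static void flush_table(void)
{
	for (int i = 0; i < NBUCKETS; i++)
		while (buckets[i])
			flush_one(&buckets[i]);
}

int main(void)
{
	for (int i = 0; i < 6; i++) {
		struct entry *e = calloc(1, sizeof(*e));

		e->dev = i % 2;
		e->permanent = (i == 0);
		e->next = buckets[i % NBUCKETS];
		buckets[i % NBUCKETS] = e;
	}
	flush_dev(0, 1);        /* like neigh_flush_dev(tbl, dev, skip_perm) */
	flush_table();          /* like neigh_flush_table() when dev is NULL */
	return 0;
}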
@@ -706,54 +755,53 @@ static u32 pneigh_hash(const void *pkey, unsigned int key_len)
return hash_val;
}
-static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
- struct net *net,
- const void *pkey,
- unsigned int key_len,
- struct net_device *dev)
+struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl,
+ struct net *net, const void *pkey,
+ struct net_device *dev)
{
+ struct pneigh_entry *n;
+ unsigned int key_len;
+ u32 hash_val;
+
+ key_len = tbl->key_len;
+ hash_val = pneigh_hash(pkey, key_len);
+ n = rcu_dereference_check(tbl->phash_buckets[hash_val],
+ lockdep_is_held(&tbl->phash_lock));
+
while (n) {
if (!memcmp(n->key, pkey, key_len) &&
net_eq(pneigh_net(n), net) &&
(n->dev == dev || !n->dev))
return n;
- n = n->next;
- }
- return NULL;
-}
-struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
- struct net *net, const void *pkey, struct net_device *dev)
-{
- unsigned int key_len = tbl->key_len;
- u32 hash_val = pneigh_hash(pkey, key_len);
+ n = rcu_dereference_check(n->next, lockdep_is_held(&tbl->phash_lock));
+ }
- return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
- net, pkey, key_len, dev);
+ return NULL;
}
-EXPORT_SYMBOL_GPL(__pneigh_lookup);
+EXPORT_IPV6_MOD(pneigh_lookup);
-struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
- struct net *net, const void *pkey,
- struct net_device *dev, int creat)
+int pneigh_create(struct neigh_table *tbl, struct net *net,
+ const void *pkey, struct net_device *dev,
+ u32 flags, u8 protocol, bool permanent)
{
struct pneigh_entry *n;
- unsigned int key_len = tbl->key_len;
- u32 hash_val = pneigh_hash(pkey, key_len);
-
- read_lock_bh(&tbl->lock);
- n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
- net, pkey, key_len, dev);
- read_unlock_bh(&tbl->lock);
+ unsigned int key_len;
+ u32 hash_val;
+ int err = 0;
- if (n || !creat)
- goto out;
+ mutex_lock(&tbl->phash_lock);
- ASSERT_RTNL();
+ n = pneigh_lookup(tbl, net, pkey, dev);
+ if (n)
+ goto update;
+ key_len = tbl->key_len;
n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
- if (!n)
+ if (!n) {
+ err = -ENOBUFS;
goto out;
+ }
write_pnet(&n->net, net);
memcpy(n->key, pkey, key_len);
@@ -763,73 +811,98 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
if (tbl->pconstructor && tbl->pconstructor(n)) {
netdev_put(dev, &n->dev_tracker);
kfree(n);
- n = NULL;
+ err = -ENOBUFS;
goto out;
}
- write_lock_bh(&tbl->lock);
+ hash_val = pneigh_hash(pkey, key_len);
n->next = tbl->phash_buckets[hash_val];
- tbl->phash_buckets[hash_val] = n;
- write_unlock_bh(&tbl->lock);
+ rcu_assign_pointer(tbl->phash_buckets[hash_val], n);
+update:
+ WRITE_ONCE(n->flags, flags);
+ n->permanent = permanent;
+ WRITE_ONCE(n->protocol, protocol);
out:
- return n;
+ mutex_unlock(&tbl->phash_lock);
+ return err;
}
-EXPORT_SYMBOL(pneigh_lookup);
+static void pneigh_destroy(struct rcu_head *rcu)
+{
+ struct pneigh_entry *n = container_of(rcu, struct pneigh_entry, rcu);
+
+ netdev_put(n->dev, &n->dev_tracker);
+ kfree(n);
+}
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
struct net_device *dev)
{
- struct pneigh_entry *n, **np;
- unsigned int key_len = tbl->key_len;
- u32 hash_val = pneigh_hash(pkey, key_len);
+ struct pneigh_entry *n, __rcu **np;
+ unsigned int key_len;
+ u32 hash_val;
- write_lock_bh(&tbl->lock);
- for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
+ key_len = tbl->key_len;
+ hash_val = pneigh_hash(pkey, key_len);
+
+ mutex_lock(&tbl->phash_lock);
+
+ for (np = &tbl->phash_buckets[hash_val];
+ (n = rcu_dereference_protected(*np, 1)) != NULL;
np = &n->next) {
if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
net_eq(pneigh_net(n), net)) {
- *np = n->next;
- write_unlock_bh(&tbl->lock);
+ rcu_assign_pointer(*np, n->next);
+
+ mutex_unlock(&tbl->phash_lock);
+
if (tbl->pdestructor)
tbl->pdestructor(n);
- netdev_put(n->dev, &n->dev_tracker);
- kfree(n);
+
+ call_rcu(&n->rcu, pneigh_destroy);
return 0;
}
}
- write_unlock_bh(&tbl->lock);
+
+ mutex_unlock(&tbl->phash_lock);
return -ENOENT;
}
-static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
- struct net_device *dev)
+static void pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
+ bool skip_perm)
{
- struct pneigh_entry *n, **np, *freelist = NULL;
+ struct pneigh_entry *n, __rcu **np;
+ LIST_HEAD(head);
u32 h;
+ mutex_lock(&tbl->phash_lock);
+
for (h = 0; h <= PNEIGH_HASHMASK; h++) {
np = &tbl->phash_buckets[h];
- while ((n = *np) != NULL) {
+ while ((n = rcu_dereference_protected(*np, 1)) != NULL) {
+ if (skip_perm && n->permanent)
+ goto skip;
if (!dev || n->dev == dev) {
- *np = n->next;
- n->next = freelist;
- freelist = n;
+ rcu_assign_pointer(*np, n->next);
+ list_add(&n->free_node, &head);
continue;
}
+skip:
np = &n->next;
}
}
- write_unlock_bh(&tbl->lock);
- while ((n = freelist)) {
- freelist = n->next;
- n->next = NULL;
+
+ mutex_unlock(&tbl->phash_lock);
+
+ while (!list_empty(&head)) {
+ n = list_first_entry(&head, typeof(*n), free_node);
+ list_del(&n->free_node);
+
if (tbl->pdestructor)
tbl->pdestructor(n);
- netdev_put(n->dev, &n->dev_tracker);
- kfree(n);
+
+ call_rcu(&n->rcu, pneigh_destroy);
}
- return -ENOENT;
}
static inline void neigh_parms_put(struct neigh_parms *parms)
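[Annotation: pneigh_lookup() above now walks its bucket under RCU, with phash_lock serializing writers and call_rcu() deferring frees, but the match rule itself is unchanged: exact key, same netns, and either the exact device or a wildcard entry with no device. A small standalone sketch of just that matching logic, single-threaded and with toy types; the netns check and all RCU/mutex handling are omitted:]

#include <stdio.h>
#include <string.h>

struct pentry {
	unsigned char key[4];   /* e.g. an IPv4 address */
	int dev;                /* 0 means "any device" (wildcard entry) */
	struct pentry *next;
};

static struct pentry *lookup(struct pentry *bucket, const void *key,
			     size_t key_len, int dev)
{
	for (struct pentry *n = bucket; n; n = n->next)
		if (!memcmp(n->key, key, key_len) &&
		    (n->dev == dev || !n->dev))
			return n;
	return NULL;
}

int main(void)
{
	struct pentry wildcard = { .key = {192, 0, 2, 1}, .dev = 0 };
	struct pentry exact = { .key = {192, 0, 2, 1}, .dev = 3, .next = &wildcard };
	unsigned char key[4] = {192, 0, 2, 1};

	printf("dev 3 -> %s\n", lookup(&exact, key, 4, 3) == &exact ? "exact" : "?");
	printf("dev 7 -> %s\n", lookup(&exact, key, 4, 7) == &wildcard ? "wildcard" : "?");
	return 0;
}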
@@ -937,7 +1010,8 @@ static void neigh_periodic_work(struct work_struct *work)
state = n->nud_state;
if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
- (n->flags & NTF_EXT_LEARNED)) {
+ (n->flags &
+ (NTF_EXT_LEARNED | NTF_EXT_VALIDATED))) {
write_unlock(&n->lock);
continue;
}
@@ -1090,9 +1164,15 @@ static void neigh_timer_handler(struct timer_list *t)
if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
- WRITE_ONCE(neigh->nud_state, NUD_FAILED);
+ if (neigh->nud_state == NUD_PROBE &&
+ neigh->flags & NTF_EXT_VALIDATED) {
+ WRITE_ONCE(neigh->nud_state, NUD_STALE);
+ neigh->updated = jiffies;
+ } else {
+ WRITE_ONCE(neigh->nud_state, NUD_FAILED);
+ neigh_invalidate(neigh);
+ }
notify = 1;
- neigh_invalidate(neigh);
goto out;
}
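[Annotation: the timer change above keeps an externally validated entry usable: when the probe budget is exhausted in NUD_PROBE, the entry falls back to NUD_STALE instead of going to NUD_FAILED and being invalidated. A compact sketch of that decision, using illustrative constants rather than the kernel's values:]

#include <stdio.h>

enum nud_state { NUD_STALE, NUD_PROBE, NUD_INCOMPLETE, NUD_FAILED };
#define NTF_EXT_VALIDATED 0x1   /* illustrative flag bit */

/* What the timer does once the probe budget is spent. */
static enum nud_state on_probes_exhausted(enum nud_state state, unsigned int flags)
{
	if (state == NUD_PROBE && (flags & NTF_EXT_VALIDATED))
		return NUD_STALE;   /* keep the entry; userspace vouches for it */
	return NUD_FAILED;          /* normal path: invalidate the neighbour */
}

int main(void)
{
	printf("%d\n", on_probes_exhausted(NUD_PROBE, NTF_EXT_VALIDATED)); /* 0 = STALE */
	printf("%d\n", on_probes_exhausted(NUD_PROBE, 0));                 /* 3 = FAILED */
	return 0;
}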
@@ -1240,6 +1320,8 @@ static void neigh_update_hhs(struct neighbour *neigh)
NTF_ROUTER flag.
NEIGH_UPDATE_F_ISROUTER indicates if the neighbour is known as
a router.
+ NEIGH_UPDATE_F_EXT_VALIDATED means that the entry will not be removed
+ or invalidated.
Caller MUST hold reference count on the entry.
*/
@@ -1402,7 +1484,8 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
* we can reinject the packet there.
*/
n2 = NULL;
- if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
+ if (dst &&
+ READ_ONCE(dst->obsolete) != DST_OBSOLETE_DEAD) {
n2 = dst_neigh_lookup_skb(dst, skb);
if (n2)
n1 = n2;
@@ -1756,6 +1839,7 @@ void neigh_table_init(int index, struct neigh_table *tbl)
WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
rwlock_init(&tbl->lock);
+ mutex_init(&tbl->phash_lock);
INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
@@ -1972,21 +2056,13 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
if (tb[NDA_PROTOCOL])
protocol = nla_get_u8(tb[NDA_PROTOCOL]);
if (ndm_flags & NTF_PROXY) {
- struct pneigh_entry *pn;
-
- if (ndm_flags & NTF_MANAGED) {
+ if (ndm_flags & (NTF_MANAGED | NTF_EXT_VALIDATED)) {
NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination");
goto out;
}
- err = -ENOBUFS;
- pn = pneigh_lookup(tbl, net, dst, dev, 1);
- if (pn) {
- pn->flags = ndm_flags;
- if (protocol)
- pn->protocol = protocol;
- err = 0;
- }
+ err = pneigh_create(tbl, net, dst, dev, ndm_flags, protocol,
+ !!(ndm->ndm_state & NUD_PERMANENT));
goto out;
}
@@ -2004,7 +2080,8 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
if (neigh == NULL) {
bool ndm_permanent = ndm->ndm_state & NUD_PERMANENT;
bool exempt_from_gc = ndm_permanent ||
- ndm_flags & NTF_EXT_LEARNED;
+ ndm_flags & (NTF_EXT_LEARNED |
+ NTF_EXT_VALIDATED);
if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
err = -ENOENT;
@@ -2015,10 +2092,27 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
err = -EINVAL;
goto out;
}
+ if (ndm_flags & NTF_EXT_VALIDATED) {
+ u8 state = ndm->ndm_state;
+
+ /* NTF_USE and NTF_MANAGED will result in the neighbor
+ * being created with an invalid state (NUD_NONE).
+ */
+ if (ndm_flags & (NTF_USE | NTF_MANAGED))
+ state = NUD_NONE;
+
+ if (!(state & NUD_VALID)) {
+ NL_SET_ERR_MSG(extack,
+ "Cannot create externally validated neighbor with an invalid state");
+ err = -EINVAL;
+ goto out;
+ }
+ }
neigh = ___neigh_create(tbl, dst, dev,
ndm_flags &
- (NTF_EXT_LEARNED | NTF_MANAGED),
+ (NTF_EXT_LEARNED | NTF_MANAGED |
+ NTF_EXT_VALIDATED),
exempt_from_gc, true);
if (IS_ERR(neigh)) {
err = PTR_ERR(neigh);
@@ -2030,6 +2124,24 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
neigh_release(neigh);
goto out;
}
+ if (ndm_flags & NTF_EXT_VALIDATED) {
+ u8 state = ndm->ndm_state;
+
+ /* NTF_USE and NTF_MANAGED do not update the existing
+ * state other than clearing it if it was
+ * NUD_PERMANENT.
+ */
+ if (ndm_flags & (NTF_USE | NTF_MANAGED))
+ state = READ_ONCE(neigh->nud_state) & ~NUD_PERMANENT;
+
+ if (!(state & NUD_VALID)) {
+ NL_SET_ERR_MSG(extack,
+ "Cannot mark neighbor as externally validated with an invalid state");
+ err = -EINVAL;
+ neigh_release(neigh);
+ goto out;
+ }
+ }
if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
@@ -2046,13 +2158,13 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
flags |= NEIGH_UPDATE_F_MANAGED;
if (ndm_flags & NTF_USE)
flags |= NEIGH_UPDATE_F_USE;
+ if (ndm_flags & NTF_EXT_VALIDATED)
+ flags |= NEIGH_UPDATE_F_EXT_VALIDATED;
err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
NETLINK_CB(skb).portid, extack);
- if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) {
+ if (!err && ndm_flags & (NTF_USE | NTF_MANAGED))
neigh_event_send(neigh, NULL);
- err = 0;
- }
neigh_release(neigh);
out:
return err;
@@ -2579,13 +2691,15 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
u32 neigh_flags, neigh_flags_ext;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
+ u8 protocol;
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
if (nlh == NULL)
return -EMSGSIZE;
- neigh_flags_ext = pn->flags >> NTF_EXT_SHIFT;
- neigh_flags = pn->flags & NTF_OLD_MASK;
+ neigh_flags = READ_ONCE(pn->flags);
+ neigh_flags_ext = neigh_flags >> NTF_EXT_SHIFT;
+ neigh_flags &= NTF_OLD_MASK;
ndm = nlmsg_data(nlh);
ndm->ndm_family = tbl->family;
@@ -2599,7 +2713,8 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
goto nla_put_failure;
- if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
+ protocol = READ_ONCE(pn->protocol);
+ if (protocol && nla_put_u8(skb, NDA_PROTOCOL, protocol))
goto nla_put_failure;
if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
goto nla_put_failure;
@@ -2706,12 +2821,12 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
if (filter->dev_idx || filter->master_idx)
flags |= NLM_F_DUMP_FILTERED;
- read_lock_bh(&tbl->lock);
-
for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
if (h > s_h)
s_idx = 0;
- for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
+ for (n = rcu_dereference(tbl->phash_buckets[h]), idx = 0;
+ n;
+ n = rcu_dereference(n->next)) {
if (idx < s_idx || pneigh_net(n) != net)
goto next;
if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
@@ -2720,16 +2835,13 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
err = pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH, flags, tbl);
- if (err < 0) {
- read_unlock_bh(&tbl->lock);
+ if (err < 0)
goto out;
- }
next:
idx++;
}
}
- read_unlock_bh(&tbl->lock);
out:
cb->args[3] = h;
cb->args[4] = idx;
@@ -2846,64 +2958,58 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
return err;
}
-static int neigh_valid_get_req(const struct nlmsghdr *nlh,
- struct neigh_table **tbl,
- void **dst, int *dev_idx, u8 *ndm_flags,
- struct netlink_ext_ack *extack)
+static struct ndmsg *neigh_valid_get_req(const struct nlmsghdr *nlh,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
{
- struct nlattr *tb[NDA_MAX + 1];
struct ndmsg *ndm;
int err, i;
ndm = nlmsg_payload(nlh, sizeof(*ndm));
if (!ndm) {
NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
ndm->ndm_type) {
NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
if (ndm->ndm_flags & ~NTF_PROXY) {
NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!(ndm->ndm_flags & NTF_PROXY) && !ndm->ndm_ifindex) {
+ NL_SET_ERR_MSG(extack, "No device specified");
+ return ERR_PTR(-EINVAL);
}
err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
NDA_MAX, nda_policy, extack);
if (err < 0)
- return err;
-
- *ndm_flags = ndm->ndm_flags;
- *dev_idx = ndm->ndm_ifindex;
- *tbl = neigh_find_table(ndm->ndm_family);
- if (*tbl == NULL) {
- NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
- return -EAFNOSUPPORT;
- }
+ return ERR_PTR(err);
for (i = 0; i <= NDA_MAX; ++i) {
- if (!tb[i])
- continue;
-
switch (i) {
case NDA_DST:
- if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
- NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
- return -EINVAL;
+ if (!tb[i]) {
+ NL_SET_ERR_ATTR_MISS(extack, NULL, NDA_DST);
+ return ERR_PTR(-EINVAL);
}
- *dst = nla_data(tb[i]);
break;
default:
+ if (!tb[i])
+ continue;
+
NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
}
- return 0;
+ return ndm;
}
static inline size_t neigh_nlmsg_size(void)
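[Annotation: neigh_valid_get_req() now returns the validated struct ndmsg pointer directly and reports failures through the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention, which encodes a small negative errno in the pointer value. A simplified userspace re-creation of that idiom (the kernel's versions live in include/linux/err.h):]

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long PTR_ERR(const void *ptr)  { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* A parser that either returns a valid object or an encoded errno. */
static int *parse(int ok)
{
	static int obj = 42;

	if (!ok)
		return ERR_PTR(-EINVAL);
	return &obj;
}

int main(void)
{
	int *p = parse(0);

	if (IS_ERR(p))
		printf("error: %ld\n", PTR_ERR(p));   /* prints -22 */
	else
		printf("value: %d\n", *p);
	return 0;
}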
@@ -2917,27 +3023,6 @@ static inline size_t neigh_nlmsg_size(void)
+ nla_total_size(1); /* NDA_PROTOCOL */
}
-static int neigh_get_reply(struct net *net, struct neighbour *neigh,
- u32 pid, u32 seq)
-{
- struct sk_buff *skb;
- int err = 0;
-
- skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
- if (!skb)
- return -ENOBUFS;
-
- err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
- if (err) {
- kfree_skb(skb);
- goto errout;
- }
-
- err = rtnl_unicast(skb, net, pid);
-errout:
- return err;
-}
-
static inline size_t pneigh_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ndmsg))
@@ -2946,85 +3031,91 @@ static inline size_t pneigh_nlmsg_size(void)
+ nla_total_size(1); /* NDA_PROTOCOL */
}
-static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
- u32 pid, u32 seq, struct neigh_table *tbl)
+static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
{
+ struct net *net = sock_net(in_skb->sk);
+ u32 pid = NETLINK_CB(in_skb).portid;
+ struct nlattr *tb[NDA_MAX + 1];
+ struct net_device *dev = NULL;
+ u32 seq = nlh->nlmsg_seq;
+ struct neigh_table *tbl;
+ struct neighbour *neigh;
struct sk_buff *skb;
- int err = 0;
+ struct ndmsg *ndm;
+ void *dst;
+ int err;
- skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
+ ndm = neigh_valid_get_req(nlh, tb, extack);
+ if (IS_ERR(ndm))
+ return PTR_ERR(ndm);
+
+ if (ndm->ndm_flags & NTF_PROXY)
+ skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
+ else
+ skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
if (!skb)
return -ENOBUFS;
- err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
- if (err) {
- kfree_skb(skb);
- goto errout;
- }
+ rcu_read_lock();
- err = rtnl_unicast(skb, net, pid);
-errout:
- return err;
-}
+ tbl = neigh_find_table(ndm->ndm_family);
+ if (!tbl) {
+ NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
+ err = -EAFNOSUPPORT;
+ goto err_unlock;
+ }
-static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack)
-{
- struct net *net = sock_net(in_skb->sk);
- struct net_device *dev = NULL;
- struct neigh_table *tbl = NULL;
- struct neighbour *neigh;
- void *dst = NULL;
- u8 ndm_flags = 0;
- int dev_idx = 0;
- int err;
+ if (nla_len(tb[NDA_DST]) != (int)tbl->key_len) {
+ NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
+ err = -EINVAL;
+ goto err_unlock;
+ }
- err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
- extack);
- if (err < 0)
- return err;
+ dst = nla_data(tb[NDA_DST]);
- if (dev_idx) {
- dev = __dev_get_by_index(net, dev_idx);
+ if (ndm->ndm_ifindex) {
+ dev = dev_get_by_index_rcu(net, ndm->ndm_ifindex);
if (!dev) {
NL_SET_ERR_MSG(extack, "Unknown device ifindex");
- return -ENODEV;
+ err = -ENODEV;
+ goto err_unlock;
}
}
- if (!dst) {
- NL_SET_ERR_MSG(extack, "Network address not specified");
- return -EINVAL;
- }
-
- if (ndm_flags & NTF_PROXY) {
+ if (ndm->ndm_flags & NTF_PROXY) {
struct pneigh_entry *pn;
- pn = pneigh_lookup(tbl, net, dst, dev, 0);
+ pn = pneigh_lookup(tbl, net, dst, dev);
if (!pn) {
NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
- return -ENOENT;
+ err = -ENOENT;
+ goto err_unlock;
}
- return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, tbl);
- }
- if (!dev) {
- NL_SET_ERR_MSG(extack, "No device specified");
- return -EINVAL;
- }
+ err = pneigh_fill_info(skb, pn, pid, seq, RTM_NEWNEIGH, 0, tbl);
+ if (err)
+ goto err_unlock;
+ } else {
+ neigh = neigh_lookup(tbl, dst, dev);
+ if (!neigh) {
+ NL_SET_ERR_MSG(extack, "Neighbour entry not found");
+ err = -ENOENT;
+ goto err_unlock;
+ }
- neigh = neigh_lookup(tbl, dst, dev);
- if (!neigh) {
- NL_SET_ERR_MSG(extack, "Neighbour entry not found");
- return -ENOENT;
+ err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
+ neigh_release(neigh);
+ if (err)
+ goto err_unlock;
}
- err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq);
-
- neigh_release(neigh);
+ rcu_read_unlock();
+ return rtnl_unicast(skb, net, pid);
+err_unlock:
+ rcu_read_unlock();
+ kfree_skb(skb);
return err;
}
@@ -3231,9 +3322,10 @@ static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
state->flags |= NEIGH_SEQ_IS_PNEIGH;
for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
- pn = tbl->phash_buckets[bucket];
+ pn = rcu_dereference(tbl->phash_buckets[bucket]);
+
while (pn && !net_eq(pneigh_net(pn), net))
- pn = pn->next;
+ pn = rcu_dereference(pn->next);
if (pn)
break;
}
@@ -3251,15 +3343,17 @@ static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
struct neigh_table *tbl = state->tbl;
do {
- pn = pn->next;
+ pn = rcu_dereference(pn->next);
} while (pn && !net_eq(pneigh_net(pn), net));
while (!pn) {
if (++state->bucket > PNEIGH_HASHMASK)
break;
- pn = tbl->phash_buckets[state->bucket];
+
+ pn = rcu_dereference(tbl->phash_buckets[state->bucket]);
+
while (pn && !net_eq(pneigh_net(pn), net))
- pn = pn->next;
+ pn = rcu_dereference(pn->next);
if (pn)
break;
}
@@ -3823,7 +3917,7 @@ static const struct rtnl_msg_handler neigh_rtnl_msg_handlers[] __initconst = {
{.msgtype = RTM_NEWNEIGH, .doit = neigh_add},
{.msgtype = RTM_DELNEIGH, .doit = neigh_delete},
{.msgtype = RTM_GETNEIGH, .doit = neigh_get, .dumpit = neigh_dump_info,
- .flags = RTNL_FLAG_DUMP_UNLOCKED},
+ .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
{.msgtype = RTM_GETNEIGHTBL, .dumpit = neightbl_dump_info},
{.msgtype = RTM_SETNEIGHTBL, .doit = neightbl_set},
};
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 4f0f0709a1cb..70e0e9a3b650 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -145,7 +145,8 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
seq_printf(seq,
"%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x "
"%08x %08x\n",
- READ_ONCE(sd->processed), atomic_read(&sd->dropped),
+ READ_ONCE(sd->processed),
+ numa_drop_read(&sd->drop_counters),
READ_ONCE(sd->time_squeeze), 0,
0, 0, 0, 0, /* was fastroute */
0, /* was cpu_collision */
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 1ace0cd01adc..ca878525ad7c 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -256,7 +256,7 @@ static ssize_t name_assign_type_show(struct device *dev,
}
static DEVICE_ATTR_RO(name_assign_type);
-/* use same locking rules as GIFHWADDR ioctl's (dev_get_mac_address()) */
+/* use same locking rules as GIFHWADDR ioctl's (netif_get_mac_address()) */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -641,12 +641,6 @@ static ssize_t phys_port_id_show(struct device *dev,
struct netdev_phys_item_id ppid;
ssize_t ret;
- /* The check is also done in dev_get_phys_port_id; this helps returning
- * early without hitting the locking section below.
- */
- if (!netdev->netdev_ops->ndo_get_phys_port_id)
- return -EOPNOTSUPP;
-
ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
if (ret)
return ret;
@@ -668,13 +662,6 @@ static ssize_t phys_port_name_show(struct device *dev,
char name[IFNAMSIZ];
ssize_t ret;
- /* The checks are also done in dev_get_phys_port_name; this helps
- * returning early without hitting the locking section below.
- */
- if (!netdev->netdev_ops->ndo_get_phys_port_name &&
- !netdev->devlink_port)
- return -EOPNOTSUPP;
-
ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
if (ret)
return ret;
@@ -696,19 +683,11 @@ static ssize_t phys_switch_id_show(struct device *dev,
struct netdev_phys_item_id ppid = { };
ssize_t ret;
- /* The checks are also done in dev_get_phys_port_name; this helps
- * returning early without hitting the locking section below. This works
- * because recurse is false when calling dev_get_port_parent_id.
- */
- if (!netdev->netdev_ops->ndo_get_port_parent_id &&
- !netdev->devlink_port)
- return -EOPNOTSUPP;
-
ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
if (ret)
return ret;
- ret = dev_get_port_parent_id(netdev, &ppid, false);
+ ret = netif_get_port_parent_id(netdev, &ppid, false);
if (!ret)
ret = sysfs_emit(buf, "%*phN\n", ppid.id_len, ppid.id);
@@ -718,6 +697,40 @@ static ssize_t phys_switch_id_show(struct device *dev,
}
static DEVICE_ATTR_RO(phys_switch_id);
+static struct attribute *netdev_phys_attrs[] __ro_after_init = {
+ &dev_attr_phys_port_id.attr,
+ &dev_attr_phys_port_name.attr,
+ &dev_attr_phys_switch_id.attr,
+ NULL,
+};
+
+static umode_t netdev_phys_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct net_device *netdev = to_net_dev(dev);
+
+ if (attr == &dev_attr_phys_port_id.attr) {
+ if (!netdev->netdev_ops->ndo_get_phys_port_id)
+ return 0;
+ } else if (attr == &dev_attr_phys_port_name.attr) {
+ if (!netdev->netdev_ops->ndo_get_phys_port_name &&
+ !netdev->devlink_port)
+ return 0;
+ } else if (attr == &dev_attr_phys_switch_id.attr) {
+ if (!netdev->netdev_ops->ndo_get_port_parent_id &&
+ !netdev->devlink_port)
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+static const struct attribute_group netdev_phys_group = {
+ .attrs = netdev_phys_attrs,
+ .is_visible = netdev_phys_is_visible,
+};
+
static ssize_t threaded_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
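[Annotation: moving the three phys_* attributes into their own group with an is_visible() callback replaces the per-show "return -EOPNOTSUPP early" checks: sysfs simply does not create a file whose predicate fails. The shape of that pattern, sketched in plain C with a predicate per attribute; toy structures, not the kobject/attribute_group API:]

#include <stdio.h>
#include <stdbool.h>

struct dev { bool has_port_id; bool has_port_name; };

struct attr {
	const char *name;
	bool (*visible)(const struct dev *d);   /* like .is_visible deciding the mode */
};

static bool port_id_visible(const struct dev *d)   { return d->has_port_id; }
static bool port_name_visible(const struct dev *d) { return d->has_port_name; }

static const struct attr attrs[] = {
	{ "phys_port_id",   port_id_visible },
	{ "phys_port_name", port_name_visible },
};

/* "Register" the group: only create entries whose predicate passes. */
static void register_group(const struct dev *d)
{
	for (unsigned int i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		if (attrs[i].visible(d))
			printf("creating %s\n", attrs[i].name);
}

int main(void)
{
	struct dev d = { .has_port_id = true, .has_port_name = false };

	register_group(&d);     /* only phys_port_id is created */
	return 0;
}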
@@ -744,7 +757,7 @@ static int modify_napi_threaded(struct net_device *dev, unsigned long val)
if (val != 0 && val != 1)
return -EOPNOTSUPP;
- ret = dev_set_threaded(dev, val);
+ ret = netif_set_threaded(dev, val);
return ret;
}
@@ -783,9 +796,6 @@ static struct attribute *net_class_attrs[] __ro_after_init = {
&dev_attr_tx_queue_len.attr,
&dev_attr_gro_flush_timeout.attr,
&dev_attr_napi_defer_hard_irqs.attr,
- &dev_attr_phys_port_id.attr,
- &dev_attr_phys_port_name.attr,
- &dev_attr_phys_switch_id.attr,
&dev_attr_proto_down.attr,
&dev_attr_carrier_up_count.attr,
&dev_attr_carrier_down_count.attr,
@@ -1110,8 +1120,10 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
return -ENOMEM;
table->log = ilog2(mask) + 1;
- for (count = 0; count <= mask; count++)
+ for (count = 0; count <= mask; count++) {
table->flows[count].cpu = RPS_NO_CPU;
+ table->flows[count].filter = RPS_NO_FILTER;
+ }
} else {
table = NULL;
}
@@ -1200,12 +1212,21 @@ static int rx_queue_default_mask(struct net_device *dev,
struct netdev_rx_queue *queue)
{
#if IS_ENABLED(CONFIG_RPS) && IS_ENABLED(CONFIG_SYSCTL)
- struct cpumask *rps_default_mask = READ_ONCE(dev_net(dev)->core.rps_default_mask);
+ struct cpumask *rps_default_mask;
+ int res = 0;
+ mutex_lock(&rps_default_mask_mutex);
+
+ rps_default_mask = dev_net(dev)->core.rps_default_mask;
if (rps_default_mask && !cpumask_empty(rps_default_mask))
- return netdev_rx_queue_set_rps_mask(queue, rps_default_mask);
-#endif
+ res = netdev_rx_queue_set_rps_mask(queue, rps_default_mask);
+
+ mutex_unlock(&rps_default_mask_mutex);
+
+ return res;
+#else
return 0;
+#endif
}
static int rx_queue_add_kobject(struct net_device *dev, int index)
@@ -1309,7 +1330,7 @@ net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
struct netdev_rx_queue *queue = &dev->_rx[i];
struct kobject *kobj = &queue->kobj;
- if (!refcount_read(&dev_net(dev)->ns.count))
+ if (!check_net(dev_net(dev)))
kobj->uevent_suppress = 1;
if (dev->sysfs_rx_queue_group)
sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
@@ -2042,7 +2063,7 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
while (--i >= new_num) {
struct netdev_queue *queue = dev->_tx + i;
- if (!refcount_read(&dev_net(dev)->ns.count))
+ if (!check_net(dev_net(dev)))
queue->kobj.uevent_suppress = 1;
if (netdev_uses_bql(dev))
@@ -2296,7 +2317,7 @@ void netdev_unregister_kobject(struct net_device *ndev)
{
struct device *dev = &ndev->dev;
- if (!refcount_read(&dev_net(ndev)->ns.count))
+ if (!check_net(dev_net(ndev)))
dev_set_uevent_suppress(dev, 1);
kobject_get(&dev->kobj);
@@ -2328,6 +2349,7 @@ int netdev_register_kobject(struct net_device *ndev)
groups++;
*groups++ = &netstat_group;
+ *groups++ = &netdev_phys_group;
if (wireless_group_needed(ndev))
*groups++ = &wireless_group;
diff --git a/net/core/net-sysfs.h b/net/core/net-sysfs.h
index 8a5b04c2699a..e938f25e8e86 100644
--- a/net/core/net-sysfs.h
+++ b/net/core/net-sysfs.h
@@ -11,4 +11,6 @@ int netdev_queue_update_kobjects(struct net_device *net,
int netdev_change_owner(struct net_device *, const struct net *net_old,
const struct net *net_new);
+extern struct mutex rps_default_mask_mutex;
+
#endif
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index ae54f26709ca..b0e0f22d7b21 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -19,9 +19,10 @@
#include <linux/net_namespace.h>
#include <linux/sched/task.h>
#include <linux/uidgid.h>
-#include <linux/cookie.h>
#include <linux/proc_fs.h>
+#include <linux/nstree.h>
+#include <net/aligned_data.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
@@ -64,8 +65,6 @@ DECLARE_RWSEM(pernet_ops_rwsem);
static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
-DEFINE_COOKIE(net_cookie);
-
static struct net_generic *net_alloc_generic(void)
{
unsigned int gen_ptrs = READ_ONCE(max_gen_ptrs);
@@ -316,13 +315,13 @@ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
{
int id;
- if (refcount_read(&net->ns.count) == 0)
+ if (!check_net(net))
return NETNSA_NSID_NOT_ASSIGNED;
- spin_lock_bh(&net->nsid_lock);
+ spin_lock(&net->nsid_lock);
id = __peernet2id(net, peer);
if (id >= 0) {
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock(&net->nsid_lock);
return id;
}
@@ -332,12 +331,12 @@ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
* just been idr_remove()'d from there in cleanup_net().
*/
if (!maybe_get_net(peer)) {
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock(&net->nsid_lock);
return NETNSA_NSID_NOT_ASSIGNED;
}
id = alloc_netid(net, peer, -1);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock(&net->nsid_lock);
put_net(peer);
if (id < 0)
@@ -399,12 +398,17 @@ static __net_init void preinit_net_sysctl(struct net *net)
}
/* init code that must occur even if setup_net() is not called. */
-static __net_init void preinit_net(struct net *net, struct user_namespace *user_ns)
+static __net_init int preinit_net(struct net *net, struct user_namespace *user_ns)
{
+ int ret;
+
+ ret = ns_common_init(net);
+ if (ret)
+ return ret;
+
refcount_set(&net->passive, 1);
- refcount_set(&net->ns.count, 1);
- ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt");
- ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net notrefcnt");
+ ref_tracker_dir_init(&net->refcnt_tracker, 128, "net_refcnt");
+ ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net_notrefcnt");
get_random_bytes(&net->hash_mix, sizeof(u32));
net->dev_base_seq = 1;
@@ -422,6 +426,7 @@ static __net_init void preinit_net(struct net *net, struct user_namespace *user_
INIT_LIST_HEAD(&net->ptype_all);
INIT_LIST_HEAD(&net->ptype_specific);
preinit_net_sysctl(net);
+ return 0;
}
/*
@@ -434,9 +439,7 @@ static __net_init int setup_net(struct net *net)
LIST_HEAD(net_exit_list);
int error = 0;
- preempt_disable();
- net->net_cookie = gen_cookie_next(&net_cookie);
- preempt_enable();
+ net->net_cookie = ns_tree_gen_id(&net->ns);
list_for_each_entry(ops, &pernet_list, list) {
error = ops_init(ops, net);
@@ -446,6 +449,7 @@ static __net_init int setup_net(struct net *net)
down_write(&net_rwsem);
list_add_tail_rcu(&net->list, &net_namespace_list);
up_write(&net_rwsem);
+ ns_tree_add_raw(net);
out:
return error;
@@ -543,7 +547,7 @@ void net_drop_ns(void *p)
net_passive_dec(net);
}
-struct net *copy_net_ns(unsigned long flags,
+struct net *copy_net_ns(u64 flags,
struct user_namespace *user_ns, struct net *old_net)
{
struct ucounts *ucounts;
@@ -563,7 +567,9 @@ struct net *copy_net_ns(unsigned long flags,
goto dec_ucounts;
}
- preinit_net(net, user_ns);
+ rv = preinit_net(net, user_ns);
+ if (rv < 0)
+ goto dec_ucounts;
net->ucounts = ucounts;
get_user_ns(user_ns);
@@ -577,6 +583,7 @@ struct net *copy_net_ns(unsigned long flags,
if (rv < 0) {
put_userns:
+ ns_common_free(net);
#ifdef CONFIG_KEYS
key_remove_domain(net->key_domain);
#endif
@@ -628,20 +635,20 @@ static void unhash_nsid(struct net *net, struct net *last)
for_each_net(tmp) {
int id;
- spin_lock_bh(&tmp->nsid_lock);
+ spin_lock(&tmp->nsid_lock);
id = __peernet2id(tmp, net);
if (id >= 0)
idr_remove(&tmp->netns_ids, id);
- spin_unlock_bh(&tmp->nsid_lock);
+ spin_unlock(&tmp->nsid_lock);
if (id >= 0)
rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
GFP_KERNEL);
if (tmp == last)
break;
}
- spin_lock_bh(&net->nsid_lock);
+ spin_lock(&net->nsid_lock);
idr_destroy(&net->netns_ids);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock(&net->nsid_lock);
}
static LLIST_HEAD(cleanup_list);
@@ -663,8 +670,10 @@ static void cleanup_net(struct work_struct *work)
/* Don't let anyone else find us. */
down_write(&net_rwsem);
- llist_for_each_entry(net, net_kill_list, cleanup_list)
+ llist_for_each_entry(net, net_kill_list, cleanup_list) {
+ ns_tree_remove(net);
list_del_rcu(&net->list);
+ }
/* Cache last net. After we unlock rtnl, no one new net
* added to net_namespace_list can assign nsid pointer
* to a net from net_kill_list (see peernet2id_alloc()).
@@ -697,6 +706,7 @@ static void cleanup_net(struct work_struct *work)
/* Finally it is safe to free my network namespace structure */
list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
list_del_init(&net->exit_list);
+ ns_common_free(net);
dec_net_namespaces(net->ucounts);
#ifdef CONFIG_KEYS
key_remove_domain(net->key_domain);
@@ -791,22 +801,37 @@ struct net *get_net_ns_by_pid(pid_t pid)
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
-static __net_init int net_ns_net_init(struct net *net)
+#ifdef CONFIG_NET_NS_REFCNT_TRACKER
+static void net_ns_net_debugfs(struct net *net)
{
-#ifdef CONFIG_NET_NS
- net->ns.ops = &netns_operations;
-#endif
- return ns_alloc_inum(&net->ns);
+ ref_tracker_dir_symlink(&net->refcnt_tracker, "netns-%llx-%u-refcnt",
+ net->net_cookie, net->ns.inum);
+ ref_tracker_dir_symlink(&net->notrefcnt_tracker, "netns-%llx-%u-notrefcnt",
+ net->net_cookie, net->ns.inum);
+}
+
+static int __init init_net_debugfs(void)
+{
+ ref_tracker_dir_debugfs(&init_net.refcnt_tracker);
+ ref_tracker_dir_debugfs(&init_net.notrefcnt_tracker);
+ net_ns_net_debugfs(&init_net);
+ return 0;
}
+late_initcall(init_net_debugfs);
+#else
+static void net_ns_net_debugfs(struct net *net)
+{
+}
+#endif
-static __net_exit void net_ns_net_exit(struct net *net)
+static __net_init int net_ns_net_init(struct net *net)
{
- ns_free_inum(&net->ns);
+ net_ns_net_debugfs(net);
+ return 0;
}
static struct pernet_operations __net_initdata net_ns_ops = {
.init = net_ns_net_init,
- .exit = net_ns_net_exit,
};
static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
@@ -852,9 +877,9 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
return PTR_ERR(peer);
}
- spin_lock_bh(&net->nsid_lock);
+ spin_lock(&net->nsid_lock);
if (__peernet2id(net, peer) >= 0) {
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock(&net->nsid_lock);
err = -EEXIST;
NL_SET_BAD_ATTR(extack, nla);
NL_SET_ERR_MSG(extack,
@@ -863,7 +888,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
}
err = alloc_netid(net, peer, nsid);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock(&net->nsid_lock);
if (err >= 0) {
rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
nlh, GFP_KERNEL);
@@ -1252,7 +1277,12 @@ void __init net_ns_init(void)
#ifdef CONFIG_KEYS
init_net.key_domain = &init_net_key_domain;
#endif
- preinit_net(&init_net, &init_user_ns);
+ /*
+ * This currently cannot fail as the initial network namespace
+ * has a static inode number.
+ */
+ if (preinit_net(&init_net, &init_user_ns))
+ panic("Could not preinitialize the initial network namespace");
down_write(&pernet_ops_rwsem);
if (setup_net(&init_net))
@@ -1487,11 +1517,6 @@ static struct ns_common *netns_get(struct task_struct *task)
return net ? &net->ns : NULL;
}
-static inline struct net *to_net_ns(struct ns_common *ns)
-{
- return container_of(ns, struct net, ns);
-}
-
static void netns_put(struct ns_common *ns)
{
put_net(to_net_ns(ns));
@@ -1518,7 +1543,6 @@ static struct user_namespace *netns_owner(struct ns_common *ns)
const struct proc_ns_operations netns_operations = {
.name = "net",
- .type = CLONE_NEWNET,
.get = netns_get,
.put = netns_put,
.install = netns_install,
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index d22f0919821e..dff66d8fb325 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -21,7 +21,9 @@ static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state
struct cgroup_cls_state *task_cls_state(struct task_struct *p)
{
return css_cls_state(task_css_check(p, net_cls_cgrp_id,
- rcu_read_lock_bh_held()));
+ rcu_read_lock_held() ||
+ rcu_read_lock_bh_held() ||
+ rcu_read_lock_trace_held()));
}
EXPORT_SYMBOL_GPL(task_cls_state);
diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c
index 4fc44587f493..e9a2a6f26cb7 100644
--- a/net/core/netdev-genl-gen.c
+++ b/net/core/netdev-genl-gen.c
@@ -92,11 +92,12 @@ static const struct nla_policy netdev_bind_rx_nl_policy[NETDEV_A_DMABUF_FD + 1]
};
/* NETDEV_CMD_NAPI_SET - do */
-static const struct nla_policy netdev_napi_set_nl_policy[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT + 1] = {
+static const struct nla_policy netdev_napi_set_nl_policy[NETDEV_A_NAPI_THREADED + 1] = {
[NETDEV_A_NAPI_ID] = { .type = NLA_U32, },
[NETDEV_A_NAPI_DEFER_HARD_IRQS] = NLA_POLICY_FULL_RANGE(NLA_U32, &netdev_a_napi_defer_hard_irqs_range),
[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT] = { .type = NLA_UINT, },
[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT] = { .type = NLA_UINT, },
+ [NETDEV_A_NAPI_THREADED] = NLA_POLICY_MAX(NLA_U32, 1),
};
/* NETDEV_CMD_BIND_TX - do */
@@ -193,7 +194,7 @@ static const struct genl_split_ops netdev_nl_ops[] = {
.cmd = NETDEV_CMD_NAPI_SET,
.doit = netdev_nl_napi_set_doit,
.policy = netdev_napi_set_nl_policy,
- .maxattr = NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT,
+ .maxattr = NETDEV_A_NAPI_THREADED,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
{
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index 2afa7b2141aa..470fabbeacd9 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -184,6 +184,10 @@ netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq))
goto nla_put_failure;
+ if (nla_put_uint(rsp, NETDEV_A_NAPI_THREADED,
+ napi_get_threaded(napi)))
+ goto nla_put_failure;
+
if (napi->thread) {
pid = task_pid_nr(napi->thread);
if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid))
@@ -322,8 +326,18 @@ netdev_nl_napi_set_config(struct napi_struct *napi, struct genl_info *info)
{
u64 irq_suspend_timeout = 0;
u64 gro_flush_timeout = 0;
+ u8 threaded = 0;
u32 defer = 0;
+ if (info->attrs[NETDEV_A_NAPI_THREADED]) {
+ int ret;
+
+ threaded = nla_get_uint(info->attrs[NETDEV_A_NAPI_THREADED]);
+ ret = napi_set_threaded(napi, threaded);
+ if (ret)
+ return ret;
+ }
+
if (info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]) {
defer = nla_get_u32(info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]);
napi_set_defer_hard_irqs(napi, defer);
@@ -855,16 +869,79 @@ int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
return err;
}
-int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
+static int netdev_nl_read_rxq_bitmap(struct genl_info *info,
+ u32 rxq_bitmap_len,
+ unsigned long *rxq_bitmap)
{
+ const int maxtype = ARRAY_SIZE(netdev_queue_id_nl_policy) - 1;
struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
+ struct nlattr *attr;
+ int rem, err = 0;
+ u32 rxq_idx;
+
+ nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
+ genlmsg_data(info->genlhdr),
+ genlmsg_len(info->genlhdr), rem) {
+ err = nla_parse_nested(tb, maxtype, attr,
+ netdev_queue_id_nl_policy, info->extack);
+ if (err < 0)
+ return err;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
+ NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE))
+ return -EINVAL;
+
+ if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
+ NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
+ return -EINVAL;
+ }
+
+ rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);
+ if (rxq_idx >= rxq_bitmap_len) {
+ NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_ID]);
+ return -EINVAL;
+ }
+
+ bitmap_set(rxq_bitmap, rxq_idx, 1);
+ }
+
+ return 0;
+}
+
+static struct device *
+netdev_nl_get_dma_dev(struct net_device *netdev, unsigned long *rxq_bitmap,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dma_dev = NULL;
+ u32 rxq_idx, prev_rxq_idx;
+
+ for_each_set_bit(rxq_idx, rxq_bitmap, netdev->real_num_rx_queues) {
+ struct device *rxq_dma_dev;
+
+ rxq_dma_dev = netdev_queue_get_dma_dev(netdev, rxq_idx);
+ if (dma_dev && rxq_dma_dev != dma_dev) {
+ NL_SET_ERR_MSG_FMT(extack, "DMA device mismatch between queue %u and %u (multi-PF device?)",
+ rxq_idx, prev_rxq_idx);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ dma_dev = rxq_dma_dev;
+ prev_rxq_idx = rxq_idx;
+ }
+
+ return dma_dev;
+}
+
+int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
+{
struct net_devmem_dmabuf_binding *binding;
u32 ifindex, dmabuf_fd, rxq_idx;
struct netdev_nl_sock *priv;
struct net_device *netdev;
+ unsigned long *rxq_bitmap;
+ struct device *dma_dev;
struct sk_buff *rsp;
- struct nlattr *attr;
- int rem, err = 0;
+ int err = 0;
void *hdr;
if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
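[Annotation: netdev_nl_read_rxq_bitmap() collects the requested queue indices into a bitmap, and netdev_nl_get_dma_dev() then walks the set bits to check that every queue maps to the same DMA device before binding. A standalone sketch of that two-step shape, using a plain unsigned long as the bitmap; the kernel uses bitmap_zalloc()/for_each_set_bit() and real device pointers:]

#include <stdio.h>

#define NQUEUES 8

/* Pretend each rx queue has a backing DMA device id. */
static int queue_dma_dev[NQUEUES] = { 1, 1, 1, 2, 2, 2, 2, 2 };

static int read_rxq_bitmap(const int *ids, int n, unsigned long *bitmap)
{
	for (int i = 0; i < n; i++) {
		if (ids[i] >= NQUEUES)
			return -1;              /* out of range, like the EINVAL path */
		*bitmap |= 1UL << ids[i];
	}
	return 0;
}

/* Return the shared DMA device id, or -1 if the queues disagree. */
static int get_dma_dev(unsigned long bitmap)
{
	int dma_dev = 0;

	for (int q = 0; q < NQUEUES; q++) {
		if (!(bitmap & (1UL << q)))
			continue;
		if (dma_dev && queue_dma_dev[q] != dma_dev)
			return -1;              /* mismatch (multi-PF device) */
		dma_dev = queue_dma_dev[q];
	}
	return dma_dev;
}

int main(void)
{
	unsigned long bitmap = 0;
	int ids[] = { 3, 5, 7 };

	if (!read_rxq_bitmap(ids, 3, &bitmap))
		printf("dma dev: %d\n", get_dma_dev(bitmap));   /* prints 2 */
	return 0;
}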
@@ -907,36 +984,31 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
goto err_unlock;
}
- binding = net_devmem_bind_dmabuf(netdev, DMA_FROM_DEVICE, dmabuf_fd,
- priv, info->extack);
- if (IS_ERR(binding)) {
- err = PTR_ERR(binding);
+ rxq_bitmap = bitmap_zalloc(netdev->real_num_rx_queues, GFP_KERNEL);
+ if (!rxq_bitmap) {
+ err = -ENOMEM;
goto err_unlock;
}
- nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
- genlmsg_data(info->genlhdr),
- genlmsg_len(info->genlhdr), rem) {
- err = nla_parse_nested(
- tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
- netdev_queue_id_nl_policy, info->extack);
- if (err < 0)
- goto err_unbind;
-
- if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
- NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) {
- err = -EINVAL;
- goto err_unbind;
- }
+ err = netdev_nl_read_rxq_bitmap(info, netdev->real_num_rx_queues,
+ rxq_bitmap);
+ if (err)
+ goto err_rxq_bitmap;
- if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
- NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
- err = -EINVAL;
- goto err_unbind;
- }
+ dma_dev = netdev_nl_get_dma_dev(netdev, rxq_bitmap, info->extack);
+ if (IS_ERR(dma_dev)) {
+ err = PTR_ERR(dma_dev);
+ goto err_rxq_bitmap;
+ }
- rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);
+ binding = net_devmem_bind_dmabuf(netdev, dma_dev, DMA_FROM_DEVICE,
+ dmabuf_fd, priv, info->extack);
+ if (IS_ERR(binding)) {
+ err = PTR_ERR(binding);
+ goto err_rxq_bitmap;
+ }
+ for_each_set_bit(rxq_idx, rxq_bitmap, netdev->real_num_rx_queues) {
err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding,
info->extack);
if (err)
@@ -950,6 +1022,8 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
if (err)
goto err_unbind;
+ bitmap_free(rxq_bitmap);
+
netdev_unlock(netdev);
mutex_unlock(&priv->lock);
@@ -958,6 +1032,8 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
err_unbind:
net_devmem_unbind_dmabuf(binding);
+err_rxq_bitmap:
+ bitmap_free(rxq_bitmap);
err_unlock:
netdev_unlock(netdev);
err_unlock_sock:
@@ -972,6 +1048,7 @@ int netdev_nl_bind_tx_doit(struct sk_buff *skb, struct genl_info *info)
struct net_devmem_dmabuf_binding *binding;
struct netdev_nl_sock *priv;
struct net_device *netdev;
+ struct device *dma_dev;
u32 ifindex, dmabuf_fd;
struct sk_buff *rsp;
int err = 0;
@@ -1018,8 +1095,9 @@ int netdev_nl_bind_tx_doit(struct sk_buff *skb, struct genl_info *info)
goto err_unlock_netdev;
}
- binding = net_devmem_bind_dmabuf(netdev, DMA_TO_DEVICE, dmabuf_fd, priv,
- info->extack);
+ dma_dev = netdev_queue_get_dma_dev(netdev, 0);
+ binding = net_devmem_bind_dmabuf(netdev, dma_dev, DMA_TO_DEVICE,
+ dmabuf_fd, priv, info->extack);
if (IS_ERR(binding)) {
err = PTR_ERR(binding);
goto err_unlock_netdev;
diff --git a/net/core/netdev_queues.c b/net/core/netdev_queues.c
new file mode 100644
index 000000000000..251f27a8307f
--- /dev/null
+++ b/net/core/netdev_queues.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <net/netdev_queues.h>
+
+/**
+ * netdev_queue_get_dma_dev() - get dma device for zero-copy operations
+ * @dev: net_device
+ * @idx: queue index
+ *
+ * Get the DMA device to use for zero-copy operations on this queue.
+ * When no such device is available or valid, the function returns NULL.
+ *
+ * Return: Device or NULL on error
+ */
+struct device *netdev_queue_get_dma_dev(struct net_device *dev, int idx)
+{
+ const struct netdev_queue_mgmt_ops *queue_ops = dev->queue_mgmt_ops;
+ struct device *dma_dev;
+
+ if (queue_ops && queue_ops->ndo_queue_get_dma_dev)
+ dma_dev = queue_ops->ndo_queue_get_dma_dev(dev, idx);
+ else
+ dma_dev = dev->dev.parent;
+
+ return dma_dev && dma_dev->dma_mask ? dma_dev : NULL;
+}
+
diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c
index d126f10197bf..c7d9341b7630 100644
--- a/net/core/netdev_rx_queue.c
+++ b/net/core/netdev_rx_queue.c
@@ -9,6 +9,15 @@
#include "page_pool_priv.h"
+/* See also page_pool_is_unreadable() */
+bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx)
+{
+ struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, idx);
+
+ return !!rxq->mp_params.mp_ops;
+}
+EXPORT_SYMBOL(netif_rxq_has_unreadable_mp);
+
int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
{
struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
@@ -97,14 +106,12 @@ int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
if (!netdev_need_ops_lock(dev))
return -EOPNOTSUPP;
- if (rxq_idx >= dev->real_num_rx_queues)
- return -EINVAL;
- rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);
-
if (rxq_idx >= dev->real_num_rx_queues) {
NL_SET_ERR_MSG(extack, "rx queue index out of range");
return -ERANGE;
}
+ rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);
+
if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
return -EINVAL;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 4ddb7490df4b..60a05d3b7c24 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -58,13 +58,6 @@ static void zap_completion_queue(void);
static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);
-#define np_info(np, fmt, ...) \
- pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
-#define np_err(np, fmt, ...) \
- pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
-#define np_notice(np, fmt, ...) \
- pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
-
static netdev_tx_t netpoll_start_xmit(struct sk_buff *skb,
struct net_device *dev,
struct netdev_queue *txq)
@@ -379,6 +372,31 @@ out:
return ret;
}
+static void netpoll_udp_checksum(struct netpoll *np, struct sk_buff *skb,
+ int len)
+{
+ struct udphdr *udph;
+ int udp_len;
+
+ udp_len = len + sizeof(struct udphdr);
+ udph = udp_hdr(skb);
+
+	/* check must be zeroed first, since the field is included in csum_partial */
+ udph->check = 0;
+ if (np->ipv6)
+ udph->check = csum_ipv6_magic(&np->local_ip.in6,
+ &np->remote_ip.in6,
+ udp_len, IPPROTO_UDP,
+ csum_partial(udph, udp_len, 0));
+ else
+ udph->check = csum_tcpudp_magic(np->local_ip.ip,
+ np->remote_ip.ip,
+ udp_len, IPPROTO_UDP,
+ csum_partial(udph, udp_len, 0));
+ if (udph->check == 0)
+ udph->check = CSUM_MANGLED_0;
+}
+
netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
unsigned long flags;
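[Annotation: netpoll_udp_checksum() above wraps the same arithmetic for IPv4 and IPv6: an RFC 1071 ones'-complement sum over a pseudo-header plus the UDP datagram, with an all-zero result transmitted as 0xffff. A self-contained userspace version of the IPv4 case, for illustration only; the kernel uses csum_partial()/csum_tcpudp_magic() and this sketch returns the value to store big-endian in udph->check:]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* RFC 1071: sum the buffer as big-endian 16-bit words. */
static uint32_t csum_add(uint32_t sum, const uint8_t *buf, size_t len)
{
	while (len > 1) {
		sum += (uint32_t)((buf[0] << 8) | buf[1]);
		buf += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)(buf[0] << 8);
	return sum;
}

static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* UDP/IPv4: pseudo-header (saddr, daddr, zero, proto, length) + datagram. */
static uint16_t udp4_checksum(const uint8_t saddr[4], const uint8_t daddr[4],
			      const uint8_t *udp, uint16_t udp_len)
{
	uint8_t pseudo[12];
	uint32_t sum = 0;
	uint16_t folded;

	memcpy(&pseudo[0], saddr, 4);
	memcpy(&pseudo[4], daddr, 4);
	pseudo[8] = 0;
	pseudo[9] = 17;                         /* IPPROTO_UDP */
	pseudo[10] = udp_len >> 8;
	pseudo[11] = udp_len & 0xff;

	sum = csum_add(sum, pseudo, sizeof(pseudo));
	sum = csum_add(sum, udp, udp_len);      /* check field must already be 0 */
	folded = csum_fold(sum);
	return folded ? folded : 0xffff;        /* zero is sent as all ones */
}

int main(void)
{
	uint8_t src[4] = {192, 0, 2, 1}, dst[4] = {192, 0, 2, 2};
	/* 8-byte UDP header (check zeroed) + 4 bytes of payload */
	uint8_t udp[12] = {0x1f, 0x90, 0x1f, 0x91, 0x00, 0x0c, 0x00, 0x00, 'p', 'i', 'n', 'g'};

	printf("udp check = 0x%04x\n", udp4_checksum(src, dst, udp, sizeof(udp)));
	return 0;
}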
@@ -396,24 +414,101 @@ netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
}
EXPORT_SYMBOL(netpoll_send_skb);
+static void push_ipv6(struct netpoll *np, struct sk_buff *skb, int len)
+{
+ struct ipv6hdr *ip6h;
+
+ skb_push(skb, sizeof(struct ipv6hdr));
+ skb_reset_network_header(skb);
+ ip6h = ipv6_hdr(skb);
+
+ /* ip6h->version = 6; ip6h->priority = 0; */
+ *(unsigned char *)ip6h = 0x60;
+ ip6h->flow_lbl[0] = 0;
+ ip6h->flow_lbl[1] = 0;
+ ip6h->flow_lbl[2] = 0;
+
+ ip6h->payload_len = htons(sizeof(struct udphdr) + len);
+ ip6h->nexthdr = IPPROTO_UDP;
+ ip6h->hop_limit = 32;
+ ip6h->saddr = np->local_ip.in6;
+ ip6h->daddr = np->remote_ip.in6;
+
+ skb->protocol = htons(ETH_P_IPV6);
+}
+
+static void push_ipv4(struct netpoll *np, struct sk_buff *skb, int len)
+{
+ static atomic_t ip_ident;
+ struct iphdr *iph;
+ int ip_len;
+
+ ip_len = len + sizeof(struct udphdr) + sizeof(struct iphdr);
+
+ skb_push(skb, sizeof(struct iphdr));
+ skb_reset_network_header(skb);
+ iph = ip_hdr(skb);
+
+ /* iph->version = 4; iph->ihl = 5; */
+ *(unsigned char *)iph = 0x45;
+ iph->tos = 0;
+ put_unaligned(htons(ip_len), &iph->tot_len);
+ iph->id = htons(atomic_inc_return(&ip_ident));
+ iph->frag_off = 0;
+ iph->ttl = 64;
+ iph->protocol = IPPROTO_UDP;
+ iph->check = 0;
+ put_unaligned(np->local_ip.ip, &iph->saddr);
+ put_unaligned(np->remote_ip.ip, &iph->daddr);
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ skb->protocol = htons(ETH_P_IP);
+}
+
+static void push_udp(struct netpoll *np, struct sk_buff *skb, int len)
+{
+ struct udphdr *udph;
+ int udp_len;
+
+ udp_len = len + sizeof(struct udphdr);
+
+ skb_push(skb, sizeof(struct udphdr));
+ skb_reset_transport_header(skb);
+
+ udph = udp_hdr(skb);
+ udph->source = htons(np->local_port);
+ udph->dest = htons(np->remote_port);
+ udph->len = htons(udp_len);
+
+ netpoll_udp_checksum(np, skb, len);
+}
+
+static void push_eth(struct netpoll *np, struct sk_buff *skb)
+{
+ struct ethhdr *eth;
+
+ eth = skb_push(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+ ether_addr_copy(eth->h_source, np->dev->dev_addr);
+ ether_addr_copy(eth->h_dest, np->remote_mac);
+ if (np->ipv6)
+ eth->h_proto = htons(ETH_P_IPV6);
+ else
+ eth->h_proto = htons(ETH_P_IP);
+}
+
int netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
int total_len, ip_len, udp_len;
struct sk_buff *skb;
- struct udphdr *udph;
- struct iphdr *iph;
- struct ethhdr *eth;
- static atomic_t ip_ident;
- struct ipv6hdr *ip6h;
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
WARN_ON_ONCE(!irqs_disabled());
- udp_len = len + sizeof(*udph);
+ udp_len = len + sizeof(struct udphdr);
if (np->ipv6)
- ip_len = udp_len + sizeof(*ip6h);
+ ip_len = udp_len + sizeof(struct ipv6hdr);
else
- ip_len = udp_len + sizeof(*iph);
+ ip_len = udp_len + sizeof(struct iphdr);
total_len = ip_len + LL_RESERVED_SPACE(np->dev);
@@ -425,117 +520,18 @@ int netpoll_send_udp(struct netpoll *np, const char *msg, int len)
skb_copy_to_linear_data(skb, msg, len);
skb_put(skb, len);
- skb_push(skb, sizeof(*udph));
- skb_reset_transport_header(skb);
- udph = udp_hdr(skb);
- udph->source = htons(np->local_port);
- udph->dest = htons(np->remote_port);
- udph->len = htons(udp_len);
-
- if (np->ipv6) {
- udph->check = csum_ipv6_magic(&np->local_ip.in6,
- &np->remote_ip.in6,
- udp_len, IPPROTO_UDP,
- csum_partial(udph, udp_len, 0));
- if (udph->check == 0)
- udph->check = CSUM_MANGLED_0;
-
- skb_push(skb, sizeof(*ip6h));
- skb_reset_network_header(skb);
- ip6h = ipv6_hdr(skb);
-
- /* ip6h->version = 6; ip6h->priority = 0; */
- *(unsigned char *)ip6h = 0x60;
- ip6h->flow_lbl[0] = 0;
- ip6h->flow_lbl[1] = 0;
- ip6h->flow_lbl[2] = 0;
-
- ip6h->payload_len = htons(sizeof(struct udphdr) + len);
- ip6h->nexthdr = IPPROTO_UDP;
- ip6h->hop_limit = 32;
- ip6h->saddr = np->local_ip.in6;
- ip6h->daddr = np->remote_ip.in6;
-
- eth = skb_push(skb, ETH_HLEN);
- skb_reset_mac_header(skb);
- skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
- } else {
- udph->check = 0;
- udph->check = csum_tcpudp_magic(np->local_ip.ip,
- np->remote_ip.ip,
- udp_len, IPPROTO_UDP,
- csum_partial(udph, udp_len, 0));
- if (udph->check == 0)
- udph->check = CSUM_MANGLED_0;
-
- skb_push(skb, sizeof(*iph));
- skb_reset_network_header(skb);
- iph = ip_hdr(skb);
-
- /* iph->version = 4; iph->ihl = 5; */
- *(unsigned char *)iph = 0x45;
- iph->tos = 0;
- put_unaligned(htons(ip_len), &(iph->tot_len));
- iph->id = htons(atomic_inc_return(&ip_ident));
- iph->frag_off = 0;
- iph->ttl = 64;
- iph->protocol = IPPROTO_UDP;
- iph->check = 0;
- put_unaligned(np->local_ip.ip, &(iph->saddr));
- put_unaligned(np->remote_ip.ip, &(iph->daddr));
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-
- eth = skb_push(skb, ETH_HLEN);
- skb_reset_mac_header(skb);
- skb->protocol = eth->h_proto = htons(ETH_P_IP);
- }
-
- ether_addr_copy(eth->h_source, np->dev->dev_addr);
- ether_addr_copy(eth->h_dest, np->remote_mac);
-
+ push_udp(np, skb, len);
+ if (np->ipv6)
+ push_ipv6(np, skb, len);
+ else
+ push_ipv4(np, skb, len);
+ push_eth(np, skb);
skb->dev = np->dev;
return (int)netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
-void netpoll_print_options(struct netpoll *np)
-{
- np_info(np, "local port %d\n", np->local_port);
- if (np->ipv6)
- np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
- else
- np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
- np_info(np, "interface name '%s'\n", np->dev_name);
- np_info(np, "local ethernet address '%pM'\n", np->dev_mac);
- np_info(np, "remote port %d\n", np->remote_port);
- if (np->ipv6)
- np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
- else
- np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
- np_info(np, "remote ethernet address %pM\n", np->remote_mac);
-}
-EXPORT_SYMBOL(netpoll_print_options);
-
-static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
-{
- const char *end;
-
- if (!strchr(str, ':') &&
- in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
- if (!*end)
- return 0;
- }
- if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
-#if IS_ENABLED(CONFIG_IPV6)
- if (!*end)
- return 1;
-#else
- return -1;
-#endif
- }
- return -1;
-}
static void skb_pool_flush(struct netpoll *np)
{
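[Annotation: netpoll_send_udp() now builds the frame by copying the payload and then prepending headers innermost-first — push_udp(), then push_ipv4()/push_ipv6(), then push_eth() — mirroring how skb_push() grows the packet toward the reserved headroom. A toy userspace model of that push discipline, with a simplified buffer, hypothetical helper names, and no real header contents or checksums:]

#include <stdio.h>
#include <string.h>

#define HEADROOM 64

struct pkt {
	unsigned char buf[256];
	size_t start;           /* current start of data within buf */
	size_t len;             /* bytes of packet so far */
};

/* Like skb_push(): move the data start backwards and return the new front. */
static unsigned char *pkt_push(struct pkt *p, size_t n)
{
	p->start -= n;
	p->len += n;
	return p->buf + p->start;
}

int main(void)
{
	struct pkt p = { .start = HEADROOM, .len = 0 };

	memcpy(p.buf + p.start, "ping", 4);     /* payload first */
	p.len = 4;

	memset(pkt_push(&p, 8), 0xaa, 8);       /* then the UDP header */
	memset(pkt_push(&p, 20), 0xbb, 20);     /* then the IPv4 header */
	memset(pkt_push(&p, 14), 0xcc, 14);     /* then the Ethernet header */

	printf("frame length %zu, headroom left %zu\n", p.len, p.start);
	return 0;
}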
@@ -546,95 +542,6 @@ static void skb_pool_flush(struct netpoll *np)
skb_queue_purge_reason(skb_pool, SKB_CONSUMED);
}
-int netpoll_parse_options(struct netpoll *np, char *opt)
-{
- char *cur=opt, *delim;
- int ipv6;
- bool ipversion_set = false;
-
- if (*cur != '@') {
- if ((delim = strchr(cur, '@')) == NULL)
- goto parse_failed;
- *delim = 0;
- if (kstrtou16(cur, 10, &np->local_port))
- goto parse_failed;
- cur = delim;
- }
- cur++;
-
- if (*cur != '/') {
- ipversion_set = true;
- if ((delim = strchr(cur, '/')) == NULL)
- goto parse_failed;
- *delim = 0;
- ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
- if (ipv6 < 0)
- goto parse_failed;
- else
- np->ipv6 = (bool)ipv6;
- cur = delim;
- }
- cur++;
-
- if (*cur != ',') {
- /* parse out dev_name or dev_mac */
- if ((delim = strchr(cur, ',')) == NULL)
- goto parse_failed;
- *delim = 0;
-
- np->dev_name[0] = '\0';
- eth_broadcast_addr(np->dev_mac);
- if (!strchr(cur, ':'))
- strscpy(np->dev_name, cur, sizeof(np->dev_name));
- else if (!mac_pton(cur, np->dev_mac))
- goto parse_failed;
-
- cur = delim;
- }
- cur++;
-
- if (*cur != '@') {
- /* dst port */
- if ((delim = strchr(cur, '@')) == NULL)
- goto parse_failed;
- *delim = 0;
- if (*cur == ' ' || *cur == '\t')
- np_info(np, "warning: whitespace is not allowed\n");
- if (kstrtou16(cur, 10, &np->remote_port))
- goto parse_failed;
- cur = delim;
- }
- cur++;
-
- /* dst ip */
- if ((delim = strchr(cur, '/')) == NULL)
- goto parse_failed;
- *delim = 0;
- ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
- if (ipv6 < 0)
- goto parse_failed;
- else if (ipversion_set && np->ipv6 != (bool)ipv6)
- goto parse_failed;
- else
- np->ipv6 = (bool)ipv6;
- cur = delim + 1;
-
- if (*cur != 0) {
- /* MAC address */
- if (!mac_pton(cur, np->remote_mac))
- goto parse_failed;
- }
-
- netpoll_print_options(np);
-
- return 0;
-
- parse_failed:
- np_info(np, "couldn't parse config at '%s'!\n", cur);
- return -1;
-}
-EXPORT_SYMBOL(netpoll_parse_options);
-
static void refill_skbs_work_handler(struct work_struct *work)
{
struct netpoll *np =
@@ -684,7 +591,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
np->dev = ndev;
strscpy(np->dev_name, ndev->name, IFNAMSIZ);
- npinfo->netpoll = np;
/* fill up the skb queue */
refill_skbs(np);
@@ -716,13 +622,97 @@ static char *egress_dev(struct netpoll *np, char *buf)
return buf;
}
+static void netpoll_wait_carrier(struct netpoll *np, struct net_device *ndev,
+ unsigned int timeout)
+{
+ unsigned long atmost;
+
+ atmost = jiffies + timeout * HZ;
+ while (!netif_carrier_ok(ndev)) {
+ if (time_after(jiffies, atmost)) {
+ np_notice(np, "timeout waiting for carrier\n");
+ break;
+ }
+ msleep(1);
+ }
+}
+
+/*
+ * Take an IPv6 address from ndev and populate the local_ip structure in netpoll
+ */
+static int netpoll_take_ipv6(struct netpoll *np, struct net_device *ndev)
+{
+ char buf[MAC_ADDR_STR_LEN + 1];
+ int err = -EDESTADDRREQ;
+ struct inet6_dev *idev;
+
+ if (!IS_ENABLED(CONFIG_IPV6)) {
+ np_err(np, "IPv6 is not supported %s, aborting\n",
+ egress_dev(np, buf));
+ return -EINVAL;
+ }
+
+ idev = __in6_dev_get(ndev);
+ if (idev) {
+ struct inet6_ifaddr *ifp;
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifp, &idev->addr_list, if_list) {
+ if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
+ !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
+ continue;
+ /* Got the IP, let's return */
+ np->local_ip.in6 = ifp->addr;
+ err = 0;
+ break;
+ }
+ read_unlock_bh(&idev->lock);
+ }
+ if (err) {
+ np_err(np, "no IPv6 address for %s, aborting\n",
+ egress_dev(np, buf));
+ return err;
+ }
+
+ np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
+ return 0;
+}
+
+/*
+ * Take the IPv4 address from ndev and populate the local_ip structure in netpoll
+ */
+static int netpoll_take_ipv4(struct netpoll *np, struct net_device *ndev)
+{
+ char buf[MAC_ADDR_STR_LEN + 1];
+ const struct in_ifaddr *ifa;
+ struct in_device *in_dev;
+
+ in_dev = __in_dev_get_rtnl(ndev);
+ if (!in_dev) {
+ np_err(np, "no IP address for %s, aborting\n",
+ egress_dev(np, buf));
+ return -EDESTADDRREQ;
+ }
+
+ ifa = rtnl_dereference(in_dev->ifa_list);
+ if (!ifa) {
+ np_err(np, "no IP address for %s, aborting\n",
+ egress_dev(np, buf));
+ return -EDESTADDRREQ;
+ }
+
+ np->local_ip.ip = ifa->ifa_local;
+ np_info(np, "local IP %pI4\n", &np->local_ip.ip);
+
+ return 0;
+}
+
int netpoll_setup(struct netpoll *np)
{
struct net *net = current->nsproxy->net_ns;
char buf[MAC_ADDR_STR_LEN + 1];
struct net_device *ndev = NULL;
bool ip_overwritten = false;
- struct in_device *in_dev;
int err;
rtnl_lock();
@@ -746,91 +736,44 @@ int netpoll_setup(struct netpoll *np)
}
if (!netif_running(ndev)) {
- unsigned long atmost;
-
np_info(np, "device %s not up yet, forcing it\n",
egress_dev(np, buf));
err = dev_open(ndev, NULL);
-
if (err) {
np_err(np, "failed to open %s\n", ndev->name);
goto put;
}
rtnl_unlock();
- atmost = jiffies + carrier_timeout * HZ;
- while (!netif_carrier_ok(ndev)) {
- if (time_after(jiffies, atmost)) {
- np_notice(np, "timeout waiting for carrier\n");
- break;
- }
- msleep(1);
- }
-
+ netpoll_wait_carrier(np, ndev, carrier_timeout);
rtnl_lock();
}
if (!np->local_ip.ip) {
if (!np->ipv6) {
- const struct in_ifaddr *ifa;
-
- in_dev = __in_dev_get_rtnl(ndev);
- if (!in_dev)
- goto put_noaddr;
-
- ifa = rtnl_dereference(in_dev->ifa_list);
- if (!ifa) {
-put_noaddr:
- np_err(np, "no IP address for %s, aborting\n",
- egress_dev(np, buf));
- err = -EDESTADDRREQ;
+ err = netpoll_take_ipv4(np, ndev);
+ if (err)
goto put;
- }
-
- np->local_ip.ip = ifa->ifa_local;
- ip_overwritten = true;
- np_info(np, "local IP %pI4\n", &np->local_ip.ip);
} else {
-#if IS_ENABLED(CONFIG_IPV6)
- struct inet6_dev *idev;
-
- err = -EDESTADDRREQ;
- idev = __in6_dev_get(ndev);
- if (idev) {
- struct inet6_ifaddr *ifp;
-
- read_lock_bh(&idev->lock);
- list_for_each_entry(ifp, &idev->addr_list, if_list) {
- if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
- !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
- continue;
- np->local_ip.in6 = ifp->addr;
- ip_overwritten = true;
- err = 0;
- break;
- }
- read_unlock_bh(&idev->lock);
- }
- if (err) {
- np_err(np, "no IPv6 address for %s, aborting\n",
- egress_dev(np, buf));
+ err = netpoll_take_ipv6(np, ndev);
+ if (err)
goto put;
- } else
- np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
-#else
- np_err(np, "IPv6 is not supported %s, aborting\n",
- egress_dev(np, buf));
- err = -EINVAL;
- goto put;
-#endif
}
+ ip_overwritten = true;
}
err = __netpoll_setup(np, ndev);
if (err)
goto flush;
rtnl_unlock();
+
+ /* Make sure all NAPI polls which started before dev->npinfo
+ * was visible have exited before we start calling NAPI poll.
+ * NAPI skips locking if dev->npinfo is NULL.
+ */
+ synchronize_rcu();
+
return 0;
flush:
@@ -863,7 +806,7 @@ static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
kfree(npinfo);
}
-void __netpoll_cleanup(struct netpoll *np)
+static void __netpoll_cleanup(struct netpoll *np)
{
struct netpoll_info *npinfo;
@@ -885,14 +828,13 @@ void __netpoll_cleanup(struct netpoll *np)
skb_pool_flush(np);
}
-EXPORT_SYMBOL_GPL(__netpoll_cleanup);
void __netpoll_free(struct netpoll *np)
{
ASSERT_RTNL();
/* Wait for transmitting packets to finish before freeing. */
- synchronize_rcu();
+ synchronize_net();
__netpoll_cleanup(np);
kfree(np);
}
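With the string parser gone from this file, a caller that embeds struct netpoll populates the fields directly before calling netpoll_setup(). A minimal IPv4 sketch, using only field names visible in the code above (the address, port and device values are illustrative):

static int foo_configure_netpoll(void)
{
	static struct netpoll np;	/* must stay alive while in use */

	np.ipv6         = false;
	np.local_port   = 6665;
	np.remote_port  = 6666;
	np.local_ip.ip  = 0;			/* 0: netpoll_setup() takes the device address */
	np.remote_ip.ip = in_aton("192.0.2.1");
	strscpy(np.dev_name, "eth0", sizeof(np.dev_name));
	eth_broadcast_addr(np.remote_mac);	/* or a specific MAC via mac_pton() */

	return netpoll_setup(&np);
}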
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index ba7cf3e3c32f..1a5edec485f1 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -211,11 +211,7 @@ static int page_pool_init(struct page_pool *pool,
return -EINVAL;
if (pool->p.pool_size)
- ring_qsize = pool->p.pool_size;
-
- /* Sanity limit mem that can be pinned down */
- if (ring_qsize > 32768)
- return -E2BIG;
+ ring_qsize = min(pool->p.pool_size, 16384);
/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
* DMA_BIDIRECTIONAL is for allowing page used for DMA sending,
@@ -287,8 +283,10 @@ static int page_pool_init(struct page_pool *pool,
}
if (pool->mp_ops) {
- if (!pool->dma_map || !pool->dma_sync)
- return -EOPNOTSUPP;
+ if (!pool->dma_map || !pool->dma_sync) {
+ err = -EOPNOTSUPP;
+ goto free_ptr_ring;
+ }
if (WARN_ON(!is_kernel_rodata((unsigned long)pool->mp_ops))) {
err = -EFAULT;
@@ -371,7 +369,7 @@ struct page_pool *page_pool_create(const struct page_pool_params *params)
}
EXPORT_SYMBOL(page_pool_create);
-static void page_pool_return_page(struct page_pool *pool, netmem_ref netmem);
+static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem);
static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
{
@@ -409,7 +407,7 @@ static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
* (2) break out to fallthrough to alloc_pages_node.
 * This limits stress on the page buddy allocator.
*/
- page_pool_return_page(pool, netmem);
+ page_pool_return_netmem(pool, netmem);
alloc_stat_inc(pool, waive);
netmem = 0;
break;
@@ -470,11 +468,60 @@ page_pool_dma_sync_for_device(const struct page_pool *pool,
}
}
+static int page_pool_register_dma_index(struct page_pool *pool,
+ netmem_ref netmem, gfp_t gfp)
+{
+ int err = 0;
+ u32 id;
+
+ if (unlikely(!PP_DMA_INDEX_BITS))
+ goto out;
+
+ if (in_softirq())
+ err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem),
+ PP_DMA_INDEX_LIMIT, gfp);
+ else
+ err = xa_alloc_bh(&pool->dma_mapped, &id, netmem_to_page(netmem),
+ PP_DMA_INDEX_LIMIT, gfp);
+ if (err) {
+ WARN_ONCE(err != -ENOMEM, "couldn't track DMA mapping, please report to netdev@");
+ goto out;
+ }
+
+ netmem_set_dma_index(netmem, id);
+out:
+ return err;
+}
+
+static int page_pool_release_dma_index(struct page_pool *pool,
+ netmem_ref netmem)
+{
+ struct page *old, *page = netmem_to_page(netmem);
+ unsigned long id;
+
+ if (unlikely(!PP_DMA_INDEX_BITS))
+ return 0;
+
+ id = netmem_get_dma_index(netmem);
+ if (!id)
+ return -1;
+
+ if (in_softirq())
+ old = xa_cmpxchg(&pool->dma_mapped, id, page, NULL, 0);
+ else
+ old = xa_cmpxchg_bh(&pool->dma_mapped, id, page, NULL, 0);
+ if (old != page)
+ return -1;
+
+ netmem_set_dma_index(netmem, 0);
+
+ return 0;
+}
+
static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem, gfp_t gfp)
{
dma_addr_t dma;
int err;
- u32 id;
/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
* since dma_addr_t can be either 32 or 64 bits and does not always fit
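The id-0-means-unregistered convention in page_pool_release_dma_index() only works if the allocating xarray never hands out index 0. A one-line sketch, assuming (as the check above implies) that the pool's dma_mapped xarray is initialised with the ALLOC1 flag:

xa_init_flags(&pool->dma_mapped, XA_FLAGS_ALLOC1);	/* ids start at 1, so 0 can mean "no index" */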
@@ -493,18 +540,10 @@ static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem, gfp_t g
goto unmap_failed;
}
- if (in_softirq())
- err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem),
- PP_DMA_INDEX_LIMIT, gfp);
- else
- err = xa_alloc_bh(&pool->dma_mapped, &id, netmem_to_page(netmem),
- PP_DMA_INDEX_LIMIT, gfp);
- if (err) {
- WARN_ONCE(err != -ENOMEM, "couldn't track DMA mapping, please report to netdev@");
+ err = page_pool_register_dma_index(pool, netmem, gfp);
+ if (err)
goto unset_failed;
- }
- netmem_set_dma_index(netmem, id);
page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len);
return true;
@@ -544,8 +583,8 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
}
/* slow path */
-static noinline netmem_ref __page_pool_alloc_pages_slow(struct page_pool *pool,
- gfp_t gfp)
+static noinline netmem_ref __page_pool_alloc_netmems_slow(struct page_pool *pool,
+ gfp_t gfp)
{
const int bulk = PP_ALLOC_CACHE_REFILL;
unsigned int pp_order = pool->p.order;
@@ -553,6 +592,12 @@ static noinline netmem_ref __page_pool_alloc_pages_slow(struct page_pool *pool,
netmem_ref netmem;
int i, nr_pages;
+ /* Unconditionally set NOWARN if allocating from NAPI.
+ * Drivers forget to set it, and OOM reports on packet Rx are useless.
+ */
+ if ((gfp & GFP_ATOMIC) == GFP_ATOMIC)
+ gfp |= __GFP_NOWARN;
+
/* Don't support bulk alloc for high-order pages */
if (unlikely(pp_order))
return page_to_netmem(__page_pool_alloc_page_order(pool, gfp));
@@ -615,7 +660,7 @@ netmem_ref page_pool_alloc_netmems(struct page_pool *pool, gfp_t gfp)
if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops)
netmem = pool->mp_ops->alloc_netmems(pool, gfp);
else
- netmem = __page_pool_alloc_pages_slow(pool, gfp);
+ netmem = __page_pool_alloc_netmems_slow(pool, gfp);
return netmem;
}
EXPORT_SYMBOL(page_pool_alloc_netmems);
@@ -673,11 +718,9 @@ void page_pool_clear_pp_info(netmem_ref netmem)
netmem_set_pp(netmem, NULL);
}
-static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
- netmem_ref netmem)
+static __always_inline void __page_pool_release_netmem_dma(struct page_pool *pool,
+ netmem_ref netmem)
{
- struct page *old, *page = netmem_to_page(netmem);
- unsigned long id;
dma_addr_t dma;
if (!pool->dma_map)
@@ -686,15 +729,7 @@ static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
*/
return;
- id = netmem_get_dma_index(netmem);
- if (!id)
- return;
-
- if (in_softirq())
- old = xa_cmpxchg(&pool->dma_mapped, id, page, NULL, 0);
- else
- old = xa_cmpxchg_bh(&pool->dma_mapped, id, page, NULL, 0);
- if (old != page)
+ if (page_pool_release_dma_index(pool, netmem))
return;
dma = page_pool_get_dma_addr_netmem(netmem);
@@ -704,7 +739,6 @@ static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
PAGE_SIZE << pool->p.order, pool->p.dma_dir,
DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
page_pool_set_dma_addr_netmem(netmem, 0);
- netmem_set_dma_index(netmem, 0);
}
/* Disconnects a page (from a page_pool). API users can have a need
@@ -712,7 +746,7 @@ static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
* a regular page (that will eventually be returned to the normal
* page-allocator via put_page).
*/
-void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
+static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem)
{
int count;
bool put;
@@ -721,7 +755,7 @@ void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops)
put = pool->mp_ops->release_netmem(pool, netmem);
else
- __page_pool_release_page_dma(pool, netmem);
+ __page_pool_release_netmem_dma(pool, netmem);
/* This may be the last page returned, releasing the pool, so
* it is not safe to reference pool afterwards.
@@ -826,7 +860,7 @@ __page_pool_put_page(struct page_pool *pool, netmem_ref netmem,
* will be invoking put_page.
*/
recycle_stat_inc(pool, released_refcnt);
- page_pool_return_page(pool, netmem);
+ page_pool_return_netmem(pool, netmem);
return 0;
}
@@ -869,7 +903,7 @@ void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
if (netmem && !page_pool_recycle_in_ring(pool, netmem)) {
/* Cache full, fallback to free pages */
recycle_stat_inc(pool, ring_full);
- page_pool_return_page(pool, netmem);
+ page_pool_return_netmem(pool, netmem);
}
}
EXPORT_SYMBOL(page_pool_put_unrefed_netmem);
@@ -912,7 +946,7 @@ static void page_pool_recycle_ring_bulk(struct page_pool *pool,
* since put_page() with refcnt == 1 can be an expensive operation.
*/
for (; i < bulk_len; i++)
- page_pool_return_page(pool, bulk[i]);
+ page_pool_return_netmem(pool, bulk[i]);
}
/**
@@ -995,7 +1029,7 @@ static netmem_ref page_pool_drain_frag(struct page_pool *pool,
return netmem;
}
- page_pool_return_page(pool, netmem);
+ page_pool_return_netmem(pool, netmem);
return 0;
}
@@ -1009,7 +1043,7 @@ static void page_pool_free_frag(struct page_pool *pool)
if (!netmem || page_pool_unref_netmem(netmem, drain_count))
return;
- page_pool_return_page(pool, netmem);
+ page_pool_return_netmem(pool, netmem);
}
netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
@@ -1076,7 +1110,7 @@ static void page_pool_empty_ring(struct page_pool *pool)
pr_crit("%s() page_pool refcnt %d violation\n",
__func__, netmem_ref_count(netmem));
- page_pool_return_page(pool, netmem);
+ page_pool_return_netmem(pool, netmem);
}
}
@@ -1109,7 +1143,7 @@ static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
*/
while (pool->alloc.count) {
netmem = pool->alloc.cache[--pool->alloc.count];
- page_pool_return_page(pool, netmem);
+ page_pool_return_netmem(pool, netmem);
}
}
@@ -1136,7 +1170,7 @@ static void page_pool_scrub(struct page_pool *pool)
}
xa_for_each(&pool->dma_mapped, id, ptr)
- __page_pool_release_page_dma(pool, page_to_netmem(ptr));
+ __page_pool_release_netmem_dma(pool, page_to_netmem((struct page *)ptr));
}
/* No more consumers should exist, but producers could still
@@ -1201,6 +1235,35 @@ void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
pool->xdp_mem_id = mem->id;
}
+/**
+ * page_pool_enable_direct_recycling() - mark page pool as owned by NAPI
+ * @pool: page pool to modify
+ * @napi: NAPI instance to associate the page pool with
+ *
+ * Associate a page pool with a NAPI instance for lockless page recycling.
+ * This is useful when a new page pool has to be added to a NAPI instance
+ * without disabling that NAPI instance, to mark the point at which the
+ * control path "hands over" the page pool to the NAPI instance. In most
+ * cases a driver can simply set the @napi field in struct page_pool_params
+ * and does not have to call this helper.
+ *
+ * The function is idempotent, but does not implement any refcounting.
+ * Single page_pool_disable_direct_recycling() will disable recycling,
+ * no matter how many times enable was called.
+ */
+void page_pool_enable_direct_recycling(struct page_pool *pool,
+ struct napi_struct *napi)
+{
+ if (READ_ONCE(pool->p.napi) == napi)
+ return;
+ WARN_ON(!napi || pool->p.napi);
+
+ mutex_lock(&page_pools_lock);
+ WRITE_ONCE(pool->p.napi, napi);
+ mutex_unlock(&page_pools_lock);
+}
+EXPORT_SYMBOL(page_pool_enable_direct_recycling);
+
void page_pool_disable_direct_recycling(struct page_pool *pool)
{
/* Disable direct recycling based on pool->cpuid.
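A hypothetical driver-side sketch of the new helper: create a pool with no NAPI association, then hand it over to an already-running NAPI instance, e.g. from a queue-restart path. "priv" and "rxq" are illustrative driver structures, not a real API:

struct page_pool_params pp_params = {
	.order     = 0,
	.pool_size = 256,
	.dev       = priv->dev,
	.dma_dir   = DMA_FROM_DEVICE,
	/* .napi deliberately left NULL at create time */
};
struct page_pool *pool;

pool = page_pool_create(&pp_params);
if (IS_ERR(pool))
	return PTR_ERR(pool);

/* hand the pool over once the ring is wired up to that NAPI */
page_pool_enable_direct_recycling(pool, &rxq->napi);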
@@ -1253,7 +1316,7 @@ void page_pool_update_nid(struct page_pool *pool, int new_nid)
/* Flush pool alloc cache, as refill will check NUMA node */
while (pool->alloc.count) {
netmem = pool->alloc.cache[--pool->alloc.count];
- page_pool_return_page(pool, netmem);
+ page_pool_return_netmem(pool, netmem);
}
}
EXPORT_SYMBOL(page_pool_update_nid);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 0ebe5461d4d9..d41b03fd1f63 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -114,6 +114,7 @@
#include <linux/sys.h>
#include <linux/types.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -2841,8 +2842,7 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
}
i = 0;
- frag_len = (datalen/frags) < PAGE_SIZE ?
- (datalen/frags) : PAGE_SIZE;
+ frag_len = min_t(int, datalen / frags, PAGE_SIZE);
while (datalen > 0) {
if (unlikely(!pkt_dev->page)) {
int node = numa_node_id();
@@ -2859,8 +2859,7 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
if (i == (frags - 1))
skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[i],
pkt_dev->page, 0,
- (datalen < PAGE_SIZE ?
- datalen : PAGE_SIZE));
+ min(datalen, PAGE_SIZE));
else
skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[i],
pkt_dev->page, 0, frag_len);
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 63de5c635842..897a8f01a67b 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -77,9 +77,7 @@ void reqsk_queue_alloc(struct request_sock_queue *queue)
* a simple spin lock - one must consider sock_owned_by_user() and arrange
* to use sk_add_backlog() stuff. But what really makes it infeasible is the
* locking hierarchy violation. E.g., inet_csk_listen_stop() may try to
- * acquire a child's lock while holding listener's socket lock. A corner
- * case might also exist in tcp_v4_hnd_req() that will trigger this locking
- * order.
+ * acquire a child's lock while holding listener's socket lock.
*
* This function also sets "treq->tfo_listener" to false.
* treq->tfo_listener is used by the listener so it is protected by the
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index c57692eb8da9..576d5ec3bb36 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -478,7 +478,7 @@ static int rtnl_unregister(int protocol, int msgtype)
* rtnl_unregister_all - Unregister all rtnetlink message type of a protocol
* @protocol : Protocol family or PF_UNSPEC
*
- * Identical to calling rtnl_unregster() for all registered message types
+ * Identical to calling rtnl_unregister() for all registered message types
* of a certain protocol family.
*/
void rtnl_unregister_all(int protocol)
@@ -1026,9 +1026,11 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
.rta_error = error,
.rta_id = id,
};
+ unsigned long delta;
if (dst) {
- ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
+ delta = jiffies - READ_ONCE(dst->lastuse);
+ ci.rta_lastuse = jiffies_delta_to_clock_t(delta);
ci.rta_used = dst->__use;
ci.rta_clntref = rcuref_read(&dst->__rcuref);
}
@@ -1324,6 +1326,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ rtnl_devlink_port_size(dev)
+ rtnl_dpll_pin_size(dev)
+ nla_total_size(8) /* IFLA_MAX_PACING_OFFLOAD_HORIZON */
+ + nla_total_size(2) /* IFLA_HEADROOM */
+ + nla_total_size(2) /* IFLA_TAILROOM */
+ 0;
}
@@ -1446,7 +1450,7 @@ static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
struct netdev_phys_item_id ppid = { };
int err;
- err = dev_get_port_parent_id(dev, &ppid, false);
+ err = netif_get_port_parent_id(dev, &ppid, false);
if (err) {
if (err == -EOPNOTSUPP)
return 0;
@@ -2036,7 +2040,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
ifm->__ifi_pad = 0;
ifm->ifi_type = READ_ONCE(dev->type);
ifm->ifi_index = READ_ONCE(dev->ifindex);
- ifm->ifi_flags = dev_get_flags(dev);
+ ifm->ifi_flags = netif_get_flags(dev);
ifm->ifi_change = change;
if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
@@ -2089,7 +2093,11 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
atomic_read(&dev->carrier_up_count)) ||
nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
- atomic_read(&dev->carrier_down_count)))
+ atomic_read(&dev->carrier_down_count)) ||
+ nla_put_u16(skb, IFLA_HEADROOM,
+ READ_ONCE(dev->needed_headroom)) ||
+ nla_put_u16(skb, IFLA_TAILROOM,
+ READ_ONCE(dev->needed_tailroom)))
goto nla_put_failure;
if (rtnl_fill_proto_down(skb, dev))
@@ -2241,6 +2249,8 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_GSO_IPV4_MAX_SIZE] = NLA_POLICY_MIN(NLA_U32, MAX_TCP_HEADER + 1),
[IFLA_GRO_IPV4_MAX_SIZE] = { .type = NLA_U32 },
[IFLA_NETNS_IMMUTABLE] = { .type = NLA_REJECT },
+ [IFLA_HEADROOM] = { .type = NLA_REJECT },
+ [IFLA_TAILROOM] = { .type = NLA_REJECT },
};
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -4705,9 +4715,6 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
int err;
u16 vid;
- if (!netlink_capable(skb, CAP_NET_ADMIN))
- return -EPERM;
-
if (!del_bulk) {
err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
NULL, extack);
@@ -5225,7 +5232,7 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
ifm->__ifi_pad = 0;
ifm->ifi_type = dev->type;
ifm->ifi_index = dev->ifindex;
- ifm->ifi_flags = dev_get_flags(dev);
+ ifm->ifi_flags = netif_get_flags(dev);
ifm->ifi_change = 0;
diff --git a/net/core/scm.c b/net/core/scm.c
index 0225bd94170f..66eaee783e8b 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -23,6 +23,8 @@
#include <linux/security.h>
#include <linux/pid_namespace.h>
#include <linux/pid.h>
+#include <uapi/linux/pidfd.h>
+#include <linux/pidfs.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/errqueue.h>
@@ -145,6 +147,22 @@ void __scm_destroy(struct scm_cookie *scm)
}
EXPORT_SYMBOL(__scm_destroy);
+static inline int scm_replace_pid(struct scm_cookie *scm, struct pid *pid)
+{
+ int err;
+
+ /* drop all previous references */
+ scm_destroy_cred(scm);
+
+ err = pidfs_register_pid(pid);
+ if (unlikely(err))
+ return err;
+
+ scm->pid = pid;
+ scm->creds.pid = pid_vnr(pid);
+ return 0;
+}
+
int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
{
const struct proto_ops *ops = READ_ONCE(sock->ops);
@@ -189,15 +207,21 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
if (err)
goto error;
- p->creds.pid = creds.pid;
if (!p->pid || pid_vnr(p->pid) != creds.pid) {
struct pid *pid;
err = -ESRCH;
pid = find_get_pid(creds.pid);
if (!pid)
goto error;
- put_pid(p->pid);
- p->pid = pid;
+
+ /* pass a struct pid reference from
+ * find_get_pid() to scm_replace_pid().
+ */
+ err = scm_replace_pid(p, pid);
+ if (err) {
+ put_pid(pid);
+ goto error;
+ }
}
err = -EINVAL;
@@ -249,7 +273,9 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
check_object_size(data, cmlen - sizeof(*cm), true);
- if (!user_write_access_begin(cm, cmlen))
+ if (can_do_masked_user_access())
+ cm = masked_user_access_begin(cm);
+ else if (!user_write_access_begin(cm, cmlen))
goto efault;
unsafe_put_user(cmlen, &cm->cmsg_len, efault_end);
@@ -459,7 +485,7 @@ static void scm_pidfd_recv(struct msghdr *msg, struct scm_cookie *scm)
if (!scm->pid)
return;
- pidfd = pidfd_prepare(scm->pid, 0, &pidfd_file);
+ pidfd = pidfd_prepare(scm->pid, PIDFD_STALE, &pidfd_file);
if (put_cmsg(msg, SOL_SOCKET, SCM_PIDFD, sizeof(int), &pidfd)) {
if (pidfd_file) {
diff --git a/net/core/selftests.c b/net/core/selftests.c
index 35f807ea9952..3d79133a91a6 100644
--- a/net/core/selftests.c
+++ b/net/core/selftests.c
@@ -27,6 +27,7 @@ struct net_packet_attrs {
int max_size;
u8 id;
u16 queue_mapping;
+ bool bad_csum;
};
struct net_test_priv {
@@ -160,10 +161,25 @@ static struct sk_buff *net_test_get_skb(struct net_device *ndev,
skb->csum = 0;
skb->ip_summed = CHECKSUM_PARTIAL;
if (attr->tcp) {
- thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr,
- ihdr->daddr, 0);
+ int l4len = skb->len - skb_transport_offset(skb);
+
+ thdr->check = ~tcp_v4_check(l4len, ihdr->saddr, ihdr->daddr, 0);
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
+
+ if (attr->bad_csum) {
+ /* Force mangled checksum */
+ if (skb_checksum_help(skb)) {
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ if (thdr->check != CSUM_MANGLED_0)
+ thdr->check = CSUM_MANGLED_0;
+ else
+ thdr->check = csum16_sub(thdr->check,
+ cpu_to_be16(1));
+ }
} else {
udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
}
@@ -238,7 +254,11 @@ static int net_test_loopback_validate(struct sk_buff *skb,
if (tpriv->packet->id != shdr->id)
goto out;
- tpriv->ok = true;
+ if (tpriv->packet->bad_csum && skb->ip_summed == CHECKSUM_UNNECESSARY)
+ tpriv->ok = -EIO;
+ else
+ tpriv->ok = true;
+
complete(&tpriv->comp);
out:
kfree_skb(skb);
@@ -284,7 +304,12 @@ static int __net_test_loopback(struct net_device *ndev,
attr->timeout = NET_LB_TIMEOUT;
wait_for_completion_timeout(&tpriv->comp, attr->timeout);
- ret = tpriv->ok ? 0 : -ETIMEDOUT;
+ if (tpriv->ok < 0)
+ ret = tpriv->ok;
+ else if (!tpriv->ok)
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
cleanup:
dev_remove_pack(&tpriv->pt);
@@ -344,6 +369,42 @@ static int net_test_phy_loopback_tcp(struct net_device *ndev)
return __net_test_loopback(ndev, &attr);
}
+/**
+ * net_test_phy_loopback_tcp_bad_csum - PHY loopback test with a deliberately
+ * corrupted TCP checksum
+ * @ndev: the network device to test
+ *
+ * Builds the same minimal Ethernet/IPv4/TCP frame as
+ * net_test_phy_loopback_tcp(), then flips the least-significant bit of the TCP
+ * checksum so the resulting value is provably invalid (neither 0 nor 0xFFFF).
+ * The frame is transmitted through the device’s internal PHY loopback path:
+ *
+ * test code -> MAC driver -> MAC HW -> xMII -> PHY ->
+ * internal PHY loopback -> xMII -> MAC HW -> MAC driver -> test code
+ *
+ * Result interpretation
+ * ---------------------
+ * 0 The frame is delivered to the stack and the driver reports
+ * ip_summed as CHECKSUM_NONE or CHECKSUM_COMPLETE - both are
+ * valid ways to indicate “bad checksum, let the stack verify.”
+ * -ETIMEDOUT The MAC/PHY silently dropped the frame; hardware checksum
+ * verification filtered it out before the driver saw it.
+ * -EIO The driver returned the frame with ip_summed ==
+ * CHECKSUM_UNNECESSARY, falsely claiming a valid checksum and
+ * indicating a serious RX-path defect.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int net_test_phy_loopback_tcp_bad_csum(struct net_device *ndev)
+{
+ struct net_packet_attrs attr = { };
+
+ attr.dst = ndev->dev_addr;
+ attr.tcp = true;
+ attr.bad_csum = true;
+ return __net_test_loopback(ndev, &attr);
+}
+
static const struct net_test {
char name[ETH_GSTRING_LEN];
int (*fn)(struct net_device *ndev);
@@ -368,6 +429,9 @@ static const struct net_test {
.name = "PHY internal loopback, TCP ",
.fn = net_test_phy_loopback_tcp,
}, {
+ .name = "PHY loopback, bad TCP csum ",
+ .fn = net_test_phy_loopback_tcp_bad_csum,
+ }, {
/* This test should be done after all PHY loopback test */
.name = "PHY internal loopback, disable",
.fn = net_test_phy_loopback_disable,
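These generic selftests reach users through ethtool's self-test interface ("ethtool -t <iface>"), so the new bad-checksum case runs wherever a driver already wires up the helpers exported by this file. A hypothetical wiring sketch ("foo_" names are illustrative):

static int foo_get_sset_count(struct net_device *ndev, int sset)
{
	return sset == ETH_SS_TEST ? net_selftest_get_count() : -EOPNOTSUPP;
}

static void foo_get_strings(struct net_device *ndev, u32 sset, u8 *data)
{
	if (sset == ETH_SS_TEST)
		net_selftest_get_strings(data);
}

static const struct ethtool_ops foo_ethtool_ops = {
	.self_test      = net_selftest,
	.get_sset_count = foo_get_sset_count,
	.get_strings    = foo_get_strings,
};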
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 85fc82f72d26..6be01454f262 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -79,6 +79,7 @@
#include <net/mptcp.h>
#include <net/mctp.h>
#include <net/page_pool/helpers.h>
+#include <net/psp/types.h>
#include <net/dropreason.h>
#include <linux/uaccess.h>
@@ -384,8 +385,7 @@ static inline void __finalize_skb_around(struct sk_buff *skb, void *data,
skb_set_kcov_handle(skb, kcov_common_handle());
}
-static inline void *__slab_build_skb(struct sk_buff *skb, void *data,
- unsigned int *size)
+static inline void *__slab_build_skb(void *data, unsigned int *size)
{
void *resized;
@@ -418,7 +418,7 @@ struct sk_buff *slab_build_skb(void *data)
return NULL;
memset(skb, 0, offsetof(struct sk_buff, tail));
- data = __slab_build_skb(skb, data, &size);
+ data = __slab_build_skb(data, &size);
__finalize_skb_around(skb, data, size);
return skb;
@@ -435,7 +435,7 @@ static void __build_skb_around(struct sk_buff *skb, void *data,
* using slab buffer should use slab_build_skb() instead.
*/
if (WARN_ONCE(size == 0, "Use slab_build_skb() instead"))
- data = __slab_build_skb(skb, data, &size);
+ data = __slab_build_skb(data, &size);
__finalize_skb_around(skb, data, size);
}
@@ -3060,10 +3060,8 @@ static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
/*
* Fill page/offset/length into spd, if it can hold more pages.
*/
-static bool spd_fill_page(struct splice_pipe_desc *spd,
- struct pipe_inode_info *pipe, struct page *page,
- unsigned int *len, unsigned int offset,
- bool linear,
+static bool spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
+ unsigned int *len, unsigned int offset, bool linear,
struct sock *sk)
{
if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
@@ -3091,8 +3089,7 @@ static bool __splice_segment(struct page *page, unsigned int poff,
unsigned int plen, unsigned int *off,
unsigned int *len,
struct splice_pipe_desc *spd, bool linear,
- struct sock *sk,
- struct pipe_inode_info *pipe)
+ struct sock *sk)
{
if (!*len)
return true;
@@ -3111,13 +3108,14 @@ static bool __splice_segment(struct page *page, unsigned int poff,
do {
unsigned int flen = min(*len, plen);
- if (spd_fill_page(spd, pipe, page, &flen, poff,
- linear, sk))
+ if (spd_fill_page(spd, page, &flen, poff, linear, sk))
return true;
poff += flen;
plen -= flen;
*len -= flen;
- } while (*len && plen);
+ if (!*len)
+ return true;
+ } while (plen);
return false;
}
@@ -3130,8 +3128,8 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
unsigned int *offset, unsigned int *len,
struct splice_pipe_desc *spd, struct sock *sk)
{
- int seg;
struct sk_buff *iter;
+ int seg;
/* map the linear part :
* If skb->head_frag is set, this 'linear' part is backed by a
@@ -3143,7 +3141,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
skb_headlen(skb),
offset, len, spd,
skb_head_is_locked(skb),
- sk, pipe))
+ sk))
return true;
/*
@@ -3160,7 +3158,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
if (__splice_segment(skb_frag_page(f),
skb_frag_off(f), skb_frag_size(f),
- offset, len, spd, false, sk, pipe))
+ offset, len, spd, false, sk))
return true;
}
@@ -3235,6 +3233,7 @@ typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg);
static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
int len, sendmsg_func sendmsg, int flags)
{
+ int more_hint = sk_is_tcp(sk) ? MSG_MORE : 0;
unsigned int orig_len = len;
struct sk_buff *head = skb;
unsigned short fragidx;
@@ -3252,6 +3251,8 @@ do_frag_list:
kv.iov_len = slen;
memset(&msg, 0, sizeof(msg));
msg.msg_flags = MSG_DONTWAIT | flags;
+ if (slen < len)
+ msg.msg_flags |= more_hint;
iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen);
ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked,
@@ -3292,6 +3293,8 @@ do_frag_list:
flags,
};
+ if (slen < len)
+ msg.msg_flags |= more_hint;
bvec_set_page(&bvec, skb_frag_page(frag), slen,
skb_frag_off(frag) + offset);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1,
@@ -5060,6 +5063,9 @@ static const u8 skb_ext_type_len[] = {
#if IS_ENABLED(CONFIG_MCTP_FLOWS)
[SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow),
#endif
+#if IS_ENABLED(CONFIG_INET_PSP)
+ [SKB_EXT_PSP] = SKB_EXT_CHUNKSIZEOF(struct psp_skb_ext),
+#endif
};
static __always_inline unsigned int skb_ext_total_length(void)
@@ -6261,9 +6267,6 @@ int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len)
if (!pskb_may_pull(skb, write_len))
return -ENOMEM;
- if (!skb_frags_readable(skb))
- return -EFAULT;
-
if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
return 0;
@@ -6670,7 +6673,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
return NULL;
while (data_len) {
- if (nr_frags == MAX_SKB_FRAGS - 1)
+ if (nr_frags == MAX_SKB_FRAGS)
goto failure;
while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order))
order--;
@@ -6766,8 +6769,7 @@ static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
/* carve out the first eat bytes from skb's frag_list. May recurse into
* pskb_carve()
*/
-static int pskb_carve_frag_list(struct sk_buff *skb,
- struct skb_shared_info *shinfo, int eat,
+static int pskb_carve_frag_list(struct skb_shared_info *shinfo, int eat,
gfp_t gfp_mask)
{
struct sk_buff *list = shinfo->frag_list;
@@ -6872,7 +6874,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
skb_clone_fraglist(skb);
/* split line is in frag list */
- if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) {
+ if (k == 0 && pskb_carve_frag_list(shinfo, off - pos, gfp_mask)) {
/* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
if (skb_has_frag_list(skb))
kfree_skb_list(skb_shinfo(skb)->frag_list);
@@ -7046,6 +7048,7 @@ void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
skb->active_extensions = 1 << id;
return skb_ext_get_ptr(ext, id);
}
+EXPORT_SYMBOL_NS_GPL(__skb_ext_set, "NETDEV_INTERNAL");
/**
* skb_ext_add - allocate space for given extension, COW if needed
@@ -7182,8 +7185,9 @@ static void kfree_skb_napi_cache(struct sk_buff *skb)
*/
void skb_attempt_defer_free(struct sk_buff *skb)
{
+ struct skb_defer_node *sdn;
+ unsigned long defer_count;
int cpu = skb->alloc_cpu;
- struct softnet_data *sd;
unsigned int defer_max;
bool kick;
@@ -7196,28 +7200,26 @@ nodefer: kfree_skb_napi_cache(skb);
DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
DEBUG_NET_WARN_ON_ONCE(skb->destructor);
+ DEBUG_NET_WARN_ON_ONCE(skb_nfct(skb));
+
+ sdn = per_cpu_ptr(net_hotdata.skb_defer_nodes, cpu) + numa_node_id();
- sd = &per_cpu(softnet_data, cpu);
defer_max = READ_ONCE(net_hotdata.sysctl_skb_defer_max);
- if (READ_ONCE(sd->defer_count) >= defer_max)
+ defer_count = atomic_long_inc_return(&sdn->defer_count);
+
+ if (defer_count >= defer_max)
goto nodefer;
- spin_lock_bh(&sd->defer_lock);
- /* Send an IPI every time queue reaches half capacity. */
- kick = sd->defer_count == (defer_max >> 1);
- /* Paired with the READ_ONCE() few lines above */
- WRITE_ONCE(sd->defer_count, sd->defer_count + 1);
+ llist_add(&skb->ll_node, &sdn->defer_list);
- skb->next = sd->defer_list;
- /* Paired with READ_ONCE() in skb_defer_free_flush() */
- WRITE_ONCE(sd->defer_list, skb);
- spin_unlock_bh(&sd->defer_lock);
+ /* Send an IPI every time queue reaches half capacity. */
+ kick = (defer_count - 1) == (defer_max >> 1);
/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
* if we are unlucky enough (this seems very unlikely).
*/
if (unlikely(kick))
- kick_defer_list_purge(sd, cpu);
+ kick_defer_list_purge(cpu);
}
static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
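A minimal sketch of the consumer side of this producer pattern, under the assumption that the flush path drains the lockless list in a single llist_del_all() and frees every deferred skb; the exact in-tree flush helper and its defer_count bookkeeping may differ:

static void foo_defer_node_flush(struct skb_defer_node *sdn)
{
	struct llist_node *head = llist_del_all(&sdn->defer_list);
	struct sk_buff *skb, *next;
	unsigned long freed = 0;

	llist_for_each_entry_safe(skb, next, head, ll_node) {
		napi_consume_skb(skb, 1);	/* budget > 0: NAPI/softirq context assumed */
		freed++;
	}
	atomic_long_sub(freed, &sdn->defer_count);
}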
@@ -7237,7 +7239,6 @@ static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
* @skb: The buffer to add pages to
* @iter: Iterator representing the pages to be added
* @maxsize: Maximum amount of pages to be added
- * @gfp: Allocation flags
*
* This is a common helper function for supporting MSG_SPLICE_PAGES. It
* extracts pages from an iterator and adds them to the socket buffer if
@@ -7248,7 +7249,7 @@ static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
* insufficient space in the buffer to transfer anything.
*/
ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
- ssize_t maxsize, gfp_t gfp)
+ ssize_t maxsize)
{
size_t frag_limit = READ_ONCE(net_hotdata.sysctl_max_skb_frags);
struct page *pages[8], **ppages = pages;
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 34c51eb1a14f..2ac7731e1e0a 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -656,6 +656,13 @@ static void sk_psock_backlog(struct work_struct *work)
bool ingress;
int ret;
+ /* If sk is quickly removed from the map and then added back, the old
+ * psock should not be scheduled, because there are now two psocks
+ * pointing to the same sk.
+ */
+ if (!sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
+ return;
+
/* Increment the psock refcnt to synchronize with close(fd) path in
* sock_map_close(), ensuring we wait for backlog thread completion
* before sk_socket freed. If refcnt increment fails, it indicates
@@ -869,7 +876,7 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
sk_psock_stop(psock);
INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
- queue_rcu_work(system_wq, &psock->rwork);
+ queue_rcu_work(system_percpu_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);
diff --git a/net/core/sock.c b/net/core/sock.c
index 3b409bc8ef6d..dc03d4b5909a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -281,12 +281,12 @@ static struct lock_class_key af_elock_keys[AF_MAX];
static struct lock_class_key af_kern_callback_keys[AF_MAX];
/* Run time adjustable parameters. */
-__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
+__u32 sysctl_wmem_max __read_mostly = 4 << 20;
EXPORT_SYMBOL(sysctl_wmem_max);
-__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
+__u32 sysctl_rmem_max __read_mostly = 4 << 20;
EXPORT_SYMBOL(sysctl_rmem_max);
-__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
-__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
+__u32 sysctl_wmem_default __read_mostly = SK_WMEM_DEFAULT;
+__u32 sysctl_rmem_default __read_mostly = SK_RMEM_DEFAULT;
DEFINE_STATIC_KEY_FALSE(memalloc_socks_key);
EXPORT_SYMBOL_GPL(memalloc_socks_key);
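For scale: 4 << 20 is 4,194,304 bytes, so the tunable rmem/wmem ceilings now default to 4 MiB, while sysctl_{w,r}mem_default fall back to the SK_WMEM_DEFAULT/SK_RMEM_DEFAULT constants instead of sharing the ceiling values.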
@@ -491,13 +491,13 @@ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
struct sk_buff_head *list = &sk->sk_receive_queue;
if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) {
- atomic_inc(&sk->sk_drops);
+ sk_drops_inc(sk);
trace_sock_rcvqueue_full(sk, skb);
return -ENOMEM;
}
if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
- atomic_inc(&sk->sk_drops);
+ sk_drops_inc(sk);
return -ENOBUFS;
}
@@ -526,11 +526,10 @@ int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
enum skb_drop_reason drop_reason;
int err;
- err = sk_filter(sk, skb);
- if (err) {
- drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
+ err = sk_filter_reason(sk, skb, &drop_reason);
+ if (err)
goto out;
- }
+
err = __sock_queue_rcv_skb(sk, skb);
switch (err) {
case -ENOMEM:
@@ -553,15 +552,18 @@ EXPORT_SYMBOL(sock_queue_rcv_skb_reason);
int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
const int nested, unsigned int trim_cap, bool refcounted)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
int rc = NET_RX_SUCCESS;
+ int err;
- if (sk_filter_trim_cap(sk, skb, trim_cap))
+ if (sk_filter_trim_cap(sk, skb, trim_cap, &reason))
goto discard_and_relse;
skb->dev = NULL;
if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) {
- atomic_inc(&sk->sk_drops);
+ sk_drops_inc(sk);
+ reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
goto discard_and_relse;
}
if (nested)
@@ -577,9 +579,13 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
rc = sk_backlog_rcv(sk, skb);
mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
- } else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
+ } else if ((err = sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))) {
bh_unlock_sock(sk);
- atomic_inc(&sk->sk_drops);
+ if (err == -ENOMEM)
+ reason = SKB_DROP_REASON_PFMEMALLOC;
+ if (err == -ENOBUFS)
+ reason = SKB_DROP_REASON_SOCKET_BACKLOG;
+ sk_drops_inc(sk);
goto discard_and_relse;
}
@@ -589,7 +595,7 @@ out:
sock_put(sk);
return rc;
discard_and_relse:
- kfree_skb(skb);
+ sk_skb_reason_drop(sk, skb, reason);
goto out;
}
EXPORT_SYMBOL(__sk_receive_skb);
@@ -602,7 +608,7 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
struct dst_entry *dst = __sk_dst_get(sk);
- if (dst && dst->obsolete &&
+ if (dst && READ_ONCE(dst->obsolete) &&
INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
dst, cookie) == NULL) {
sk_tx_queue_clear(sk);
@@ -620,7 +626,7 @@ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
struct dst_entry *dst = sk_dst_get(sk);
- if (dst && dst->obsolete &&
+ if (dst && READ_ONCE(dst->obsolete) &&
INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
dst, cookie) == NULL) {
sk_dst_reset(sk);
@@ -818,12 +824,10 @@ EXPORT_SYMBOL(sock_set_priority);
void sock_set_sndtimeo(struct sock *sk, s64 secs)
{
- lock_sock(sk);
if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1)
WRITE_ONCE(sk->sk_sndtimeo, secs * HZ);
else
WRITE_ONCE(sk->sk_sndtimeo, MAX_SCHEDULE_TIMEOUT);
- release_sock(sk);
}
EXPORT_SYMBOL(sock_set_sndtimeo);
@@ -837,14 +841,6 @@ static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns)
}
}
-void sock_enable_timestamps(struct sock *sk)
-{
- lock_sock(sk);
- __sock_set_timestamps(sk, true, false, true);
- release_sock(sk);
-}
-EXPORT_SYMBOL(sock_enable_timestamps);
-
void sock_set_timestamp(struct sock *sk, int optname, bool valbool)
{
switch (optname) {
@@ -1036,7 +1032,7 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
bool charged;
int pages;
- if (!mem_cgroup_sockets_enabled || !sk->sk_memcg || !sk_has_account(sk))
+ if (!mem_cgroup_sk_enabled(sk) || !sk_has_account(sk))
return -EOPNOTSUPP;
if (!bytes)
@@ -1045,8 +1041,8 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
pages = sk_mem_pages(bytes);
/* pre-charge to memcg */
- charged = mem_cgroup_charge_skmem(sk->sk_memcg, pages,
- GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+ charged = mem_cgroup_sk_charge(sk, pages,
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!charged)
return -ENOMEM;
@@ -1058,7 +1054,7 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
*/
if (allocated > sk_prot_mem_limits(sk, 1)) {
sk_memory_allocated_sub(sk, pages);
- mem_cgroup_uncharge_skmem(sk->sk_memcg, pages);
+ mem_cgroup_sk_uncharge(sk, pages);
return -ENOMEM;
}
sk_forward_alloc_add(sk, pages << PAGE_SHIFT);
@@ -1295,6 +1291,14 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
case SO_DEVMEM_DONTNEED:
return sock_devmem_dontneed(sk, optval, optlen);
#endif
+ case SO_SNDTIMEO_OLD:
+ case SO_SNDTIMEO_NEW:
+ return sock_set_timeout(&sk->sk_sndtimeo, optval,
+ optlen, optname == SO_SNDTIMEO_OLD);
+ case SO_RCVTIMEO_OLD:
+ case SO_RCVTIMEO_NEW:
+ return sock_set_timeout(&sk->sk_rcvtimeo, optval,
+ optlen, optname == SO_RCVTIMEO_OLD);
}
sockopt_lock_sock(sk);
@@ -1450,18 +1454,6 @@ set_sndbuf:
WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
break;
}
- case SO_RCVTIMEO_OLD:
- case SO_RCVTIMEO_NEW:
- ret = sock_set_timeout(&sk->sk_rcvtimeo, optval,
- optlen, optname == SO_RCVTIMEO_OLD);
- break;
-
- case SO_SNDTIMEO_OLD:
- case SO_SNDTIMEO_NEW:
- ret = sock_set_timeout(&sk->sk_sndtimeo, optval,
- optlen, optname == SO_SNDTIMEO_OLD);
- break;
-
case SO_ATTACH_FILTER: {
struct sock_fprog fprog;
@@ -2513,15 +2505,18 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
newsk->sk_wmem_queued = 0;
newsk->sk_forward_alloc = 0;
newsk->sk_reserved_mem = 0;
- atomic_set(&newsk->sk_drops, 0);
+ DEBUG_NET_WARN_ON_ONCE(newsk->sk_drop_counters);
+ sk_drops_reset(newsk);
newsk->sk_send_head = NULL;
newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
atomic_set(&newsk->sk_zckey, 0);
sock_reset_flag(newsk, SOCK_DONE);
+#ifdef CONFIG_MEMCG
/* sk->sk_memcg will be populated at accept() time */
newsk->sk_memcg = NULL;
+#endif
cgroup_sk_clone(&newsk->sk_cgrp_data);
@@ -2592,7 +2587,7 @@ free:
}
EXPORT_SYMBOL_GPL(sk_clone_lock);
-static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst)
+static u32 sk_dst_gso_max_size(struct sock *sk, const struct net_device *dev)
{
bool is_ipv6 = false;
u32 max_size;
@@ -2602,8 +2597,8 @@ static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst)
!ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr));
#endif
/* pairs with the WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
- max_size = is_ipv6 ? READ_ONCE(dst->dev->gso_max_size) :
- READ_ONCE(dst->dev->gso_ipv4_max_size);
+ max_size = is_ipv6 ? READ_ONCE(dev->gso_max_size) :
+ READ_ONCE(dev->gso_ipv4_max_size);
if (max_size > GSO_LEGACY_MAX_SIZE && !sk_is_tcp(sk))
max_size = GSO_LEGACY_MAX_SIZE;
@@ -2612,9 +2607,12 @@ static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst)
void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
+ const struct net_device *dev;
u32 max_segs = 1;
- sk->sk_route_caps = dst->dev->features;
+ rcu_read_lock();
+ dev = dst_dev_rcu(dst);
+ sk->sk_route_caps = dev->features;
if (sk_is_tcp(sk)) {
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2630,13 +2628,14 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
} else {
sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
- sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dst);
+ sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dev);
/* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */
- max_segs = max_t(u32, READ_ONCE(dst->dev->gso_max_segs), 1);
+ max_segs = max_t(u32, READ_ONCE(dev->gso_max_segs), 1);
}
}
sk->sk_gso_max_segs = max_segs;
sk_dst_set(sk, dst);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(sk_setup_caps);
@@ -2788,39 +2787,6 @@ void sock_pfree(struct sk_buff *skb)
EXPORT_SYMBOL(sock_pfree);
#endif /* CONFIG_INET */
-kuid_t sock_i_uid(struct sock *sk)
-{
- kuid_t uid;
-
- read_lock_bh(&sk->sk_callback_lock);
- uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
- read_unlock_bh(&sk->sk_callback_lock);
- return uid;
-}
-EXPORT_SYMBOL(sock_i_uid);
-
-unsigned long __sock_i_ino(struct sock *sk)
-{
- unsigned long ino;
-
- read_lock(&sk->sk_callback_lock);
- ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
- read_unlock(&sk->sk_callback_lock);
- return ino;
-}
-EXPORT_SYMBOL(__sock_i_ino);
-
-unsigned long sock_i_ino(struct sock *sk)
-{
- unsigned long ino;
-
- local_bh_disable();
- ino = __sock_i_ino(sk);
- local_bh_enable();
- return ino;
-}
-EXPORT_SYMBOL(sock_i_ino);
-
/*
* Allocate a skb from the socket's send buffer.
*/
@@ -3199,23 +3165,27 @@ void __release_sock(struct sock *sk)
__acquires(&sk->sk_lock.slock)
{
struct sk_buff *skb, *next;
+ int nb = 0;
while ((skb = sk->sk_backlog.head) != NULL) {
sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
spin_unlock_bh(&sk->sk_lock.slock);
- do {
+ while (1) {
next = skb->next;
prefetch(next);
DEBUG_NET_WARN_ON_ONCE(skb_dst_is_noref(skb));
skb_mark_not_on_list(skb);
sk_backlog_rcv(sk, skb);
- cond_resched();
-
skb = next;
- } while (skb != NULL);
+ if (!skb)
+ break;
+
+ if (!(++nb & 15))
+ cond_resched();
+ }
spin_lock_bh(&sk->sk_lock.slock);
}
@@ -3282,16 +3252,16 @@ EXPORT_SYMBOL(sk_wait_data);
*/
int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
{
- struct mem_cgroup *memcg = mem_cgroup_sockets_enabled ? sk->sk_memcg : NULL;
+ bool memcg_enabled = false, charged = false;
struct proto *prot = sk->sk_prot;
- bool charged = true;
long allocated;
sk_memory_allocated_add(sk, amt);
allocated = sk_memory_allocated(sk);
- if (memcg) {
- charged = mem_cgroup_charge_skmem(memcg, amt, gfp_memcg_charge());
+ if (mem_cgroup_sk_enabled(sk)) {
+ memcg_enabled = true;
+ charged = mem_cgroup_sk_charge(sk, amt, gfp_memcg_charge());
if (!charged)
goto suppress_allocation;
}
@@ -3365,21 +3335,19 @@ suppress_allocation:
*/
if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) {
/* Force charge with __GFP_NOFAIL */
- if (memcg && !charged) {
- mem_cgroup_charge_skmem(memcg, amt,
- gfp_memcg_charge() | __GFP_NOFAIL);
- }
+ if (memcg_enabled && !charged)
+ mem_cgroup_sk_charge(sk, amt,
+ gfp_memcg_charge() | __GFP_NOFAIL);
return 1;
}
}
- if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
- trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
+ trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
sk_memory_allocated_sub(sk, amt);
- if (memcg && charged)
- mem_cgroup_uncharge_skmem(memcg, amt);
+ if (charged)
+ mem_cgroup_sk_uncharge(sk, amt);
return 0;
}
@@ -3417,8 +3385,8 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount)
{
sk_memory_allocated_sub(sk, amount);
- if (mem_cgroup_sockets_enabled && sk->sk_memcg)
- mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
+ if (mem_cgroup_sk_enabled(sk))
+ mem_cgroup_sk_uncharge(sk, amount);
if (sk_under_global_memory_pressure(sk) &&
(sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
@@ -3732,7 +3700,7 @@ void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid)
*/
smp_wmb();
refcount_set(&sk->sk_refcnt, 1);
- atomic_set(&sk->sk_drops, 0);
+ sk_drops_reset(sk);
}
EXPORT_SYMBOL(sock_init_data_uid);
@@ -3992,7 +3960,7 @@ void sk_get_meminfo(const struct sock *sk, u32 *mem)
mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
- mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
+ mem[SK_MEMINFO_DROPS] = sk_drops_read(sk);
}
#ifdef CONFIG_PROC_FS
@@ -4473,7 +4441,9 @@ static int __init sock_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_err);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_socket);
+#ifdef CONFIG_MEMCG
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_memcg);
+#endif
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_lock);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_reserved_mem);
@@ -4482,7 +4452,7 @@ static int __init sock_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_omem_alloc);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_omem_alloc);
- CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_sndbuf);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_err_soft);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_queued);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_alloc);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tsq_flags);
@@ -4501,12 +4471,15 @@ static int __init sock_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_sndtimeo);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_priority);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_mark);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_uid);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_protocol);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_dst_cache);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_route_caps);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_type);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_size);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_allocation);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_txhash);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_sndbuf);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_segs);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_pacing_shift);
CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_use_task_frag);
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index b23594c767f2..026ce9bd9e5e 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -348,7 +348,7 @@ static struct pernet_operations diag_net_ops = {
static int __init sock_diag_init(void)
{
- broadcast_wq = alloc_workqueue("sock_diag_events", 0, 0);
+ broadcast_wq = alloc_workqueue("sock_diag_events", WQ_PERCPU, 0);
BUG_ON(!broadcast_wq);
return register_pernet_subsys(&diag_net_ops);
}
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 82a14f131d00..5947b38e4f8b 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -1709,7 +1709,6 @@ EXPORT_SYMBOL_GPL(sock_map_close);
struct sockmap_link {
struct bpf_link link;
struct bpf_map *map;
- enum bpf_attach_type attach_type;
};
static void sock_map_link_release(struct bpf_link *link)
@@ -1721,7 +1720,7 @@ static void sock_map_link_release(struct bpf_link *link)
goto out;
WARN_ON_ONCE(sock_map_prog_update(sockmap_link->map, NULL, link->prog, link,
- sockmap_link->attach_type));
+ link->attach_type));
bpf_map_put_with_uref(sockmap_link->map);
sockmap_link->map = NULL;
@@ -1772,7 +1771,7 @@ static int sock_map_link_update_prog(struct bpf_link *link,
}
ret = sock_map_prog_link_lookup(sockmap_link->map, &pprog, &plink,
- sockmap_link->attach_type);
+ link->attach_type);
if (ret)
goto out;
@@ -1817,7 +1816,7 @@ static int sock_map_link_fill_info(const struct bpf_link *link,
u32 map_id = sock_map_link_get_map_id(sockmap_link);
info->sockmap.map_id = map_id;
- info->sockmap.attach_type = sockmap_link->attach_type;
+ info->sockmap.attach_type = link->attach_type;
return 0;
}
@@ -1828,7 +1827,7 @@ static void sock_map_link_show_fdinfo(const struct bpf_link *link,
u32 map_id = sock_map_link_get_map_id(sockmap_link);
seq_printf(seq, "map_id:\t%u\n", map_id);
- seq_printf(seq, "attach_type:\t%u\n", sockmap_link->attach_type);
+ seq_printf(seq, "attach_type:\t%u\n", link->attach_type);
}
static const struct bpf_link_ops sock_map_link_ops = {
@@ -1866,9 +1865,9 @@ int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog)
}
attach_type = attr->link_create.attach_type;
- bpf_link_init(&sockmap_link->link, BPF_LINK_TYPE_SOCKMAP, &sock_map_link_ops, prog);
+ bpf_link_init(&sockmap_link->link, BPF_LINK_TYPE_SOCKMAP, &sock_map_link_ops, prog,
+ attach_type);
sockmap_link->map = map;
- sockmap_link->attach_type = attach_type;
ret = bpf_link_prime(&sockmap_link->link, &link_primer);
if (ret) {
diff --git a/net/core/stream.c b/net/core/stream.c
index b16dfa568a2d..7a37e7dd2c43 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -23,9 +23,13 @@
/**
* sk_stream_write_space - stream socket write_space callback.
- * @sk: socket
+ * @sk: pointer to the socket structure
*
- * FIXME: write proper description
+ * This function is invoked when space becomes available in the socket's
+ * send buffer. If the socket is writable, it clears the SOCK_NOSPACE flag
+ * (memory for writing is available again), wakes up any processes waiting
+ * to write, and sends asynchronous notifications if needed.
*/
void sk_stream_write_space(struct sock *sk)
{
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 5dbb2c6f371d..8cf04b57ade1 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -28,6 +28,7 @@
#include <net/rps.h>
#include "dev.h"
+#include "net-sysfs.h"
static int int_3600 = 3600;
static int min_sndbuf = SOCK_MIN_SNDBUF;
@@ -96,50 +97,40 @@ free_buf:
#ifdef CONFIG_RPS
-static struct cpumask *rps_default_mask_cow_alloc(struct net *net)
-{
- struct cpumask *rps_default_mask;
-
- if (net->core.rps_default_mask)
- return net->core.rps_default_mask;
-
- rps_default_mask = kzalloc(cpumask_size(), GFP_KERNEL);
- if (!rps_default_mask)
- return NULL;
-
- /* pairs with READ_ONCE in rx_queue_default_mask() */
- WRITE_ONCE(net->core.rps_default_mask, rps_default_mask);
- return rps_default_mask;
-}
+DEFINE_MUTEX(rps_default_mask_mutex);
static int rps_default_mask_sysctl(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = (struct net *)table->data;
+ struct cpumask *mask;
int err = 0;
- rtnl_lock();
+ mutex_lock(&rps_default_mask_mutex);
+ mask = net->core.rps_default_mask;
if (write) {
- struct cpumask *rps_default_mask = rps_default_mask_cow_alloc(net);
-
+ if (!mask) {
+ mask = kzalloc(cpumask_size(), GFP_KERNEL);
+ net->core.rps_default_mask = mask;
+ }
err = -ENOMEM;
- if (!rps_default_mask)
+ if (!mask)
goto done;
- err = cpumask_parse(buffer, rps_default_mask);
+ err = cpumask_parse(buffer, mask);
if (err)
goto done;
- err = rps_cpumask_housekeeping(rps_default_mask);
+ err = rps_cpumask_housekeeping(mask);
if (err)
goto done;
} else {
err = dump_cpumask(buffer, lenp, ppos,
- net->core.rps_default_mask ? : cpu_none_mask);
+ mask ?: cpu_none_mask);
}
done:
- rtnl_unlock();
+ mutex_unlock(&rps_default_mask_mutex);
return err;
}
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 491334b9b8be..9100e160113a 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -663,9 +663,8 @@ struct sk_buff *xdp_build_skb_from_buff(const struct xdp_buff *xdp)
u32 tsize;
tsize = sinfo->xdp_frags_truesize ? : nr_frags * xdp->frame_sz;
- xdp_update_skb_shared_info(skb, nr_frags,
- sinfo->xdp_frags_size, tsize,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, nr_frags, sinfo->xdp_frags_size,
+ tsize, xdp_buff_get_skb_flags(xdp));
}
skb->protocol = eth_type_trans(skb, rxq->dev);
@@ -692,7 +691,7 @@ static noinline bool xdp_copy_frags_from_zc(struct sk_buff *skb,
struct skb_shared_info *sinfo = skb_shinfo(skb);
const struct skb_shared_info *xinfo;
u32 nr_frags, tsize = 0;
- bool pfmemalloc = false;
+ u32 flags = 0;
xinfo = xdp_get_shared_info_from_buff(xdp);
nr_frags = xinfo->nr_frags;
@@ -714,11 +713,12 @@ static noinline bool xdp_copy_frags_from_zc(struct sk_buff *skb,
__skb_fill_page_desc_noacc(sinfo, i, page, offset, len);
tsize += truesize;
- pfmemalloc |= page_is_pfmemalloc(page);
+ if (page_is_pfmemalloc(page))
+ flags |= XDP_FLAGS_FRAGS_PF_MEMALLOC;
}
- xdp_update_skb_shared_info(skb, nr_frags, xinfo->xdp_frags_size,
- tsize, pfmemalloc);
+ xdp_update_skb_frags_info(skb, nr_frags, xinfo->xdp_frags_size, tsize,
+ flags);
return true;
}
@@ -823,10 +823,9 @@ struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
skb_metadata_set(skb, xdpf->metasize);
if (unlikely(xdp_frame_has_frags(xdpf)))
- xdp_update_skb_shared_info(skb, nr_frags,
- sinfo->xdp_frags_size,
- nr_frags * xdpf->frame_sz,
- xdp_frame_is_frag_pfmemalloc(xdpf));
+ xdp_update_skb_frags_info(skb, nr_frags, sinfo->xdp_frags_size,
+ nr_frags * xdpf->frame_sz,
+ xdp_frame_get_skb_flags(xdpf));
/* Essential SKB info: protocol and skb->dev */
skb->protocol = eth_type_trans(skb, dev);
diff --git a/net/devlink/core.c b/net/devlink/core.c
index 7203c39532fc..58093f49c090 100644
--- a/net/devlink/core.c
+++ b/net/devlink/core.c
@@ -320,7 +320,7 @@ static void devlink_release(struct work_struct *work)
void devlink_put(struct devlink *devlink)
{
if (refcount_dec_and_test(&devlink->refcount))
- queue_rcu_work(system_wq, &devlink->rwork);
+ queue_rcu_work(system_percpu_wq, &devlink->rwork);
}
struct devlink *devlinks_xa_find_get(struct net *net, unsigned long *indexp)
diff --git a/net/devlink/health.c b/net/devlink/health.c
index b3ce8ecbb7fb..136a67c36a20 100644
--- a/net/devlink/health.c
+++ b/net/devlink/health.c
@@ -60,6 +60,7 @@ struct devlink_health_reporter {
struct devlink_port *devlink_port;
struct devlink_fmsg *dump_fmsg;
u64 graceful_period;
+ u64 burst_period;
bool auto_recover;
bool auto_dump;
u8 health_state;
@@ -108,11 +109,14 @@ devlink_port_health_reporter_find_by_name(struct devlink_port *devlink_port,
static struct devlink_health_reporter *
__devlink_health_reporter_create(struct devlink *devlink,
const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, void *priv)
+ void *priv)
{
struct devlink_health_reporter *reporter;
- if (WARN_ON(graceful_period && !ops->recover))
+ if (WARN_ON(ops->default_graceful_period && !ops->recover))
+ return ERR_PTR(-EINVAL);
+
+ if (WARN_ON(ops->default_burst_period && !ops->default_graceful_period))
return ERR_PTR(-EINVAL);
reporter = kzalloc(sizeof(*reporter), GFP_KERNEL);
@@ -122,7 +126,8 @@ __devlink_health_reporter_create(struct devlink *devlink,
reporter->priv = priv;
reporter->ops = ops;
reporter->devlink = devlink;
- reporter->graceful_period = graceful_period;
+ reporter->graceful_period = ops->default_graceful_period;
+ reporter->burst_period = ops->default_burst_period;
reporter->auto_recover = !!ops->recover;
reporter->auto_dump = !!ops->dump;
return reporter;
@@ -134,13 +139,12 @@ __devlink_health_reporter_create(struct devlink *devlink,
*
* @port: devlink_port to which health reports will relate
* @ops: devlink health reporter ops
- * @graceful_period: min time (in msec) between recovery attempts
* @priv: driver priv pointer
*/
struct devlink_health_reporter *
devl_port_health_reporter_create(struct devlink_port *port,
const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, void *priv)
+ void *priv)
{
struct devlink_health_reporter *reporter;
@@ -150,8 +154,7 @@ devl_port_health_reporter_create(struct devlink_port *port,
ops->name))
return ERR_PTR(-EEXIST);
- reporter = __devlink_health_reporter_create(port->devlink, ops,
- graceful_period, priv);
+ reporter = __devlink_health_reporter_create(port->devlink, ops, priv);
if (IS_ERR(reporter))
return reporter;
@@ -164,14 +167,13 @@ EXPORT_SYMBOL_GPL(devl_port_health_reporter_create);
struct devlink_health_reporter *
devlink_port_health_reporter_create(struct devlink_port *port,
const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, void *priv)
+ void *priv)
{
struct devlink_health_reporter *reporter;
struct devlink *devlink = port->devlink;
devl_lock(devlink);
- reporter = devl_port_health_reporter_create(port, ops,
- graceful_period, priv);
+ reporter = devl_port_health_reporter_create(port, ops, priv);
devl_unlock(devlink);
return reporter;
}
@@ -182,13 +184,12 @@ EXPORT_SYMBOL_GPL(devlink_port_health_reporter_create);
*
* @devlink: devlink instance to which the health reports will relate
* @ops: devlink health reporter ops
- * @graceful_period: min time (in msec) between recovery attempts
* @priv: driver priv pointer
*/
struct devlink_health_reporter *
devl_health_reporter_create(struct devlink *devlink,
const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, void *priv)
+ void *priv)
{
struct devlink_health_reporter *reporter;
@@ -197,8 +198,7 @@ devl_health_reporter_create(struct devlink *devlink,
if (devlink_health_reporter_find_by_name(devlink, ops->name))
return ERR_PTR(-EEXIST);
- reporter = __devlink_health_reporter_create(devlink, ops,
- graceful_period, priv);
+ reporter = __devlink_health_reporter_create(devlink, ops, priv);
if (IS_ERR(reporter))
return reporter;
@@ -210,13 +210,12 @@ EXPORT_SYMBOL_GPL(devl_health_reporter_create);
struct devlink_health_reporter *
devlink_health_reporter_create(struct devlink *devlink,
const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, void *priv)
+ void *priv)
{
struct devlink_health_reporter *reporter;
devl_lock(devlink);
- reporter = devl_health_reporter_create(devlink, ops,
- graceful_period, priv);
+ reporter = devl_health_reporter_create(devlink, ops, priv);
devl_unlock(devlink);
return reporter;
}
@@ -298,6 +297,10 @@ devlink_nl_health_reporter_fill(struct sk_buff *msg,
reporter->graceful_period))
goto reporter_nest_cancel;
if (reporter->ops->recover &&
+ devlink_nl_put_u64(msg, DEVLINK_ATTR_HEALTH_REPORTER_BURST_PERIOD,
+ reporter->burst_period))
+ goto reporter_nest_cancel;
+ if (reporter->ops->recover &&
nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER,
reporter->auto_recover))
goto reporter_nest_cancel;
@@ -462,16 +465,33 @@ int devlink_nl_health_reporter_set_doit(struct sk_buff *skb,
if (!reporter->ops->recover &&
(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] ||
- info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER]))
+ info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] ||
+ info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_BURST_PERIOD]))
return -EOPNOTSUPP;
if (!reporter->ops->dump &&
info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP])
return -EOPNOTSUPP;
- if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD])
+ if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD]) {
reporter->graceful_period =
nla_get_u64(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD]);
+ if (!reporter->graceful_period)
+ reporter->burst_period = 0;
+ }
+
+ if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_BURST_PERIOD]) {
+ u64 burst_period =
+ nla_get_u64(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_BURST_PERIOD]);
+
+ if (!reporter->graceful_period && burst_period) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "Cannot set burst period without a grace period.");
+ return -EINVAL;
+ }
+
+ reporter->burst_period = burst_period;
+ }
if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER])
reporter->auto_recover =
@@ -514,11 +534,25 @@ static void devlink_recover_notify(struct devlink_health_reporter *reporter,
devlink_nl_notify_send_desc(devlink, msg, &desc);
}
+static bool
+devlink_health_reporter_in_burst(struct devlink_health_reporter *reporter)
+{
+ unsigned long burst_threshold = reporter->last_recovery_ts +
+ msecs_to_jiffies(reporter->burst_period);
+
+ return time_is_after_jiffies(burst_threshold);
+}
+
void
devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter)
{
reporter->recovery_count++;
- reporter->last_recovery_ts = jiffies;
+ if (!devlink_health_reporter_in_burst(reporter))
+ /* When burst period is set, last_recovery_ts marks the first
+ * recovery within the burst period, not necessarily the last
+ * one.
+ */
+ reporter->last_recovery_ts = jiffies;
}
EXPORT_SYMBOL_GPL(devlink_health_reporter_recovery_done);
@@ -592,12 +626,37 @@ dump_err:
return err;
}
+static bool
+devlink_health_recover_abort(struct devlink_health_reporter *reporter,
+ enum devlink_health_reporter_state prev_state)
+{
+ unsigned long recover_ts_threshold;
+
+ if (!reporter->auto_recover)
+ return false;
+
+ /* abort if the previous error wasn't recovered */
+ if (prev_state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY)
+ return true;
+
+ if (devlink_health_reporter_in_burst(reporter))
+ return false;
+
+ recover_ts_threshold = reporter->last_recovery_ts +
+ msecs_to_jiffies(reporter->burst_period) +
+ msecs_to_jiffies(reporter->graceful_period);
+ if (reporter->last_recovery_ts && reporter->recovery_count &&
+ time_is_after_jiffies(recover_ts_threshold))
+ return true;
+
+ return false;
+}
+
int devlink_health_report(struct devlink_health_reporter *reporter,
const char *msg, void *priv_ctx)
{
enum devlink_health_reporter_state prev_health_state;
struct devlink *devlink = reporter->devlink;
- unsigned long recover_ts_threshold;
int ret;
/* write a log message of the current error */
@@ -608,13 +667,7 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
- /* abort if the previous error wasn't recovered */
- recover_ts_threshold = reporter->last_recovery_ts +
- msecs_to_jiffies(reporter->graceful_period);
- if (reporter->auto_recover &&
- (prev_health_state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY ||
- (reporter->last_recovery_ts && reporter->recovery_count &&
- time_is_after_jiffies(recover_ts_threshold)))) {
+ if (devlink_health_recover_abort(reporter, prev_health_state)) {
trace_devlink_health_recover_aborted(devlink,
reporter->ops->name,
reporter->health_state,
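Stripped of the auto_recover and recovery_count guards, the abort decision above is plain time arithmetic: reports inside the burst window never throttle, and once the window closes the graceful period is measured from the first recovery of that burst. A small sketch of the same check with times in milliseconds (hypothetical helper, ordinary integers instead of jiffies):

#include <stdbool.h>
#include <stdint.h>

static bool recovery_should_abort(uint64_t now_ms, uint64_t first_recovery_ms,
				  uint64_t burst_period_ms,
				  uint64_t graceful_period_ms,
				  bool prev_error_unrecovered)
{
	/* Abort if the previous error was never recovered. */
	if (prev_error_unrecovered)
		return true;

	/* Still inside the burst window: recover freely. */
	if (now_ms < first_recovery_ms + burst_period_ms)
		return false;

	/* Past the burst window but within the grace period that starts
	 * where the burst window ends: throttle this recovery. */
	return now_ms < first_recovery_ms + burst_period_ms + graceful_period_ms;
}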
diff --git a/net/devlink/netlink_gen.c b/net/devlink/netlink_gen.c
index e340d955cf3b..9fd00977d59e 100644
--- a/net/devlink/netlink_gen.c
+++ b/net/devlink/netlink_gen.c
@@ -45,6 +45,11 @@ const struct nla_policy devlink_dl_port_function_nl_policy[DEVLINK_PORT_FN_ATTR_
[DEVLINK_PORT_FN_ATTR_CAPS] = NLA_POLICY_BITFIELD32(15),
};
+const struct nla_policy devlink_dl_rate_tc_bws_nl_policy[DEVLINK_RATE_TC_ATTR_BW + 1] = {
+ [DEVLINK_RATE_TC_ATTR_INDEX] = NLA_POLICY_MAX(NLA_U8, DEVLINK_RATE_TC_INDEX_MAX),
+ [DEVLINK_RATE_TC_ATTR_BW] = { .type = NLA_U32, },
+};
+
const struct nla_policy devlink_dl_selftest_id_nl_policy[DEVLINK_ATTR_SELFTEST_ID_FLASH + 1] = {
[DEVLINK_ATTR_SELFTEST_ID_FLASH] = { .type = NLA_FLAG, },
};
@@ -384,7 +389,7 @@ static const struct nla_policy devlink_health_reporter_get_dump_nl_policy[DEVLIN
};
/* DEVLINK_CMD_HEALTH_REPORTER_SET - do */
-static const struct nla_policy devlink_health_reporter_set_nl_policy[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP + 1] = {
+static const struct nla_policy devlink_health_reporter_set_nl_policy[DEVLINK_ATTR_HEALTH_REPORTER_BURST_PERIOD + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
[DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
@@ -392,6 +397,7 @@ static const struct nla_policy devlink_health_reporter_set_nl_policy[DEVLINK_ATT
[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .type = NLA_U64, },
[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8, },
[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP] = { .type = NLA_U8, },
+ [DEVLINK_ATTR_HEALTH_REPORTER_BURST_PERIOD] = { .type = NLA_U64, },
};
/* DEVLINK_CMD_HEALTH_REPORTER_RECOVER - do */
@@ -523,7 +529,7 @@ static const struct nla_policy devlink_rate_get_dump_nl_policy[DEVLINK_ATTR_DEV_
};
/* DEVLINK_CMD_RATE_SET - do */
-static const struct nla_policy devlink_rate_set_nl_policy[DEVLINK_ATTR_RATE_TX_WEIGHT + 1] = {
+static const struct nla_policy devlink_rate_set_nl_policy[DEVLINK_ATTR_RATE_TC_BWS + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
[DEVLINK_ATTR_RATE_NODE_NAME] = { .type = NLA_NUL_STRING, },
@@ -532,10 +538,11 @@ static const struct nla_policy devlink_rate_set_nl_policy[DEVLINK_ATTR_RATE_TX_W
[DEVLINK_ATTR_RATE_TX_PRIORITY] = { .type = NLA_U32, },
[DEVLINK_ATTR_RATE_TX_WEIGHT] = { .type = NLA_U32, },
[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_RATE_TC_BWS] = NLA_POLICY_NESTED(devlink_dl_rate_tc_bws_nl_policy),
};
/* DEVLINK_CMD_RATE_NEW - do */
-static const struct nla_policy devlink_rate_new_nl_policy[DEVLINK_ATTR_RATE_TX_WEIGHT + 1] = {
+static const struct nla_policy devlink_rate_new_nl_policy[DEVLINK_ATTR_RATE_TC_BWS + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
[DEVLINK_ATTR_RATE_NODE_NAME] = { .type = NLA_NUL_STRING, },
@@ -544,6 +551,7 @@ static const struct nla_policy devlink_rate_new_nl_policy[DEVLINK_ATTR_RATE_TX_W
[DEVLINK_ATTR_RATE_TX_PRIORITY] = { .type = NLA_U32, },
[DEVLINK_ATTR_RATE_TX_WEIGHT] = { .type = NLA_U32, },
[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_RATE_TC_BWS] = NLA_POLICY_NESTED(devlink_dl_rate_tc_bws_nl_policy),
};
/* DEVLINK_CMD_RATE_DEL - do */
@@ -1025,7 +1033,7 @@ const struct genl_split_ops devlink_nl_ops[74] = {
.doit = devlink_nl_health_reporter_set_doit,
.post_doit = devlink_nl_post_doit,
.policy = devlink_health_reporter_set_nl_policy,
- .maxattr = DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP,
+ .maxattr = DEVLINK_ATTR_HEALTH_REPORTER_BURST_PERIOD,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
{
@@ -1191,7 +1199,7 @@ const struct genl_split_ops devlink_nl_ops[74] = {
.doit = devlink_nl_rate_set_doit,
.post_doit = devlink_nl_post_doit,
.policy = devlink_rate_set_nl_policy,
- .maxattr = DEVLINK_ATTR_RATE_TX_WEIGHT,
+ .maxattr = DEVLINK_ATTR_RATE_TC_BWS,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
{
@@ -1201,7 +1209,7 @@ const struct genl_split_ops devlink_nl_ops[74] = {
.doit = devlink_nl_rate_new_doit,
.post_doit = devlink_nl_post_doit,
.policy = devlink_rate_new_nl_policy,
- .maxattr = DEVLINK_ATTR_RATE_TX_WEIGHT,
+ .maxattr = DEVLINK_ATTR_RATE_TC_BWS,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
{
diff --git a/net/devlink/netlink_gen.h b/net/devlink/netlink_gen.h
index 8f2bd50ddf5e..09cc6f264ccf 100644
--- a/net/devlink/netlink_gen.h
+++ b/net/devlink/netlink_gen.h
@@ -13,6 +13,7 @@
/* Common nested types */
extern const struct nla_policy devlink_dl_port_function_nl_policy[DEVLINK_PORT_FN_ATTR_CAPS + 1];
+extern const struct nla_policy devlink_dl_rate_tc_bws_nl_policy[DEVLINK_RATE_TC_ATTR_BW + 1];
extern const struct nla_policy devlink_dl_selftest_id_nl_policy[DEVLINK_ATTR_SELFTEST_ID_FLASH + 1];
/* Ops table for devlink */
diff --git a/net/devlink/param.c b/net/devlink/param.c
index b29abf8d3ed4..70e69523412c 100644
--- a/net/devlink/param.c
+++ b/net/devlink/param.c
@@ -92,6 +92,26 @@ static const struct devlink_param devlink_param_generic[] = {
.name = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_NAME,
.type = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_TYPE,
},
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ .name = DEVLINK_PARAM_GENERIC_ENABLE_PHC_NAME,
+ .type = DEVLINK_PARAM_GENERIC_ENABLE_PHC_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_CLOCK_ID,
+ .name = DEVLINK_PARAM_GENERIC_CLOCK_ID_NAME,
+ .type = DEVLINK_PARAM_GENERIC_CLOCK_ID_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_TOTAL_VFS,
+ .name = DEVLINK_PARAM_GENERIC_TOTAL_VFS_NAME,
+ .type = DEVLINK_PARAM_GENERIC_TOTAL_VFS_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_NUM_DOORBELLS,
+ .name = DEVLINK_PARAM_GENERIC_NUM_DOORBELLS_NAME,
+ .type = DEVLINK_PARAM_GENERIC_NUM_DOORBELLS_TYPE,
+ },
};
static int devlink_param_generic_verify(const struct devlink_param *param)
@@ -195,6 +215,11 @@ devlink_nl_param_value_fill_one(struct sk_buff *msg,
if (nla_put_u32(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu32))
goto value_nest_cancel;
break;
+ case DEVLINK_PARAM_TYPE_U64:
+ if (devlink_nl_put_u64(msg, DEVLINK_ATTR_PARAM_VALUE_DATA,
+ val.vu64))
+ goto value_nest_cancel;
+ break;
case DEVLINK_PARAM_TYPE_STRING:
if (nla_put_string(msg, DEVLINK_ATTR_PARAM_VALUE_DATA,
val.vstr))
@@ -429,6 +454,11 @@ devlink_param_value_get_from_info(const struct devlink_param *param,
return -EINVAL;
value->vu32 = nla_get_u32(param_data);
break;
+ case DEVLINK_PARAM_TYPE_U64:
+ if (nla_len(param_data) != sizeof(u64))
+ return -EINVAL;
+ value->vu64 = nla_get_u64(param_data);
+ break;
case DEVLINK_PARAM_TYPE_STRING:
len = strnlen(nla_data(param_data), nla_len(param_data));
if (len == nla_len(param_data) ||
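Because DEVLINK_ATTR_PARAM_VALUE_DATA carries differently sized payloads depending on the parameter type, the new U64 case validates the attribute length itself rather than relying on a fixed-size policy entry, mirroring the existing U32 handling. The same guard in isolation (kernel-style sketch, function name hypothetical):

#include <net/netlink.h>

/* Reject a short or oversized attribute before reading eight bytes. */
static int read_u64_attr(const struct nlattr *attr, u64 *out)
{
	if (nla_len(attr) != sizeof(u64))
		return -EINVAL;

	*out = nla_get_u64(attr);
	return 0;
}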
diff --git a/net/devlink/port.c b/net/devlink/port.c
index 939081a0e615..93d8a25bb920 100644
--- a/net/devlink/port.c
+++ b/net/devlink/port.c
@@ -1333,8 +1333,8 @@ int devlink_port_netdevice_event(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int __devlink_port_attrs_set(struct devlink_port *devlink_port,
- enum devlink_port_flavour flavour)
+static void __devlink_port_attrs_set(struct devlink_port *devlink_port,
+ enum devlink_port_flavour flavour)
{
struct devlink_port_attrs *attrs = &devlink_port->attrs;
@@ -1347,7 +1347,6 @@ static int __devlink_port_attrs_set(struct devlink_port *devlink_port,
} else {
devlink_port->switch_port = false;
}
- return 0;
}
/**
@@ -1357,17 +1356,13 @@ static int __devlink_port_attrs_set(struct devlink_port *devlink_port,
* @attrs: devlink port attrs
*/
void devlink_port_attrs_set(struct devlink_port *devlink_port,
- struct devlink_port_attrs *attrs)
+ const struct devlink_port_attrs *attrs)
{
- int ret;
-
ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
+ WARN_ON(attrs->splittable && attrs->split);
devlink_port->attrs = *attrs;
- ret = __devlink_port_attrs_set(devlink_port, attrs->flavour);
- if (ret)
- return;
- WARN_ON(attrs->splittable && attrs->split);
+ __devlink_port_attrs_set(devlink_port, attrs->flavour);
}
EXPORT_SYMBOL_GPL(devlink_port_attrs_set);
@@ -1383,14 +1378,10 @@ void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 contro
u16 pf, bool external)
{
struct devlink_port_attrs *attrs = &devlink_port->attrs;
- int ret;
ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
- ret = __devlink_port_attrs_set(devlink_port,
- DEVLINK_PORT_FLAVOUR_PCI_PF);
- if (ret)
- return;
+ __devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PCI_PF);
attrs->pci_pf.controller = controller;
attrs->pci_pf.pf = pf;
attrs->pci_pf.external = external;
@@ -1411,14 +1402,10 @@ void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 contro
u16 pf, u16 vf, bool external)
{
struct devlink_port_attrs *attrs = &devlink_port->attrs;
- int ret;
ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
- ret = __devlink_port_attrs_set(devlink_port,
- DEVLINK_PORT_FLAVOUR_PCI_VF);
- if (ret)
- return;
+ __devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PCI_VF);
attrs->pci_vf.controller = controller;
attrs->pci_vf.pf = pf;
attrs->pci_vf.vf = vf;
@@ -1439,14 +1426,10 @@ void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 contro
u16 pf, u32 sf, bool external)
{
struct devlink_port_attrs *attrs = &devlink_port->attrs;
- int ret;
ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
- ret = __devlink_port_attrs_set(devlink_port,
- DEVLINK_PORT_FLAVOUR_PCI_SF);
- if (ret)
- return;
+ __devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PCI_SF);
attrs->pci_sf.controller = controller;
attrs->pci_sf.pf = pf;
attrs->pci_sf.sf = sf;
@@ -1519,7 +1502,7 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
struct devlink_port_attrs *attrs = &devlink_port->attrs;
int n = 0;
- if (!devlink_port->attrs_set)
+ if (!devlink_port->attrs_set || devlink_port->attrs.no_phys_port_name)
return -EOPNOTSUPP;
switch (attrs->flavour) {
diff --git a/net/devlink/rate.c b/net/devlink/rate.c
index 8828ffaf6cbc..264fb82cba19 100644
--- a/net/devlink/rate.c
+++ b/net/devlink/rate.c
@@ -34,7 +34,7 @@ devlink_rate_leaf_get_from_info(struct devlink *devlink, struct genl_info *info)
static struct devlink_rate *
devlink_rate_node_get_by_name(struct devlink *devlink, const char *node_name)
{
- static struct devlink_rate *devlink_rate;
+ struct devlink_rate *devlink_rate;
list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
if (devlink_rate_is_node(devlink_rate) &&
@@ -80,6 +80,29 @@ devlink_rate_get_from_info(struct devlink *devlink, struct genl_info *info)
return ERR_PTR(-EINVAL);
}
+static int devlink_rate_put_tc_bws(struct sk_buff *msg, u32 *tc_bw)
+{
+ struct nlattr *nla_tc_bw;
+ int i;
+
+ for (i = 0; i < DEVLINK_RATE_TCS_MAX; i++) {
+ nla_tc_bw = nla_nest_start(msg, DEVLINK_ATTR_RATE_TC_BWS);
+ if (!nla_tc_bw)
+ return -EMSGSIZE;
+
+ if (nla_put_u8(msg, DEVLINK_RATE_TC_ATTR_INDEX, i) ||
+ nla_put_u32(msg, DEVLINK_RATE_TC_ATTR_BW, tc_bw[i]))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, nla_tc_bw);
+ }
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(msg, nla_tc_bw);
+ return -EMSGSIZE;
+}
+
static int devlink_nl_rate_fill(struct sk_buff *msg,
struct devlink_rate *devlink_rate,
enum devlink_command cmd, u32 portid, u32 seq,
@@ -129,6 +152,9 @@ static int devlink_nl_rate_fill(struct sk_buff *msg,
devlink_rate->parent->name))
goto nla_put_failure;
+ if (devlink_rate_put_tc_bws(msg, devlink_rate->tc_bw))
+ goto nla_put_failure;
+
genlmsg_end(msg, hdr);
return 0;
@@ -316,6 +342,87 @@ devlink_nl_rate_parent_node_set(struct devlink_rate *devlink_rate,
return 0;
}
+static int devlink_nl_rate_tc_bw_parse(struct nlattr *parent_nest, u32 *tc_bw,
+ unsigned long *bitmap,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[DEVLINK_RATE_TC_ATTR_MAX + 1];
+ u8 tc_index;
+ int err;
+
+ err = nla_parse_nested(tb, DEVLINK_RATE_TC_ATTR_MAX, parent_nest,
+ devlink_dl_rate_tc_bws_nl_policy, extack);
+ if (err)
+ return err;
+
+ if (!tb[DEVLINK_RATE_TC_ATTR_INDEX]) {
+ NL_SET_ERR_ATTR_MISS(extack, parent_nest,
+ DEVLINK_RATE_TC_ATTR_INDEX);
+ return -EINVAL;
+ }
+
+ tc_index = nla_get_u8(tb[DEVLINK_RATE_TC_ATTR_INDEX]);
+
+ if (!tb[DEVLINK_RATE_TC_ATTR_BW]) {
+ NL_SET_ERR_ATTR_MISS(extack, parent_nest,
+ DEVLINK_RATE_TC_ATTR_BW);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(tc_index, bitmap)) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Duplicate traffic class index specified (%u)",
+ tc_index);
+ return -EINVAL;
+ }
+
+ tc_bw[tc_index] = nla_get_u32(tb[DEVLINK_RATE_TC_ATTR_BW]);
+
+ return 0;
+}
+
+static int devlink_nl_rate_tc_bw_set(struct devlink_rate *devlink_rate,
+ struct genl_info *info)
+{
+ DECLARE_BITMAP(bitmap, DEVLINK_RATE_TCS_MAX) = {};
+ struct devlink *devlink = devlink_rate->devlink;
+ const struct devlink_ops *ops = devlink->ops;
+ u32 tc_bw[DEVLINK_RATE_TCS_MAX] = {};
+ int rem, err = -EOPNOTSUPP, i;
+ struct nlattr *attr;
+
+ nlmsg_for_each_attr_type(attr, DEVLINK_ATTR_RATE_TC_BWS, info->nlhdr,
+ GENL_HDRLEN, rem) {
+ err = devlink_nl_rate_tc_bw_parse(attr, tc_bw, bitmap,
+ info->extack);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < DEVLINK_RATE_TCS_MAX; i++) {
+ if (!test_bit(i, bitmap)) {
+ NL_SET_ERR_MSG_FMT(info->extack,
+ "Bandwidth values must be specified for all %u traffic classes",
+ DEVLINK_RATE_TCS_MAX);
+ return -EINVAL;
+ }
+ }
+
+ if (devlink_rate_is_leaf(devlink_rate))
+ err = ops->rate_leaf_tc_bw_set(devlink_rate, devlink_rate->priv,
+ tc_bw, info->extack);
+ else if (devlink_rate_is_node(devlink_rate))
+ err = ops->rate_node_tc_bw_set(devlink_rate, devlink_rate->priv,
+ tc_bw, info->extack);
+
+ if (err)
+ return err;
+
+ memcpy(devlink_rate->tc_bw, tc_bw, sizeof(tc_bw));
+
+ return 0;
+}
+
static int devlink_nl_rate_set(struct devlink_rate *devlink_rate,
const struct devlink_ops *ops,
struct genl_info *info)
@@ -388,6 +495,12 @@ static int devlink_nl_rate_set(struct devlink_rate *devlink_rate,
return err;
}
+ if (attrs[DEVLINK_ATTR_RATE_TC_BWS]) {
+ err = devlink_nl_rate_tc_bw_set(devlink_rate, info);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -423,6 +536,13 @@ static bool devlink_rate_set_ops_supported(const struct devlink_ops *ops,
"TX weight set isn't supported for the leafs");
return false;
}
+ if (attrs[DEVLINK_ATTR_RATE_TC_BWS] &&
+ !ops->rate_leaf_tc_bw_set) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ attrs[DEVLINK_ATTR_RATE_TC_BWS],
+ "TC bandwidth set isn't supported for the leafs");
+ return false;
+ }
} else if (type == DEVLINK_RATE_TYPE_NODE) {
if (attrs[DEVLINK_ATTR_RATE_TX_SHARE] && !ops->rate_node_tx_share_set) {
NL_SET_ERR_MSG(info->extack, "TX share set isn't supported for the nodes");
@@ -449,6 +569,13 @@ static bool devlink_rate_set_ops_supported(const struct devlink_ops *ops,
"TX weight set isn't supported for the nodes");
return false;
}
+ if (attrs[DEVLINK_ATTR_RATE_TC_BWS] &&
+ !ops->rate_node_tc_bw_set) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ attrs[DEVLINK_ATTR_RATE_TC_BWS],
+ "TC bandwidth set isn't supported for the nodes");
+ return false;
+ }
} else {
WARN(1, "Unknown type of rate object");
return false;
@@ -692,8 +819,8 @@ EXPORT_SYMBOL_GPL(devl_rate_leaf_destroy);
*/
void devl_rate_nodes_destroy(struct devlink *devlink)
{
- static struct devlink_rate *devlink_rate, *tmp;
const struct devlink_ops *ops = devlink->ops;
+ struct devlink_rate *devlink_rate, *tmp;
devl_assert_locked(devlink);
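The TC bandwidth parser above leans on a bitmap twice: test_and_set_bit() rejects a duplicate DEVLINK_RATE_TC_ATTR_INDEX in a single pass, and the follow-up scan insists that every traffic class received a value. The same two checks in isolation (kernel bitmap API, the class count is a hypothetical stand-in for DEVLINK_RATE_TCS_MAX):

#include <linux/bitmap.h>
#include <linux/errno.h>

#define NUM_TCS 8	/* hypothetical stand-in for DEVLINK_RATE_TCS_MAX */

static int validate_tc_indices(const u8 *indices, unsigned int count)
{
	DECLARE_BITMAP(seen, NUM_TCS) = {};
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (indices[i] >= NUM_TCS)
			return -EINVAL;
		/* catches duplicates while recording what was seen */
		if (test_and_set_bit(indices[i], seen))
			return -EINVAL;
	}

	/* every class must have been specified exactly once */
	return bitmap_full(seen, NUM_TCS) ? 0 : -EINVAL;
}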
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 2dfe9063613f..869cbe57162f 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -42,12 +42,24 @@ config NET_DSA_TAG_BRCM
Broadcom switches which place the tag after the MAC source address.
config NET_DSA_TAG_BRCM_LEGACY
- tristate "Tag driver for Broadcom legacy switches using in-frame headers"
+ tristate "Tag driver for BCM63xx legacy switches using in-frame headers"
select NET_DSA_TAG_BRCM_COMMON
help
Say Y if you want to enable support for tagging frames for the
- Broadcom legacy switches which place the tag after the MAC source
+ BCM63xx legacy switches which place the tag after the MAC source
address.
+ This tag is used in BCM63xx legacy switches which work without the
+ original FCS and length before the tag insertion.
+
+config NET_DSA_TAG_BRCM_LEGACY_FCS
+ tristate "Tag driver for BCM53xx legacy switches using in-frame headers"
+ select NET_DSA_TAG_BRCM_COMMON
+ help
+ Say Y if you want to enable support for tagging frames for the
+ BCM53xx legacy switches which place the tag after the MAC source
+ address.
+ This tag is used in BCM53xx legacy switches, which expect the original
+ FCS and length to be present before the tag insertion.
config NET_DSA_TAG_BRCM_PREPEND
tristate "Tag driver for Broadcom switches using prepended headers"
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 436a7e1b412a..5b01a0e43ebe 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -1621,7 +1621,7 @@ void dsa_switch_shutdown(struct dsa_switch *ds)
dsa_switch_for_each_cpu_port(dp, ds)
list_add(&dp->conduit->close_list, &close_list);
- dev_close_many(&close_list, true);
+ netif_close_many(&close_list, true);
dsa_switch_for_each_user_port(dp, ds) {
conduit = dsa_port_to_conduit(dp);
@@ -1829,3 +1829,4 @@ MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index fe75821623a4..26bb657ceac3 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -15,6 +15,7 @@
#define BRCM_NAME "brcm"
#define BRCM_LEGACY_NAME "brcm-legacy"
+#define BRCM_LEGACY_FCS_NAME "brcm-legacy-fcs"
#define BRCM_PREPEND_NAME "brcm-prepend"
/* Legacy Broadcom tag (6 bytes) */
@@ -32,6 +33,10 @@
#define BRCM_LEG_MULTICAST (1 << 5)
#define BRCM_LEG_EGRESS (2 << 5)
#define BRCM_LEG_INGRESS (3 << 5)
+#define BRCM_LEG_LEN_HI(x) (((x) >> 8) & 0x7)
+
+/* 4th byte in the tag */
+#define BRCM_LEG_LEN_LO(x) ((x) & 0xff)
/* 6th byte in the tag */
#define BRCM_LEG_PORT_ID (0xf)
@@ -212,6 +217,41 @@ DSA_TAG_DRIVER(brcm_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM, BRCM_NAME);
#endif
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY) || \
+ IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS)
+static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ int len = BRCM_LEG_TAG_LEN;
+ int source_port;
+ u8 *brcm_tag;
+
+ if (unlikely(!pskb_may_pull(skb, BRCM_LEG_TAG_LEN + VLAN_HLEN)))
+ return NULL;
+
+ brcm_tag = dsa_etype_header_pos_rx(skb);
+
+ source_port = brcm_tag[5] & BRCM_LEG_PORT_ID;
+
+ skb->dev = dsa_conduit_find_user(dev, 0, source_port);
+ if (!skb->dev)
+ return NULL;
+
+ /* VLAN tag is added by BCM63xx internal switch */
+ if (netdev_uses_dsa(skb->dev))
+ len += VLAN_HLEN;
+
+ /* Remove Broadcom tag and update checksum */
+ skb_pull_rcsum(skb, len);
+
+ dsa_default_offload_fwd_mark(skb);
+
+ dsa_strip_etype_header(skb, len);
+
+ return skb;
+}
+#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY || CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS */
+
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY)
static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
@@ -250,49 +290,77 @@ static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb,
return skb;
}
-static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
- struct net_device *dev)
+static const struct dsa_device_ops brcm_legacy_netdev_ops = {
+ .name = BRCM_LEGACY_NAME,
+ .proto = DSA_TAG_PROTO_BRCM_LEGACY,
+ .xmit = brcm_leg_tag_xmit,
+ .rcv = brcm_leg_tag_rcv,
+ .needed_headroom = BRCM_LEG_TAG_LEN,
+};
+
+DSA_TAG_DRIVER(brcm_legacy_netdev_ops);
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_LEGACY, BRCM_LEGACY_NAME);
+#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY */
+
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS)
+static struct sk_buff *brcm_leg_fcs_tag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
- int len = BRCM_LEG_TAG_LEN;
- int source_port;
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ unsigned int fcs_len;
+ __le32 fcs_val;
u8 *brcm_tag;
- if (unlikely(!pskb_may_pull(skb, BRCM_LEG_TAG_LEN + VLAN_HLEN)))
+ /* The Ethernet switch we are interfaced with needs packets to be at
+ * least 64 bytes (including FCS) otherwise they will be discarded when
+ * they enter the switch port logic. When Broadcom tags are enabled, we
+ * need to make sure that packets are at least 70 bytes (including FCS
+ * and tag) because the length verification is done after the Broadcom
+ * tag is stripped off the ingress packet.
+ *
+ * Let dsa_user_xmit() free the SKB.
+ */
+ if (__skb_put_padto(skb, ETH_ZLEN + BRCM_LEG_TAG_LEN, false))
return NULL;
- brcm_tag = dsa_etype_header_pos_rx(skb);
+ fcs_len = skb->len;
+ fcs_val = cpu_to_le32(crc32_le(~0, skb->data, fcs_len) ^ ~0);
- source_port = brcm_tag[5] & BRCM_LEG_PORT_ID;
+ skb_push(skb, BRCM_LEG_TAG_LEN);
- skb->dev = dsa_conduit_find_user(dev, 0, source_port);
- if (!skb->dev)
- return NULL;
+ dsa_alloc_etype_header(skb, BRCM_LEG_TAG_LEN);
- /* VLAN tag is added by BCM63xx internal switch */
- if (netdev_uses_dsa(skb->dev))
- len += VLAN_HLEN;
+ brcm_tag = skb->data + 2 * ETH_ALEN;
- /* Remove Broadcom tag and update checksum */
- skb_pull_rcsum(skb, len);
+ /* Broadcom tag type */
+ brcm_tag[0] = BRCM_LEG_TYPE_HI;
+ brcm_tag[1] = BRCM_LEG_TYPE_LO;
- dsa_default_offload_fwd_mark(skb);
+ /* Broadcom tag value */
+ brcm_tag[2] = BRCM_LEG_EGRESS | BRCM_LEG_LEN_HI(fcs_len);
+ brcm_tag[3] = BRCM_LEG_LEN_LO(fcs_len);
+ brcm_tag[4] = 0;
+ brcm_tag[5] = dp->index & BRCM_LEG_PORT_ID;
- dsa_strip_etype_header(skb, len);
+ /* Original FCS value */
+ if (__skb_pad(skb, ETH_FCS_LEN, false))
+ return NULL;
+ skb_put_data(skb, &fcs_val, ETH_FCS_LEN);
return skb;
}
-static const struct dsa_device_ops brcm_legacy_netdev_ops = {
- .name = BRCM_LEGACY_NAME,
- .proto = DSA_TAG_PROTO_BRCM_LEGACY,
- .xmit = brcm_leg_tag_xmit,
+static const struct dsa_device_ops brcm_legacy_fcs_netdev_ops = {
+ .name = BRCM_LEGACY_FCS_NAME,
+ .proto = DSA_TAG_PROTO_BRCM_LEGACY_FCS,
+ .xmit = brcm_leg_fcs_tag_xmit,
.rcv = brcm_leg_tag_rcv,
.needed_headroom = BRCM_LEG_TAG_LEN,
};
-DSA_TAG_DRIVER(brcm_legacy_netdev_ops);
-MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_LEGACY, BRCM_LEGACY_NAME);
-#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY */
+DSA_TAG_DRIVER(brcm_legacy_fcs_netdev_ops);
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_LEGACY_FCS, BRCM_LEGACY_FCS_NAME);
+#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS */
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND)
static struct sk_buff *brcm_tag_xmit_prepend(struct sk_buff *skb,
@@ -328,6 +396,9 @@ static struct dsa_tag_driver *dsa_tag_driver_array[] = {
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY)
&DSA_TAG_DRIVER_NAME(brcm_legacy_netdev_ops),
#endif
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS)
+ &DSA_TAG_DRIVER_NAME(brcm_legacy_fcs_netdev_ops),
+#endif
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND)
&DSA_TAG_DRIVER_NAME(brcm_prepend_netdev_ops),
#endif
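The brcm-legacy-fcs variant appends the frame's original FCS behind the tag; the value is the standard Ethernet CRC-32, computed with the usual pre- and post-inversion over the padded frame and stored little-endian, exactly as brcm_leg_fcs_tag_xmit() does above. The computation in isolation:

#include <asm/byteorder.h>
#include <linux/crc32.h>
#include <linux/types.h>

/* Ethernet FCS of a frame as it would appear on the wire. */
static __le32 eth_frame_fcs(const u8 *frame, unsigned int len)
{
	return cpu_to_le32(crc32_le(~0, frame, len) ^ ~0);
}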
diff --git a/net/dsa/user.c b/net/dsa/user.c
index e9334520c54a..f59d66f0975d 100644
--- a/net/dsa/user.c
+++ b/net/dsa/user.c
@@ -3604,7 +3604,7 @@ static int dsa_user_netdevice_event(struct notifier_block *nb,
list_add(&dp->user->close_list, &close_list);
}
- dev_close_many(&close_list, true);
+ netif_close_many(&close_list, true);
return NOTIFY_OK;
}
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 4e3651101b86..43e211e611b1 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -613,7 +613,10 @@ EXPORT_SYMBOL(fwnode_get_mac_address);
*/
int device_get_mac_address(struct device *dev, char *addr)
{
- return fwnode_get_mac_address(dev_fwnode(dev), addr);
+ if (!fwnode_get_mac_address(dev_fwnode(dev), addr))
+ return 0;
+
+ return nvmem_get_mac_address(dev, addr);
}
EXPORT_SYMBOL(device_get_mac_address);
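With the fallback above, a driver that calls device_get_mac_address() now picks up firmware-described addresses and nvmem cells through the one helper. A hedged sketch of a probe-time consumer, falling back to a random address when neither source provides one (the driver function name is hypothetical):

#include <linux/etherdevice.h>
#include <linux/property.h>

static void foo_assign_mac(struct device *dev, struct net_device *ndev)
{
	char addr[ETH_ALEN];

	if (!device_get_mac_address(dev, addr))
		eth_hw_addr_set(ndev, (const u8 *)addr);	/* fwnode or nvmem */
	else
		eth_hw_addr_random(ndev);			/* last resort */
}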
diff --git a/net/ethtool/Makefile b/net/ethtool/Makefile
index a1490c4afe6b..1e493553b977 100644
--- a/net/ethtool/Makefile
+++ b/net/ethtool/Makefile
@@ -8,5 +8,5 @@ ethtool_nl-y := netlink.o bitset.o strset.o linkinfo.o linkmodes.o rss.o \
linkstate.o debug.o wol.o features.o privflags.o rings.o \
channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o \
tunnels.o fec.o eeprom.o stats.o phc_vclocks.o mm.o \
- module.o cmis_fw_update.o cmis_cdb.o pse-pd.o plca.o mm.o \
+ module.o cmis_fw_update.o cmis_cdb.o pse-pd.o plca.o \
phy.o tsconfig.o
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index eb253e0fd61b..55223ebc2a7e 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -577,6 +577,26 @@ int __ethtool_get_link(struct net_device *dev)
return netif_running(dev) && dev->ethtool_ops->get_link(dev);
}
+int ethtool_get_rx_ring_count(struct net_device *dev)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_rxnfc rx_rings = {};
+ int ret;
+
+ if (ops->get_rx_ring_count)
+ return ops->get_rx_ring_count(dev);
+
+ if (!ops->get_rxnfc)
+ return -EOPNOTSUPP;
+
+ rx_rings.cmd = ETHTOOL_GRXRINGS;
+ ret = ops->get_rxnfc(dev, &rx_rings, NULL);
+ if (ret < 0)
+ return ret;
+
+ return rx_rings.data;
+}
+
static int ethtool_get_rxnfc_rule_count(struct net_device *dev)
{
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -707,7 +727,9 @@ static u32 ethtool_get_max_rxfh_channel(struct net_device *dev)
if (!rxfh.indir)
return U32_MAX;
+ mutex_lock(&dev->ethtool->rss_lock);
ret = dev->ethtool_ops->get_rxfh(dev, &rxfh);
+ mutex_unlock(&dev->ethtool->rss_lock);
if (ret) {
current_max = U32_MAX;
goto out_free;
@@ -804,12 +826,67 @@ out_free:
return rc;
}
+struct ethtool_rxfh_context *
+ethtool_rxfh_ctx_alloc(const struct ethtool_ops *ops,
+ u32 indir_size, u32 key_size)
+{
+ size_t indir_bytes, flex_len, key_off, size;
+ struct ethtool_rxfh_context *ctx;
+ u32 priv_bytes, indir_max;
+ u16 key_max;
+
+ key_max = max(key_size, ops->rxfh_key_space);
+ indir_max = max(indir_size, ops->rxfh_indir_space);
+
+ priv_bytes = ALIGN(ops->rxfh_priv_size, sizeof(u32));
+ indir_bytes = array_size(indir_max, sizeof(u32));
+
+ key_off = size_add(priv_bytes, indir_bytes);
+ flex_len = size_add(key_off, key_max);
+ size = struct_size_t(struct ethtool_rxfh_context, data, flex_len);
+
+ ctx = kzalloc(size, GFP_KERNEL_ACCOUNT);
+ if (!ctx)
+ return NULL;
+
+ ctx->indir_size = indir_size;
+ ctx->key_size = key_size;
+ ctx->key_off = key_off;
+ ctx->priv_size = ops->rxfh_priv_size;
+
+ ctx->hfunc = ETH_RSS_HASH_NO_CHANGE;
+ ctx->input_xfrm = RXH_XFRM_NO_CHANGE;
+
+ return ctx;
+}
+
+/* Check if fields configured for flow hash are symmetric - if src is included
+ * so is dst and vice versa.
+ */
+int ethtool_rxfh_config_is_sym(u64 rxfh)
+{
+ bool sym;
+
+ sym = rxfh == (rxfh & (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3));
+ sym &= !!(rxfh & RXH_IP_SRC) == !!(rxfh & RXH_IP_DST);
+ sym &= !!(rxfh & RXH_L4_B_0_1) == !!(rxfh & RXH_L4_B_2_3);
+
+ return sym;
+}
+
int ethtool_check_ops(const struct ethtool_ops *ops)
{
if (WARN_ON(ops->set_coalesce && !ops->supported_coalesce_params))
return -EINVAL;
if (WARN_ON(ops->rxfh_max_num_contexts == 1))
return -EINVAL;
+ if (WARN_ON(ops->supported_input_xfrm && !ops->get_rxfh_fields))
+ return -EINVAL;
+ if (WARN_ON(ops->supported_input_xfrm &&
+ ops->rxfh_per_ctx_fields != ops->rxfh_per_ctx_key))
+ return -EINVAL;
+
/* NOTE: sufficiently insane drivers may swap ethtool_ops at runtime,
* the fact that ops are checked at registration time does not
* mean the ops attached to a netdev later on are sane.
@@ -848,7 +925,7 @@ int ethtool_net_get_ts_info_by_phc(struct net_device *dev,
int err;
if (!ops->get_ts_info)
- return -ENODEV;
+ return -EOPNOTSUPP;
/* Does ptp come from netdev */
ethtool_init_tsinfo(info);
@@ -916,7 +993,7 @@ int ethtool_get_ts_info_by_phc(struct net_device *dev,
int err;
err = ethtool_net_get_ts_info_by_phc(dev, info, hwprov_desc);
- if (err == -ENODEV) {
+ if (err == -ENODEV || err == -EOPNOTSUPP) {
struct phy_device *phy;
phy = ethtool_phy_get_ts_info_by_phc(dev, info, hwprov_desc);
@@ -1079,5 +1156,6 @@ void ethtool_rxfh_context_lost(struct net_device *dev, u32 context_id)
netdev_err(dev, "device error, RSS context %d lost\n", context_id);
ctx = xa_erase(&dev->ethtool->rss_ctx, context_id);
kfree(ctx);
+ ethtool_rss_notify(dev, ETHTOOL_MSG_RSS_DELETE_NTF, context_id);
}
EXPORT_SYMBOL(ethtool_rxfh_context_lost);
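ethtool_rxfh_config_is_sym() above only accepts field sets made up of matched source/destination address and port pairs; anything outside those four fields is rejected for the symmetric input transforms. A few concrete cases, assuming the standard RXH_* flag values:

#include <linux/ethtool.h>

/* Illustrative outcomes of ethtool_rxfh_config_is_sym():
 *
 *   RXH_IP_SRC | RXH_IP_DST                               -> symmetric
 *   RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3 -> symmetric
 *   RXH_IP_SRC                                            -> rejected (no dst)
 *   RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1                -> rejected (no dst port)
 *   RXH_IP_SRC | RXH_IP_DST | RXH_VLAN                    -> rejected (field outside the allowed set)
 */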
diff --git a/net/ethtool/common.h b/net/ethtool/common.h
index b4683d286a5a..1609cf4e53eb 100644
--- a/net/ethtool/common.h
+++ b/net/ethtool/common.h
@@ -43,13 +43,19 @@ bool convert_legacy_settings_to_link_ksettings(
int ethtool_check_max_channel(struct net_device *dev,
struct ethtool_channels channels,
struct genl_info *info);
+struct ethtool_rxfh_context *
+ethtool_rxfh_ctx_alloc(const struct ethtool_ops *ops,
+ u32 indir_size, u32 key_size);
int ethtool_check_rss_ctx_busy(struct net_device *dev, u32 rss_context);
+int ethtool_rxfh_config_is_sym(u64 rxfh);
void ethtool_ringparam_get_cfg(struct net_device *dev,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kparam,
struct netlink_ext_ack *extack);
+int ethtool_get_rx_ring_count(struct net_device *dev);
+
int __ethtool_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info);
int ethtool_get_ts_info_by_phc(struct net_device *dev,
struct kernel_ethtool_ts_info *info,
@@ -74,4 +80,13 @@ int ethtool_get_module_eeprom_call(struct net_device *dev,
bool __ethtool_dev_mm_supported(struct net_device *dev);
+#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
+void ethtool_rss_notify(struct net_device *dev, u32 type, u32 rss_context);
+#else
+static inline void
+ethtool_rss_notify(struct net_device *dev, u32 type, u32 rss_context)
+{
+}
+#endif
+
#endif /* _ETHTOOL_COMMON_H */
diff --git a/net/ethtool/fec.c b/net/ethtool/fec.c
index e7d3f2c352a3..4669e74cbcaa 100644
--- a/net/ethtool/fec.c
+++ b/net/ethtool/fec.c
@@ -17,6 +17,7 @@ struct fec_reply_data {
u64 stats[1 + ETHTOOL_MAX_LANES];
u8 cnt;
} corr, uncorr, corr_bits;
+ struct ethtool_fec_hist fec_stat_hist;
};
#define FEC_REPDATA(__reply_base) \
@@ -113,7 +114,10 @@ static int fec_prepare_data(const struct ethnl_req_info *req_base,
struct ethtool_fec_stats stats;
ethtool_stats_init((u64 *)&stats, sizeof(stats) / 8);
- dev->ethtool_ops->get_fec_stats(dev, &stats);
+ ethtool_stats_init((u64 *)data->fec_stat_hist.values,
+ sizeof(data->fec_stat_hist.values) / 8);
+ dev->ethtool_ops->get_fec_stats(dev, &stats,
+ &data->fec_stat_hist);
fec_stats_recalc(&data->corr, &stats.corrected_blocks);
fec_stats_recalc(&data->uncorr, &stats.uncorrectable_blocks);
@@ -157,13 +161,77 @@ static int fec_reply_size(const struct ethnl_req_info *req_base,
len += nla_total_size(sizeof(u8)) + /* _FEC_AUTO */
nla_total_size(sizeof(u32)); /* _FEC_ACTIVE */
- if (req_base->flags & ETHTOOL_FLAG_STATS)
+ if (req_base->flags & ETHTOOL_FLAG_STATS) {
len += 3 * nla_total_size_64bit(sizeof(u64) *
(1 + ETHTOOL_MAX_LANES));
+ /* add FEC bins information */
+ len += (nla_total_size(0) + /* _A_FEC_HIST */
+ nla_total_size(4) + /* _A_FEC_HIST_BIN_LOW */
+ nla_total_size(4) + /* _A_FEC_HIST_BIN_HI */
+ /* _A_FEC_HIST_BIN_VAL + per-lane values */
+ nla_total_size_64bit(sizeof(u64)) +
+ nla_total_size_64bit(sizeof(u64) * ETHTOOL_MAX_LANES)) *
+ ETHTOOL_FEC_HIST_MAX;
+ }
return len;
}
+static int fec_put_hist(struct sk_buff *skb,
+ const struct ethtool_fec_hist *hist)
+{
+ const struct ethtool_fec_hist_range *ranges = hist->ranges;
+ const struct ethtool_fec_hist_value *values = hist->values;
+ struct nlattr *nest;
+ int i, j;
+ u64 sum;
+
+ if (!ranges)
+ return 0;
+
+ for (i = 0; i < ETHTOOL_FEC_HIST_MAX; i++) {
+ if (i && !ranges[i].low && !ranges[i].high)
+ break;
+
+ if (WARN_ON_ONCE(values[i].sum == ETHTOOL_STAT_NOT_SET &&
+ values[i].per_lane[0] == ETHTOOL_STAT_NOT_SET))
+ break;
+
+ nest = nla_nest_start(skb, ETHTOOL_A_FEC_STAT_HIST);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, ETHTOOL_A_FEC_HIST_BIN_LOW,
+ ranges[i].low) ||
+ nla_put_u32(skb, ETHTOOL_A_FEC_HIST_BIN_HIGH,
+ ranges[i].high))
+ goto err_cancel_hist;
+ sum = 0;
+ for (j = 0; j < ETHTOOL_MAX_LANES; j++) {
+ if (values[i].per_lane[j] == ETHTOOL_STAT_NOT_SET)
+ break;
+ sum += values[i].per_lane[j];
+ }
+ if (nla_put_uint(skb, ETHTOOL_A_FEC_HIST_BIN_VAL,
+ values[i].sum == ETHTOOL_STAT_NOT_SET ?
+ sum : values[i].sum))
+ goto err_cancel_hist;
+ if (j && nla_put_64bit(skb, ETHTOOL_A_FEC_HIST_BIN_VAL_PER_LANE,
+ sizeof(u64) * j,
+ values[i].per_lane,
+ ETHTOOL_A_FEC_HIST_PAD))
+ goto err_cancel_hist;
+
+ nla_nest_end(skb, nest);
+ }
+
+ return 0;
+
+err_cancel_hist:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
static int fec_put_stats(struct sk_buff *skb, const struct fec_reply_data *data)
{
struct nlattr *nest;
@@ -183,6 +251,9 @@ static int fec_put_stats(struct sk_buff *skb, const struct fec_reply_data *data)
data->corr_bits.stats, ETHTOOL_A_FEC_STAT_PAD))
goto err_cancel;
+ if (fec_put_hist(skb, &data->fec_stat_hist))
+ goto err_cancel;
+
nla_nest_end(skb, nest);
return 0;
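fec_put_hist() above walks a driver-provided table of bins, stops at the first all-zero range past index 0, and falls back to summing per-lane counters when the aggregate is not set; ETHTOOL_STAT_NOT_SET marks anything the device does not report. A hedged driver-side sketch under those assumptions (bin boundaries and the register-read helper are hypothetical):

#include <linux/ethtool.h>

/* Hypothetical bins: codewords grouped by number of corrected symbols.
 * The trailing all-zero entry terminates the table for fec_put_hist(). */
static const struct ethtool_fec_hist_range foo_fec_bins[] = {
	{ .low = 0, .high = 0 },
	{ .low = 1, .high = 3 },
	{ .low = 4, .high = 7 },
	{ },
};

static void foo_get_fec_stats(struct net_device *dev,
			      struct ethtool_fec_stats *stats,
			      struct ethtool_fec_hist *hist)
{
	int i;

	/* corrected/uncorrected block counters would be filled in stats
	 * here, as before this change */

	hist->ranges = foo_fec_bins;
	for (i = 0; i < 3; i++)		/* three populated bins */
		/* device-wide counters only; per-lane values stay
		 * ETHTOOL_STAT_NOT_SET as initialized by the core */
		hist->values[i].sum = foo_read_fec_bin(dev, i); /* hypothetical */
}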
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 71c828d0bf31..fa83ddade4f8 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -617,8 +617,8 @@ static int ethtool_set_link_ksettings(struct net_device *dev,
err = dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
if (err >= 0) {
- ethtool_notify(dev, ETHTOOL_MSG_LINKINFO_NTF, NULL);
- ethtool_notify(dev, ETHTOOL_MSG_LINKMODES_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_LINKINFO_NTF);
+ ethtool_notify(dev, ETHTOOL_MSG_LINKMODES_NTF);
}
return err;
}
@@ -708,8 +708,8 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
__ETHTOOL_LINK_MODE_MASK_NU32;
ret = dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
if (ret >= 0) {
- ethtool_notify(dev, ETHTOOL_MSG_LINKINFO_NTF, NULL);
- ethtool_notify(dev, ETHTOOL_MSG_LINKMODES_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_LINKINFO_NTF);
+ ethtool_notify(dev, ETHTOOL_MSG_LINKMODES_NTF);
}
return ret;
}
@@ -981,6 +981,7 @@ static int ethtool_rxnfc_copy_to_user(void __user *useraddr,
static bool flow_type_hashable(u32 flow_type)
{
switch (flow_type) {
+ case ETHER_FLOW:
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
@@ -1013,6 +1014,28 @@ static bool flow_type_hashable(u32 flow_type)
return false;
}
+static bool flow_type_v6(u32 flow_type)
+{
+ switch (flow_type) {
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ case GTPU_V6_FLOW:
+ case GTPC_V6_FLOW:
+ case GTPC_TEID_V6_FLOW:
+ case GTPU_EH_V6_FLOW:
+ case GTPU_UL_V6_FLOW:
+ case GTPU_DL_V6_FLOW:
+ return true;
+ }
+
+ return false;
+}
+
/* When adding a new type, update the assert and, if it's hashable, add it to
* the flow_type_hashable switch case.
*/
@@ -1026,9 +1049,7 @@ static int ethtool_check_xfrm_rxfh(u32 input_xfrm, u64 rxfh)
*/
if ((input_xfrm != RXH_XFRM_NO_CHANGE &&
input_xfrm & (RXH_XFRM_SYM_XOR | RXH_XFRM_SYM_OR_XOR)) &&
- ((rxfh & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) ||
- (!!(rxfh & RXH_IP_SRC) ^ !!(rxfh & RXH_IP_DST)) ||
- (!!(rxfh & RXH_L4_B_0_1) ^ !!(rxfh & RXH_L4_B_2_3))))
+ !ethtool_rxfh_config_is_sym(rxfh))
return -EINVAL;
return 0;
@@ -1037,22 +1058,24 @@ static int ethtool_check_xfrm_rxfh(u32 input_xfrm, u64 rxfh)
static int ethtool_check_flow_types(struct net_device *dev, u32 input_xfrm)
{
const struct ethtool_ops *ops = dev->ethtool_ops;
- struct ethtool_rxnfc info = {
- .cmd = ETHTOOL_GRXFH,
- };
int err;
u32 i;
+ if (!input_xfrm || input_xfrm == RXH_XFRM_NO_CHANGE)
+ return 0;
+
for (i = 0; i < __FLOW_TYPE_COUNT; i++) {
+ struct ethtool_rxfh_fields fields = {
+ .flow_type = i,
+ };
+
if (!flow_type_hashable(i))
continue;
- info.flow_type = i;
- err = ops->get_rxnfc(dev, &info, NULL);
- if (err)
+ if (ops->get_rxfh_fields(dev, &fields))
continue;
- err = ethtool_check_xfrm_rxfh(input_xfrm, info.data);
+ err = ethtool_check_xfrm_rxfh(input_xfrm, fields.data);
if (err)
return err;
}
@@ -1060,6 +1083,92 @@ static int ethtool_check_flow_types(struct net_device *dev, u32 input_xfrm)
return 0;
}
+static noinline_for_stack int
+ethtool_set_rxfh_fields(struct net_device *dev, u32 cmd, void __user *useraddr)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_rxfh_fields fields = {};
+ struct ethtool_rxnfc info;
+ size_t info_size = sizeof(info);
+ int rc;
+
+ if (!ops->set_rxfh_fields)
+ return -EOPNOTSUPP;
+
+ rc = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr);
+ if (rc)
+ return rc;
+
+ if (info.data & RXH_IP6_FL && !flow_type_v6(info.flow_type))
+ return -EINVAL;
+
+ if (info.flow_type & FLOW_RSS && info.rss_context &&
+ !ops->rxfh_per_ctx_fields)
+ return -EINVAL;
+
+ mutex_lock(&dev->ethtool->rss_lock);
+ if (ops->get_rxfh) {
+ struct ethtool_rxfh_param rxfh = {};
+
+ rc = ops->get_rxfh(dev, &rxfh);
+ if (rc)
+ goto exit_unlock;
+
+ rc = ethtool_check_xfrm_rxfh(rxfh.input_xfrm, info.data);
+ if (rc)
+ goto exit_unlock;
+ }
+
+ fields.data = info.data;
+ fields.flow_type = info.flow_type & ~FLOW_RSS;
+ if (info.flow_type & FLOW_RSS)
+ fields.rss_context = info.rss_context;
+
+ rc = ops->set_rxfh_fields(dev, &fields, NULL);
+exit_unlock:
+ mutex_unlock(&dev->ethtool->rss_lock);
+ if (rc)
+ return rc;
+
+ ethtool_rss_notify(dev, ETHTOOL_MSG_RSS_NTF, fields.rss_context);
+ return 0;
+}
+
+static noinline_for_stack int
+ethtool_get_rxfh_fields(struct net_device *dev, u32 cmd, void __user *useraddr)
+{
+ struct ethtool_rxnfc info;
+ size_t info_size = sizeof(info);
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_rxfh_fields fields = {};
+ int ret;
+
+ if (!ops->get_rxfh_fields)
+ return -EOPNOTSUPP;
+
+ ret = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr);
+ if (ret)
+ return ret;
+
+ if (info.flow_type & FLOW_RSS && info.rss_context &&
+ !ops->rxfh_per_ctx_fields)
+ return -EINVAL;
+
+ fields.flow_type = info.flow_type & ~FLOW_RSS;
+ if (info.flow_type & FLOW_RSS)
+ fields.rss_context = info.rss_context;
+
+ mutex_lock(&dev->ethtool->rss_lock);
+ ret = ops->get_rxfh_fields(dev, &fields);
+ mutex_unlock(&dev->ethtool->rss_lock);
+ if (ret < 0)
+ return ret;
+
+ info.data = fields.data;
+
+ return ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, NULL);
+}
+
static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
u32 cmd, void __user *useraddr)
{
@@ -1088,18 +1197,6 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
return -EINVAL;
}
- if (cmd == ETHTOOL_SRXFH && ops->get_rxfh) {
- struct ethtool_rxfh_param rxfh = {};
-
- rc = ops->get_rxfh(dev, &rxfh);
- if (rc)
- return rc;
-
- rc = ethtool_check_xfrm_rxfh(rxfh.input_xfrm, info.data);
- if (rc)
- return rc;
- }
-
rc = ops->set_rxnfc(dev, &info);
if (rc)
return rc;
@@ -1111,18 +1208,41 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
return 0;
}
+static noinline_for_stack int ethtool_get_rxrings(struct net_device *dev,
+ u32 cmd,
+ void __user *useraddr)
+{
+ struct ethtool_rxnfc info;
+ size_t info_size;
+ int ret;
+
+ info_size = sizeof(info);
+ ret = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr);
+ if (ret)
+ return ret;
+
+ ret = ethtool_get_rx_ring_count(dev);
+ if (ret < 0)
+ return ret;
+
+ info.data = ret;
+
+ return ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, NULL);
+}
+
static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
u32 cmd, void __user *useraddr)
{
- struct ethtool_rxnfc info;
- size_t info_size = sizeof(info);
const struct ethtool_ops *ops = dev->ethtool_ops;
- int ret;
+ struct ethtool_rxnfc info;
void *rule_buf = NULL;
+ size_t info_size;
+ int ret;
if (!ops->get_rxnfc)
return -EOPNOTSUPP;
+ info_size = sizeof(info);
ret = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr);
if (ret)
return ret;
@@ -1149,8 +1269,8 @@ err_out:
}
static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr,
- struct ethtool_rxnfc *rx_rings,
- u32 size)
+ int num_rx_rings,
+ u32 size)
{
int i;
@@ -1159,7 +1279,7 @@ static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr,
/* Validate ring indices */
for (i = 0; i < size; i++)
- if (indir[i] >= rx_rings->data)
+ if (indir[i] >= num_rx_rings)
return -EINVAL;
return 0;
@@ -1209,7 +1329,9 @@ static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
if (!rxfh.indir)
return -ENOMEM;
+ mutex_lock(&dev->ethtool->rss_lock);
ret = dev->ethtool_ops->get_rxfh(dev, &rxfh);
+ mutex_unlock(&dev->ethtool->rss_lock);
if (ret)
goto out;
if (copy_to_user(useraddr +
@@ -1228,13 +1350,12 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
const struct ethtool_ops *ops = dev->ethtool_ops;
struct ethtool_rxfh_param rxfh_dev = {};
struct netlink_ext_ack *extack = NULL;
- struct ethtool_rxnfc rx_rings;
+ int num_rx_rings;
u32 user_size, i;
int ret;
u32 ringidx_offset = offsetof(struct ethtool_rxfh_indir, ring_index[0]);
- if (!ops->get_rxfh_indir_size || !ops->set_rxfh ||
- !ops->get_rxnfc)
+ if (!ops->get_rxfh_indir_size || !ops->set_rxfh)
return -EOPNOTSUPP;
rxfh_dev.indir_size = ops->get_rxfh_indir_size(dev);
@@ -1254,29 +1375,32 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
if (!rxfh_dev.indir)
return -ENOMEM;
- rx_rings.cmd = ETHTOOL_GRXRINGS;
- ret = ops->get_rxnfc(dev, &rx_rings, NULL);
- if (ret)
+ num_rx_rings = ethtool_get_rx_ring_count(dev);
+ if (num_rx_rings < 0) {
+ ret = num_rx_rings;
goto out;
+ }
if (user_size == 0) {
u32 *indir = rxfh_dev.indir;
for (i = 0; i < rxfh_dev.indir_size; i++)
- indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
+ indir[i] = ethtool_rxfh_indir_default(i, num_rx_rings);
} else {
ret = ethtool_copy_validate_indir(rxfh_dev.indir,
useraddr + ringidx_offset,
- &rx_rings,
+ num_rx_rings,
rxfh_dev.indir_size);
if (ret)
goto out;
}
rxfh_dev.hfunc = ETH_RSS_HASH_NO_CHANGE;
+
+ mutex_lock(&dev->ethtool->rss_lock);
ret = ops->set_rxfh(dev, &rxfh_dev, extack);
if (ret)
- goto out;
+ goto out_unlock;
/* indicate whether rxfh was set to default */
if (user_size == 0)
@@ -1284,6 +1408,8 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
else
dev->priv_flags |= IFF_RXFH_CONFIGURED;
+out_unlock:
+ mutex_unlock(&dev->ethtool->rss_lock);
out:
kfree(rxfh_dev.indir);
return ret;
@@ -1319,8 +1445,7 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd32)
return -EINVAL;
/* Most drivers don't handle rss_context, check it's 0 as well */
- if (rxfh.rss_context && !(ops->cap_rss_ctx_supported ||
- ops->create_rxfh_context))
+ if (rxfh.rss_context && !ops->create_rxfh_context)
return -EOPNOTSUPP;
rxfh.indir_size = rxfh_dev.indir_size;
@@ -1344,6 +1469,7 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
if (user_key_size)
rxfh_dev.key = rss_config + indir_bytes;
+ mutex_lock(&dev->ethtool->rss_lock);
if (rxfh.rss_context) {
ctx = xa_load(&dev->ethtool->rss_ctx, rxfh.rss_context);
if (!ctx) {
@@ -1389,45 +1515,12 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
ret = -EFAULT;
}
out:
+ mutex_unlock(&dev->ethtool->rss_lock);
kfree(rss_config);
return ret;
}
-static struct ethtool_rxfh_context *
-ethtool_rxfh_ctx_alloc(const struct ethtool_ops *ops,
- u32 indir_size, u32 key_size)
-{
- size_t indir_bytes, flex_len, key_off, size;
- struct ethtool_rxfh_context *ctx;
- u32 priv_bytes, indir_max;
- u16 key_max;
-
- key_max = max(key_size, ops->rxfh_key_space);
- indir_max = max(indir_size, ops->rxfh_indir_space);
-
- priv_bytes = ALIGN(ops->rxfh_priv_size, sizeof(u32));
- indir_bytes = array_size(indir_max, sizeof(u32));
-
- key_off = size_add(priv_bytes, indir_bytes);
- flex_len = size_add(key_off, key_max);
- size = struct_size_t(struct ethtool_rxfh_context, data, flex_len);
-
- ctx = kzalloc(size, GFP_KERNEL_ACCOUNT);
- if (!ctx)
- return NULL;
-
- ctx->indir_size = indir_size;
- ctx->key_size = key_size;
- ctx->key_off = key_off;
- ctx->priv_size = ops->rxfh_priv_size;
-
- ctx->hfunc = ETH_RSS_HASH_NO_CHANGE;
- ctx->input_xfrm = RXH_XFRM_NO_CHANGE;
-
- return ctx;
-}
-
static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
void __user *useraddr)
{
@@ -1438,14 +1531,14 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param rxfh_dev = {};
struct ethtool_rxfh_context *ctx = NULL;
struct netlink_ext_ack *extack = NULL;
- struct ethtool_rxnfc rx_rings;
struct ethtool_rxfh rxfh;
- bool locked = false; /* dev->ethtool->rss_lock taken */
bool create = false;
+ int num_rx_rings;
u8 *rss_config;
+ int ntf = 0;
int ret;
- if (!ops->get_rxnfc || !ops->set_rxfh)
+ if (!ops->set_rxfh)
return -EOPNOTSUPP;
if (ops->get_rxfh_indir_size)
@@ -1460,8 +1553,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd32)
return -EINVAL;
/* Most drivers don't handle rss_context, check it's 0 as well */
- if (rxfh.rss_context && !(ops->cap_rss_ctx_supported ||
- ops->create_rxfh_context))
+ if (rxfh.rss_context && !ops->create_rxfh_context)
return -EOPNOTSUPP;
/* Check input data transformation capabilities */
if (rxfh.input_xfrm && rxfh.input_xfrm != RXH_XFRM_SYM_XOR &&
@@ -1489,10 +1581,6 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
rxfh.input_xfrm == RXH_XFRM_NO_CHANGE))
return -EINVAL;
- ret = ethtool_check_flow_types(dev, rxfh.input_xfrm);
- if (ret)
- return ret;
-
indir_bytes = dev_indir_size * sizeof(rxfh_dev.indir[0]);
/* Check settings which may be global rather than per RSS-context */
@@ -1506,10 +1594,11 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
if (!rss_config)
return -ENOMEM;
- rx_rings.cmd = ETHTOOL_GRXRINGS;
- ret = ops->get_rxnfc(dev, &rx_rings, NULL);
- if (ret)
- goto out;
+ num_rx_rings = ethtool_get_rx_ring_count(dev);
+ if (num_rx_rings < 0) {
+ ret = num_rx_rings;
+ goto out_free;
+ }
/* rxfh.indir_size == 0 means reset the indir table to default (master
* context) or delete the context (other RSS contexts).
@@ -1522,10 +1611,10 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
rxfh_dev.indir_size = dev_indir_size;
ret = ethtool_copy_validate_indir(rxfh_dev.indir,
useraddr + rss_cfg_offset,
- &rx_rings,
+ num_rx_rings,
rxfh.indir_size);
if (ret)
- goto out;
+ goto out_free;
} else if (rxfh.indir_size == 0) {
if (rxfh.rss_context == 0) {
u32 *indir;
@@ -1534,7 +1623,8 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
rxfh_dev.indir_size = dev_indir_size;
indir = rxfh_dev.indir;
for (i = 0; i < dev_indir_size; i++)
- indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
+ indir[i] =
+ ethtool_rxfh_indir_default(i, num_rx_rings);
} else {
rxfh_dev.rss_delete = true;
}
@@ -1547,86 +1637,81 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
useraddr + rss_cfg_offset + user_indir_len,
rxfh.key_size)) {
ret = -EFAULT;
- goto out;
+ goto out_free;
}
}
- if (rxfh.rss_context) {
- mutex_lock(&dev->ethtool->rss_lock);
- locked = true;
- }
+ mutex_lock(&dev->ethtool->rss_lock);
+
+ ret = ethtool_check_flow_types(dev, rxfh.input_xfrm);
+ if (ret)
+ goto out_unlock;
if (rxfh.rss_context && rxfh_dev.rss_delete) {
ret = ethtool_check_rss_ctx_busy(dev, rxfh.rss_context);
if (ret)
- goto out;
+ goto out_unlock;
}
if (create) {
+ u32 limit, ctx_id;
+
if (rxfh_dev.rss_delete) {
ret = -EINVAL;
- goto out;
+ goto out_unlock;
}
ctx = ethtool_rxfh_ctx_alloc(ops, dev_indir_size, dev_key_size);
if (!ctx) {
ret = -ENOMEM;
- goto out;
+ goto out_unlock;
}
- if (ops->create_rxfh_context) {
- u32 limit = ops->rxfh_max_num_contexts ?: U32_MAX;
- u32 ctx_id;
-
- /* driver uses new API, core allocates ID */
- ret = xa_alloc(&dev->ethtool->rss_ctx, &ctx_id, ctx,
- XA_LIMIT(1, limit - 1),
- GFP_KERNEL_ACCOUNT);
- if (ret < 0) {
- kfree(ctx);
- goto out;
- }
- WARN_ON(!ctx_id); /* can't happen */
- rxfh.rss_context = ctx_id;
+ limit = ops->rxfh_max_num_contexts ?: U32_MAX;
+ ret = xa_alloc(&dev->ethtool->rss_ctx, &ctx_id, ctx,
+ XA_LIMIT(1, limit - 1), GFP_KERNEL_ACCOUNT);
+ if (ret < 0) {
+ kfree(ctx);
+ goto out_unlock;
}
+ WARN_ON(!ctx_id); /* can't happen */
+ rxfh.rss_context = ctx_id;
} else if (rxfh.rss_context) {
ctx = xa_load(&dev->ethtool->rss_ctx, rxfh.rss_context);
if (!ctx) {
ret = -ENOENT;
- goto out;
+ goto out_unlock;
}
}
rxfh_dev.hfunc = rxfh.hfunc;
rxfh_dev.rss_context = rxfh.rss_context;
rxfh_dev.input_xfrm = rxfh.input_xfrm;
- if (rxfh.rss_context && ops->create_rxfh_context) {
- if (create) {
- ret = ops->create_rxfh_context(dev, ctx, &rxfh_dev,
- extack);
- /* Make sure driver populates defaults */
- WARN_ON_ONCE(!ret && !rxfh_dev.key &&
- ops->rxfh_per_ctx_key &&
- !memchr_inv(ethtool_rxfh_context_key(ctx),
- 0, ctx->key_size));
- } else if (rxfh_dev.rss_delete) {
- ret = ops->remove_rxfh_context(dev, ctx,
- rxfh.rss_context,
- extack);
- } else {
- ret = ops->modify_rxfh_context(dev, ctx, &rxfh_dev,
- extack);
- }
- } else {
+ if (!rxfh.rss_context) {
+ ntf = ETHTOOL_MSG_RSS_NTF;
ret = ops->set_rxfh(dev, &rxfh_dev, extack);
+ } else if (create) {
+ ntf = ETHTOOL_MSG_RSS_CREATE_NTF;
+ ret = ops->create_rxfh_context(dev, ctx, &rxfh_dev, extack);
+ /* Make sure driver populates defaults */
+ WARN_ON_ONCE(!ret && !rxfh_dev.key && ops->rxfh_per_ctx_key &&
+ !memchr_inv(ethtool_rxfh_context_key(ctx), 0,
+ ctx->key_size));
+ } else if (rxfh_dev.rss_delete) {
+ ntf = ETHTOOL_MSG_RSS_DELETE_NTF;
+ ret = ops->remove_rxfh_context(dev, ctx, rxfh.rss_context,
+ extack);
+ } else {
+ ntf = ETHTOOL_MSG_RSS_NTF;
+ ret = ops->modify_rxfh_context(dev, ctx, &rxfh_dev, extack);
}
if (ret) {
+ ntf = 0;
if (create) {
/* failed to create, free our new tracking entry */
- if (ops->create_rxfh_context)
- xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context);
+ xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context);
kfree(ctx);
}
- goto out;
+ goto out_unlock;
}
if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, rss_context),
@@ -1641,36 +1726,6 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
dev->priv_flags |= IFF_RXFH_CONFIGURED;
}
/* Update rss_ctx tracking */
- if (create && !ops->create_rxfh_context) {
- /* driver uses old API, it chose context ID */
- if (WARN_ON(xa_load(&dev->ethtool->rss_ctx, rxfh_dev.rss_context))) {
- /* context ID reused, our tracking is screwed */
- kfree(ctx);
- goto out;
- }
- /* Allocate the exact ID the driver gave us */
- if (xa_is_err(xa_store(&dev->ethtool->rss_ctx, rxfh_dev.rss_context,
- ctx, GFP_KERNEL))) {
- kfree(ctx);
- goto out;
- }
-
- /* Fetch the defaults for the old API, in the new API drivers
- * should write defaults into ctx themselves.
- */
- rxfh_dev.indir = (u32 *)rss_config;
- rxfh_dev.indir_size = dev_indir_size;
-
- rxfh_dev.key = rss_config + indir_bytes;
- rxfh_dev.key_size = dev_key_size;
-
- ret = ops->get_rxfh(dev, &rxfh_dev);
- if (WARN_ON(ret)) {
- xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context);
- kfree(ctx);
- goto out;
- }
- }
if (rxfh_dev.rss_delete) {
WARN_ON(xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context) != ctx);
kfree(ctx);
@@ -1693,10 +1748,12 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
ctx->input_xfrm = rxfh_dev.input_xfrm;
}
-out:
- if (locked)
- mutex_unlock(&dev->ethtool->rss_lock);
+out_unlock:
+ mutex_unlock(&dev->ethtool->rss_lock);
+out_free:
kfree(rss_config);
+ if (ntf)
+ ethtool_rss_notify(dev, ntf, rxfh.rss_context);
return ret;
}
@@ -1808,7 +1865,7 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
return ret;
dev->ethtool->wol_enabled = !!wol.wolopts;
- ethtool_notify(dev, ETHTOOL_MSG_WOL_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_WOL_NTF);
return 0;
}
@@ -1884,7 +1941,7 @@ static int ethtool_set_eee(struct net_device *dev, char __user *useraddr)
eee_to_keee(&keee, &eee);
ret = dev->ethtool_ops->set_eee(dev, &keee);
if (!ret)
- ethtool_notify(dev, ETHTOOL_MSG_EEE_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_EEE_NTF);
return ret;
}
@@ -2124,7 +2181,7 @@ static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev,
ret = dev->ethtool_ops->set_coalesce(dev, &coalesce, &kernel_coalesce,
NULL);
if (!ret)
- ethtool_notify(dev, ETHTOOL_MSG_COALESCE_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_COALESCE_NTF);
return ret;
}
@@ -2168,7 +2225,7 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
&kernel_ringparam, NULL);
if (!ret)
- ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF);
return ret;
}
@@ -2235,7 +2292,7 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
ret = dev->ethtool_ops->set_channels(dev, &channels);
if (!ret)
- ethtool_notify(dev, ETHTOOL_MSG_CHANNELS_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_CHANNELS_NTF);
return ret;
}
@@ -2266,7 +2323,7 @@ static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr)
ret = dev->ethtool_ops->set_pauseparam(dev, &pauseparam);
if (!ret)
- ethtool_notify(dev, ETHTOOL_MSG_PAUSE_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_PAUSE_NTF);
return ret;
}
@@ -3268,7 +3325,7 @@ __dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr,
rc = ethtool_set_value_void(dev, useraddr,
dev->ethtool_ops->set_msglevel);
if (!rc)
- ethtool_notify(dev, ETHTOOL_MSG_DEBUG_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_DEBUG_NTF);
break;
case ETHTOOL_GEEE:
rc = ethtool_get_eee(dev, useraddr);
@@ -3332,20 +3389,26 @@ __dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr,
rc = ethtool_get_value(dev, useraddr, ethcmd,
dev->ethtool_ops->get_priv_flags);
if (!rc)
- ethtool_notify(dev, ETHTOOL_MSG_PRIVFLAGS_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_PRIVFLAGS_NTF);
break;
case ETHTOOL_SPFLAGS:
rc = ethtool_set_value(dev, useraddr,
dev->ethtool_ops->set_priv_flags);
break;
case ETHTOOL_GRXFH:
+ rc = ethtool_get_rxfh_fields(dev, ethcmd, useraddr);
+ break;
+ case ETHTOOL_SRXFH:
+ rc = ethtool_set_rxfh_fields(dev, ethcmd, useraddr);
+ break;
case ETHTOOL_GRXRINGS:
+ rc = ethtool_get_rxrings(dev, ethcmd, useraddr);
+ break;
case ETHTOOL_GRXCLSRLCNT:
case ETHTOOL_GRXCLSRULE:
case ETHTOOL_GRXCLSRLALL:
rc = ethtool_get_rxnfc(dev, ethcmd, useraddr);
break;
- case ETHTOOL_SRXFH:
case ETHTOOL_SRXCLSRLDEL:
case ETHTOOL_SRXCLSRLINS:
rc = ethtool_set_rxnfc(dev, ethcmd, useraddr);
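The ioctl path above now resolves the RX ring count once via ethtool_get_rx_ring_count() and rebuilds the default indirection table from it with ethtool_rxfh_indir_default(). A minimal sketch of that round-robin fill, illustrative only (the in-tree helper may differ in detail):

	/* Illustrative sketch: spread indirection-table slots across the
	 * available RX rings round-robin, the way the reset path above
	 * rebuilds the default table.
	 */
	static void example_fill_default_indir(u32 *indir, u32 indir_size,
					       u32 num_rx_rings)
	{
		u32 i;

		for (i = 0; i < indir_size; i++)
			indir[i] = i % num_rx_rings;
	}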
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index 9de828df46cd..2f813f25f07e 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -81,6 +81,12 @@ static void ethnl_sock_priv_destroy(void *priv)
}
}
+u32 ethnl_bcast_seq_next(void)
+{
+ ASSERT_RTNL();
+ return ++ethnl_bcast_seq;
+}
+
int ethnl_ops_begin(struct net_device *dev)
{
int ret;
@@ -405,6 +411,7 @@ ethnl_default_requests[__ETHTOOL_MSG_USER_CNT] = {
[ETHTOOL_MSG_PSE_GET] = &ethnl_pse_request_ops,
[ETHTOOL_MSG_PSE_SET] = &ethnl_pse_request_ops,
[ETHTOOL_MSG_RSS_GET] = &ethnl_rss_request_ops,
+ [ETHTOOL_MSG_RSS_SET] = &ethnl_rss_request_ops,
[ETHTOOL_MSG_PLCA_GET_CFG] = &ethnl_plca_cfg_request_ops,
[ETHTOOL_MSG_PLCA_SET_CFG] = &ethnl_plca_cfg_request_ops,
[ETHTOOL_MSG_PLCA_GET_STATUS] = &ethnl_plca_status_request_ops,
@@ -455,10 +462,15 @@ static int ethnl_default_parse(struct ethnl_req_info *req_info,
if (request_ops->parse_request) {
ret = request_ops->parse_request(req_info, tb, info->extack);
if (ret < 0)
- return ret;
+ goto err_dev;
}
return 0;
+
+err_dev:
+ netdev_put(req_info->dev, &req_info->dev_tracker);
+ req_info->dev = NULL;
+ return ret;
}
/**
@@ -508,7 +520,7 @@ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info)
ret = ethnl_default_parse(req_info, info, ops, !ops->allow_nodev_do);
if (ret < 0)
- goto err_dev;
+ goto err_free;
ethnl_init_reply_data(reply_data, ops, req_info->dev);
rtnl_lock();
@@ -554,6 +566,7 @@ err_cleanup:
ops->cleanup_data(reply_data);
err_dev:
netdev_put(req_info->dev, &req_info->dev_tracker);
+err_free:
kfree(reply_data);
kfree(req_info);
return ret;
@@ -656,6 +669,8 @@ static int ethnl_default_start(struct netlink_callback *cb)
}
ret = ethnl_default_parse(req_info, &info->info, ops, false);
+ if (ret < 0)
+ goto free_reply_data;
if (req_info->dev) {
/* We ignore device specification in dump requests but as the
* same parser as for non-dump (doit) requests is used, it
@@ -664,8 +679,6 @@ static int ethnl_default_start(struct netlink_callback *cb)
netdev_put(req_info->dev, &req_info->dev_tracker);
req_info->dev = NULL;
}
- if (ret < 0)
- goto free_reply_data;
ctx->ops = ops;
ctx->req_info = req_info;
@@ -714,13 +727,13 @@ static int ethnl_perphy_start(struct netlink_callback *cb)
* the dev's ifindex, .dumpit() will grab and release the netdev itself.
*/
ret = ethnl_default_parse(req_info, &info->info, ops, false);
+ if (ret < 0)
+ goto free_reply_data;
if (req_info->dev) {
phy_ctx->ifindex = req_info->dev->ifindex;
netdev_put(req_info->dev, &req_info->dev_tracker);
req_info->dev = NULL;
}
- if (ret < 0)
- goto free_reply_data;
ctx->ops = ops;
ctx->req_info = req_info;
@@ -863,8 +876,8 @@ static int ethnl_default_done(struct netlink_callback *cb)
static int ethnl_default_set_doit(struct sk_buff *skb, struct genl_info *info)
{
const struct ethnl_request_ops *ops;
- struct ethnl_req_info req_info = {};
const u8 cmd = info->genlhdr->cmd;
+ struct ethnl_req_info *req_info;
struct net_device *dev;
int ret;
@@ -874,20 +887,22 @@ static int ethnl_default_set_doit(struct sk_buff *skb, struct genl_info *info)
if (GENL_REQ_ATTR_CHECK(info, ops->hdr_attr))
return -EINVAL;
- ret = ethnl_parse_header_dev_get(&req_info, info->attrs[ops->hdr_attr],
- genl_info_net(info), info->extack,
- true);
+ req_info = kzalloc(ops->req_info_size, GFP_KERNEL);
+ if (!req_info)
+ return -ENOMEM;
+
+ ret = ethnl_default_parse(req_info, info, ops, true);
if (ret < 0)
- return ret;
+ goto out_free_req;
if (ops->set_validate) {
- ret = ops->set_validate(&req_info, info);
+ ret = ops->set_validate(req_info, info);
/* 0 means nothing to do */
if (ret <= 0)
goto out_dev;
}
- dev = req_info.dev;
+ dev = req_info->dev;
rtnl_lock();
netdev_lock_ops(dev);
@@ -902,14 +917,14 @@ static int ethnl_default_set_doit(struct sk_buff *skb, struct genl_info *info)
if (ret < 0)
goto out_free_cfg;
- ret = ops->set(&req_info, info);
+ ret = ops->set(req_info, info);
if (ret < 0)
goto out_ops;
swap(dev->cfg, dev->cfg_pending);
if (!ret)
goto out_ops;
- ethtool_notify(dev, ops->set_ntf_cmd, NULL);
+ ethnl_notify(dev, ops->set_ntf_cmd, req_info);
ret = 0;
out_ops:
@@ -921,7 +936,9 @@ out_tie_cfg:
netdev_unlock_ops(dev);
rtnl_unlock();
out_dev:
- ethnl_parse_header_dev_put(&req_info);
+ ethnl_parse_header_dev_put(req_info);
+out_free_req:
+ kfree(req_info);
return ret;
}
@@ -942,11 +959,13 @@ ethnl_default_notify_ops[ETHTOOL_MSG_KERNEL_MAX + 1] = {
[ETHTOOL_MSG_MODULE_NTF] = &ethnl_module_request_ops,
[ETHTOOL_MSG_PLCA_NTF] = &ethnl_plca_cfg_request_ops,
[ETHTOOL_MSG_MM_NTF] = &ethnl_mm_request_ops,
+ [ETHTOOL_MSG_RSS_NTF] = &ethnl_rss_request_ops,
+ [ETHTOOL_MSG_RSS_CREATE_NTF] = &ethnl_rss_request_ops,
};
/* default notification handler */
static void ethnl_default_notify(struct net_device *dev, unsigned int cmd,
- const void *data)
+ const struct ethnl_req_info *orig_req_info)
{
struct ethnl_reply_data *reply_data;
const struct ethnl_request_ops *ops;
@@ -975,6 +994,11 @@ static void ethnl_default_notify(struct net_device *dev, unsigned int cmd,
req_info->dev = dev;
req_info->flags |= ETHTOOL_FLAG_COMPACT_BITSETS;
+ if (orig_req_info) {
+ req_info->phy_index = orig_req_info->phy_index;
+ memcpy(&req_info[1], &orig_req_info[1],
+ ops->req_info_size - sizeof(*req_info));
+ }
netdev_ops_assert_locked(dev);
@@ -1025,7 +1049,7 @@ err_rep:
/* notifications */
typedef void (*ethnl_notify_handler_t)(struct net_device *dev, unsigned int cmd,
- const void *data);
+ const struct ethnl_req_info *req_info);
static const ethnl_notify_handler_t ethnl_notify_handlers[] = {
[ETHTOOL_MSG_LINKINFO_NTF] = ethnl_default_notify,
@@ -1043,9 +1067,12 @@ static const ethnl_notify_handler_t ethnl_notify_handlers[] = {
[ETHTOOL_MSG_MODULE_NTF] = ethnl_default_notify,
[ETHTOOL_MSG_PLCA_NTF] = ethnl_default_notify,
[ETHTOOL_MSG_MM_NTF] = ethnl_default_notify,
+ [ETHTOOL_MSG_RSS_NTF] = ethnl_default_notify,
+ [ETHTOOL_MSG_RSS_CREATE_NTF] = ethnl_default_notify,
};
-void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data)
+void ethnl_notify(struct net_device *dev, unsigned int cmd,
+ const struct ethnl_req_info *req_info)
{
if (unlikely(!ethnl_ok))
return;
@@ -1053,18 +1080,23 @@ void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data)
if (likely(cmd < ARRAY_SIZE(ethnl_notify_handlers) &&
ethnl_notify_handlers[cmd]))
- ethnl_notify_handlers[cmd](dev, cmd, data);
+ ethnl_notify_handlers[cmd](dev, cmd, req_info);
else
WARN_ONCE(1, "notification %u not implemented (dev=%s)\n",
cmd, netdev_name(dev));
}
+
+void ethtool_notify(struct net_device *dev, unsigned int cmd)
+{
+ ethnl_notify(dev, cmd, NULL);
+}
EXPORT_SYMBOL(ethtool_notify);
static void ethnl_notify_features(struct netdev_notifier_info *info)
{
struct net_device *dev = netdev_notifier_info_to_dev(info);
- ethtool_notify(dev, ETHTOOL_MSG_FEATURES_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_FEATURES_NTF);
}
static int ethnl_netdev_event(struct notifier_block *this, unsigned long event,
@@ -1481,6 +1513,27 @@ static const struct genl_ops ethtool_genl_ops[] = {
.policy = ethnl_tsconfig_set_policy,
.maxattr = ARRAY_SIZE(ethnl_tsconfig_set_policy) - 1,
},
+ {
+ .cmd = ETHTOOL_MSG_RSS_SET,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_default_set_doit,
+ .policy = ethnl_rss_set_policy,
+ .maxattr = ARRAY_SIZE(ethnl_rss_set_policy) - 1,
+ },
+ {
+ .cmd = ETHTOOL_MSG_RSS_CREATE_ACT,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_rss_create_doit,
+ .policy = ethnl_rss_create_policy,
+ .maxattr = ARRAY_SIZE(ethnl_rss_create_policy) - 1,
+ },
+ {
+ .cmd = ETHTOOL_MSG_RSS_DELETE_ACT,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_rss_delete_doit,
+ .policy = ethnl_rss_delete_policy,
+ .maxattr = ARRAY_SIZE(ethnl_rss_delete_policy) - 1,
+ },
};
static const struct genl_multicast_group ethtool_nl_mcgrps[] = {
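The SET path now heap-allocates the full per-command request info and hands it to ethnl_notify(), so ethnl_default_notify() can copy everything past the common base (the memcpy from &orig_req_info[1] above) into the notification's request info. That only works because every per-command struct embeds struct ethnl_req_info as its first member; a sketch of the layout assumed here, modelled on the RSS request info (example_req_info is a hypothetical name):

	/* Sketch of the layout ethnl_default_notify() relies on: the common
	 * base comes first, command-specific fields follow and are copied
	 * wholesale from the original SET request into the notification.
	 */
	struct example_req_info {
		struct ethnl_req_info	base;		/* must be the first member */
		u32			rss_context;	/* command-specific tail */
	};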
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index 91b953924af3..1d4f9ecb3d26 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -10,6 +10,7 @@
struct ethnl_req_info;
+u32 ethnl_bcast_seq_next(void);
int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info,
const struct nlattr *nest, struct net *net,
struct netlink_ext_ack *extack,
@@ -23,6 +24,8 @@ void *ethnl_dump_put(struct sk_buff *skb, struct netlink_callback *cb, u8 cmd);
void *ethnl_bcastmsg_put(struct sk_buff *skb, u8 cmd);
void *ethnl_unicast_put(struct sk_buff *skb, u32 portid, u32 seq, u8 cmd);
int ethnl_multicast(struct sk_buff *skb, struct net_device *dev);
+void ethnl_notify(struct net_device *dev, unsigned int cmd,
+ const struct ethnl_req_info *req_info);
/**
* ethnl_strz_size() - calculate attribute length for fixed size string
@@ -337,6 +340,8 @@ int ethnl_sock_priv_set(struct sk_buff *skb, struct net_device *dev, u32 portid,
* header is already filled on entry, the rest up to @repdata_offset
* is zero initialized. This callback should only modify type specific
* request info by parsed attributes from request message.
+ * Called for both GET and SET. Information parsed for SET will
+ * be conveyed to the req_info used during NTF generation.
* @prepare_data:
* Retrieve and prepare data needed to compose a reply message. Calls to
* ethtool_ops handlers are limited to this callback. Common reply data
@@ -463,7 +468,7 @@ extern const struct nla_policy ethnl_channels_set_policy[ETHTOOL_A_CHANNELS_COMB
extern const struct nla_policy ethnl_coalesce_get_policy[ETHTOOL_A_COALESCE_HEADER + 1];
extern const struct nla_policy ethnl_coalesce_set_policy[ETHTOOL_A_COALESCE_MAX + 1];
extern const struct nla_policy ethnl_pause_get_policy[ETHTOOL_A_PAUSE_STATS_SRC + 1];
-extern const struct nla_policy ethnl_pause_set_policy[ETHTOOL_A_PAUSE_TX + 1];
+extern const struct nla_policy ethnl_pause_set_policy[ETHTOOL_A_PAUSE_STATS_SRC + 1];
extern const struct nla_policy ethnl_eee_get_policy[ETHTOOL_A_EEE_HEADER + 1];
extern const struct nla_policy ethnl_eee_set_policy[ETHTOOL_A_EEE_TX_LPI_TIMER + 1];
extern const struct nla_policy ethnl_tsinfo_get_policy[ETHTOOL_A_TSINFO_MAX + 1];
@@ -480,6 +485,9 @@ extern const struct nla_policy ethnl_module_set_policy[ETHTOOL_A_MODULE_POWER_MO
extern const struct nla_policy ethnl_pse_get_policy[ETHTOOL_A_PSE_HEADER + 1];
extern const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1];
extern const struct nla_policy ethnl_rss_get_policy[ETHTOOL_A_RSS_START_CONTEXT + 1];
+extern const struct nla_policy ethnl_rss_set_policy[ETHTOOL_A_RSS_FLOW_HASH + 1];
+extern const struct nla_policy ethnl_rss_create_policy[ETHTOOL_A_RSS_INPUT_XFRM + 1];
+extern const struct nla_policy ethnl_rss_delete_policy[ETHTOOL_A_RSS_CONTEXT + 1];
extern const struct nla_policy ethnl_plca_get_cfg_policy[ETHTOOL_A_PLCA_HEADER + 1];
extern const struct nla_policy ethnl_plca_set_cfg_policy[ETHTOOL_A_PLCA_MAX + 1];
extern const struct nla_policy ethnl_plca_get_status_policy[ETHTOOL_A_PLCA_HEADER + 1];
@@ -502,6 +510,8 @@ int ethnl_rss_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
int ethnl_tsinfo_start(struct netlink_callback *cb);
int ethnl_tsinfo_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
int ethnl_tsinfo_done(struct netlink_callback *cb);
+int ethnl_rss_create_doit(struct sk_buff *skb, struct genl_info *info);
+int ethnl_rss_delete_doit(struct sk_buff *skb, struct genl_info *info);
extern const char stats_std_names[__ETHTOOL_STATS_CNT][ETH_GSTRING_LEN];
extern const char stats_eth_phy_names[__ETHTOOL_A_STATS_ETH_PHY_CNT][ETH_GSTRING_LEN];
diff --git a/net/ethtool/pause.c b/net/ethtool/pause.c
index f7c847aeb1a2..0f9af1e66548 100644
--- a/net/ethtool/pause.c
+++ b/net/ethtool/pause.c
@@ -168,6 +168,7 @@ const struct nla_policy ethnl_pause_set_policy[] = {
[ETHTOOL_A_PAUSE_AUTONEG] = { .type = NLA_U8 },
[ETHTOOL_A_PAUSE_RX] = { .type = NLA_U8 },
[ETHTOOL_A_PAUSE_TX] = { .type = NLA_U8 },
+ [ETHTOOL_A_PAUSE_STATS_SRC] = { .type = NLA_REJECT },
};
static int
diff --git a/net/ethtool/pse-pd.c b/net/ethtool/pse-pd.c
index 4f6b99eab2a6..24def9c9dd54 100644
--- a/net/ethtool/pse-pd.c
+++ b/net/ethtool/pse-pd.c
@@ -11,6 +11,7 @@
#include "netlink.h"
#include <linux/ethtool_netlink.h>
#include <linux/ethtool.h>
+#include <linux/export.h>
#include <linux/phy.h>
struct pse_req_info {
@@ -83,6 +84,8 @@ static int pse_reply_size(const struct ethnl_req_info *req_base,
const struct ethtool_pse_control_status *st = &data->status;
int len = 0;
+ if (st->pw_d_id)
+ len += nla_total_size(sizeof(u32)); /* _PSE_PW_D_ID */
if (st->podl_admin_state > 0)
len += nla_total_size(sizeof(u32)); /* _PODL_PSE_ADMIN_STATE */
if (st->podl_pw_status > 0)
@@ -109,6 +112,9 @@ static int pse_reply_size(const struct ethnl_req_info *req_base,
len += st->c33_pw_limit_nb_ranges *
(nla_total_size(0) +
nla_total_size(sizeof(u32)) * 2);
+ if (st->prio_max)
+ /* _PSE_PRIO_MAX + _PSE_PRIO */
+ len += nla_total_size(sizeof(u32)) * 2;
return len;
}
@@ -148,6 +154,11 @@ static int pse_fill_reply(struct sk_buff *skb,
const struct pse_reply_data *data = PSE_REPDATA(reply_base);
const struct ethtool_pse_control_status *st = &data->status;
+ if (st->pw_d_id &&
+ nla_put_u32(skb, ETHTOOL_A_PSE_PW_D_ID,
+ st->pw_d_id))
+ return -EMSGSIZE;
+
if (st->podl_admin_state > 0 &&
nla_put_u32(skb, ETHTOOL_A_PODL_PSE_ADMIN_STATE,
st->podl_admin_state))
@@ -198,6 +209,11 @@ static int pse_fill_reply(struct sk_buff *skb,
pse_put_pw_limit_ranges(skb, st))
return -EMSGSIZE;
+ if (st->prio_max &&
+ (nla_put_u32(skb, ETHTOOL_A_PSE_PRIO_MAX, st->prio_max) ||
+ nla_put_u32(skb, ETHTOOL_A_PSE_PRIO, st->prio)))
+ return -EMSGSIZE;
+
return 0;
}
@@ -219,6 +235,7 @@ const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1] = {
NLA_POLICY_RANGE(NLA_U32, ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED,
ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED),
[ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT] = { .type = NLA_U32 },
+ [ETHTOOL_A_PSE_PRIO] = { .type = NLA_U32 },
};
static int
@@ -267,6 +284,15 @@ ethnl_set_pse(struct ethnl_req_info *req_info, struct genl_info *info)
if (ret)
return ret;
+ if (tb[ETHTOOL_A_PSE_PRIO]) {
+ unsigned int prio;
+
+ prio = nla_get_u32(tb[ETHTOOL_A_PSE_PRIO]);
+ ret = pse_ethtool_set_prio(phydev->psec, info->extack, prio);
+ if (ret)
+ return ret;
+ }
+
if (tb[ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT]) {
unsigned int pw_limit;
@@ -315,3 +341,42 @@ const struct ethnl_request_ops ethnl_pse_request_ops = {
.set = ethnl_set_pse,
/* PSE has no notification */
};
+
+void ethnl_pse_send_ntf(struct net_device *netdev, unsigned long notifs)
+{
+ void *reply_payload;
+ struct sk_buff *skb;
+ int reply_len;
+ int ret;
+
+ ASSERT_RTNL();
+
+ if (!netdev || !notifs)
+ return;
+
+ reply_len = ethnl_reply_header_size() +
+ nla_total_size(sizeof(u32)); /* _PSE_NTF_EVENTS */
+
+ skb = genlmsg_new(reply_len, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ reply_payload = ethnl_bcastmsg_put(skb, ETHTOOL_MSG_PSE_NTF);
+ if (!reply_payload)
+ goto err_skb;
+
+ ret = ethnl_fill_reply_header(skb, netdev, ETHTOOL_A_PSE_NTF_HEADER);
+ if (ret < 0)
+ goto err_skb;
+
+ if (nla_put_uint(skb, ETHTOOL_A_PSE_NTF_EVENTS, notifs))
+ goto err_skb;
+
+ genlmsg_end(skb, reply_payload);
+ ethnl_multicast(skb, netdev);
+ return;
+
+err_skb:
+ nlmsg_free(skb);
+}
+EXPORT_SYMBOL_GPL(ethnl_pse_send_ntf);
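ethnl_pse_send_ntf() gives PSE drivers a broadcast notification carrying an event bitmap in ETHTOOL_A_PSE_NTF_EVENTS. A hedged usage sketch; the event bit below is hypothetical and not taken from this diff, and the call must happen with RTNL held as the ASSERT_RTNL() requires:

	/* Hypothetical driver-side caller: report a PSE event to user space.
	 * EXAMPLE_PSE_EVENT_BIT stands in for whatever event flag the real
	 * uAPI defines.
	 */
	#define EXAMPLE_PSE_EVENT_BIT	BIT(0)

	static void example_report_pse_event(struct net_device *netdev)
	{
		rtnl_lock();
		ethnl_pse_send_ntf(netdev, EXAMPLE_PSE_EVENT_BIT);
		rtnl_unlock();
	}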
diff --git a/net/ethtool/rss.c b/net/ethtool/rss.c
index 6d9b1769896b..4dced53be4b3 100644
--- a/net/ethtool/rss.c
+++ b/net/ethtool/rss.c
@@ -12,6 +12,7 @@ struct rss_req_info {
struct rss_reply_data {
struct ethnl_reply_data base;
+ bool has_flow_hash;
bool no_key_fields;
u32 indir_size;
u32 hkey_size;
@@ -19,6 +20,37 @@ struct rss_reply_data {
u32 input_xfrm;
u32 *indir_table;
u8 *hkey;
+ int flow_hash[__ETHTOOL_A_FLOW_CNT];
+};
+
+static const u8 ethtool_rxfh_ft_nl2ioctl[] = {
+ [ETHTOOL_A_FLOW_ETHER] = ETHER_FLOW,
+ [ETHTOOL_A_FLOW_IP4] = IPV4_FLOW,
+ [ETHTOOL_A_FLOW_IP6] = IPV6_FLOW,
+ [ETHTOOL_A_FLOW_TCP4] = TCP_V4_FLOW,
+ [ETHTOOL_A_FLOW_UDP4] = UDP_V4_FLOW,
+ [ETHTOOL_A_FLOW_SCTP4] = SCTP_V4_FLOW,
+ [ETHTOOL_A_FLOW_AH_ESP4] = AH_ESP_V4_FLOW,
+ [ETHTOOL_A_FLOW_TCP6] = TCP_V6_FLOW,
+ [ETHTOOL_A_FLOW_UDP6] = UDP_V6_FLOW,
+ [ETHTOOL_A_FLOW_SCTP6] = SCTP_V6_FLOW,
+ [ETHTOOL_A_FLOW_AH_ESP6] = AH_ESP_V6_FLOW,
+ [ETHTOOL_A_FLOW_AH4] = AH_V4_FLOW,
+ [ETHTOOL_A_FLOW_ESP4] = ESP_V4_FLOW,
+ [ETHTOOL_A_FLOW_AH6] = AH_V6_FLOW,
+ [ETHTOOL_A_FLOW_ESP6] = ESP_V6_FLOW,
+ [ETHTOOL_A_FLOW_GTPU4] = GTPU_V4_FLOW,
+ [ETHTOOL_A_FLOW_GTPU6] = GTPU_V6_FLOW,
+ [ETHTOOL_A_FLOW_GTPC4] = GTPC_V4_FLOW,
+ [ETHTOOL_A_FLOW_GTPC6] = GTPC_V6_FLOW,
+ [ETHTOOL_A_FLOW_GTPC_TEID4] = GTPC_TEID_V4_FLOW,
+ [ETHTOOL_A_FLOW_GTPC_TEID6] = GTPC_TEID_V6_FLOW,
+ [ETHTOOL_A_FLOW_GTPU_EH4] = GTPU_EH_V4_FLOW,
+ [ETHTOOL_A_FLOW_GTPU_EH6] = GTPU_EH_V6_FLOW,
+ [ETHTOOL_A_FLOW_GTPU_UL4] = GTPU_UL_V4_FLOW,
+ [ETHTOOL_A_FLOW_GTPU_UL6] = GTPU_UL_V6_FLOW,
+ [ETHTOOL_A_FLOW_GTPU_DL4] = GTPU_DL_V4_FLOW,
+ [ETHTOOL_A_FLOW_GTPU_DL6] = GTPU_DL_V6_FLOW,
};
#define RSS_REQINFO(__req_base) \
@@ -49,21 +81,43 @@ rss_parse_request(struct ethnl_req_info *req_info, struct nlattr **tb,
return 0;
}
+static void
+rss_prepare_flow_hash(const struct rss_req_info *req, struct net_device *dev,
+ struct rss_reply_data *data, const struct genl_info *info)
+{
+ int i;
+
+ data->has_flow_hash = false;
+
+ if (!dev->ethtool_ops->get_rxfh_fields)
+ return;
+ if (req->rss_context && !dev->ethtool_ops->rxfh_per_ctx_fields)
+ return;
+
+ mutex_lock(&dev->ethtool->rss_lock);
+ for (i = 1; i < __ETHTOOL_A_FLOW_CNT; i++) {
+ struct ethtool_rxfh_fields fields = {
+ .flow_type = ethtool_rxfh_ft_nl2ioctl[i],
+ .rss_context = req->rss_context,
+ };
+
+ if (dev->ethtool_ops->get_rxfh_fields(dev, &fields)) {
+ data->flow_hash[i] = -1; /* Unsupported */
+ continue;
+ }
+
+ data->flow_hash[i] = fields.data;
+ data->has_flow_hash = true;
+ }
+ mutex_unlock(&dev->ethtool->rss_lock);
+}
+
static int
-rss_prepare_get(const struct rss_req_info *request, struct net_device *dev,
- struct rss_reply_data *data, const struct genl_info *info)
+rss_get_data_alloc(struct net_device *dev, struct rss_reply_data *data)
{
- struct ethtool_rxfh_param rxfh = {};
- const struct ethtool_ops *ops;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
u32 total_size, indir_bytes;
u8 *rss_config;
- int ret;
-
- ops = dev->ethtool_ops;
-
- ret = ethnl_ops_begin(dev);
- if (ret < 0)
- return ret;
data->indir_size = 0;
data->hkey_size = 0;
@@ -75,16 +129,39 @@ rss_prepare_get(const struct rss_req_info *request, struct net_device *dev,
indir_bytes = data->indir_size * sizeof(u32);
total_size = indir_bytes + data->hkey_size;
rss_config = kzalloc(total_size, GFP_KERNEL);
- if (!rss_config) {
- ret = -ENOMEM;
- goto out_ops;
- }
+ if (!rss_config)
+ return -ENOMEM;
if (data->indir_size)
data->indir_table = (u32 *)rss_config;
if (data->hkey_size)
data->hkey = rss_config + indir_bytes;
+ return 0;
+}
+
+static void rss_get_data_free(const struct rss_reply_data *data)
+{
+ kfree(data->indir_table);
+}
+
+static int
+rss_prepare_get(const struct rss_req_info *request, struct net_device *dev,
+ struct rss_reply_data *data, const struct genl_info *info)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_rxfh_param rxfh = {};
+ int ret;
+
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ return ret;
+ mutex_lock(&dev->ethtool->rss_lock);
+
+ ret = rss_get_data_alloc(dev, data);
+ if (ret)
+ goto out_unlock;
+
rxfh.indir_size = data->indir_size;
rxfh.indir = data->indir_table;
rxfh.key_size = data->hkey_size;
@@ -92,15 +169,35 @@ rss_prepare_get(const struct rss_req_info *request, struct net_device *dev,
ret = ops->get_rxfh(dev, &rxfh);
if (ret)
- goto out_ops;
+ goto out_unlock;
data->hfunc = rxfh.hfunc;
data->input_xfrm = rxfh.input_xfrm;
-out_ops:
+out_unlock:
+ mutex_unlock(&dev->ethtool->rss_lock);
ethnl_ops_complete(dev);
return ret;
}
+static void
+__rss_prepare_ctx(struct net_device *dev, struct rss_reply_data *data,
+ struct ethtool_rxfh_context *ctx)
+{
+ if (WARN_ON_ONCE(data->indir_size != ctx->indir_size ||
+ data->hkey_size != ctx->key_size))
+ return;
+
+ data->no_key_fields = !dev->ethtool_ops->rxfh_per_ctx_key;
+
+ data->hfunc = ctx->hfunc;
+ data->input_xfrm = ctx->input_xfrm;
+ memcpy(data->indir_table, ethtool_rxfh_context_indir(ctx),
+ data->indir_size * sizeof(u32));
+ if (data->hkey_size)
+ memcpy(data->hkey, ethtool_rxfh_context_key(ctx),
+ data->hkey_size);
+}
+
static int
rss_prepare_ctx(const struct rss_req_info *request, struct net_device *dev,
struct rss_reply_data *data, const struct genl_info *info)
@@ -108,34 +205,51 @@ rss_prepare_ctx(const struct rss_req_info *request, struct net_device *dev,
struct ethtool_rxfh_context *ctx;
u32 total_size, indir_bytes;
u8 *rss_config;
+ int ret;
- data->no_key_fields = !dev->ethtool_ops->rxfh_per_ctx_key;
-
+ mutex_lock(&dev->ethtool->rss_lock);
ctx = xa_load(&dev->ethtool->rss_ctx, request->rss_context);
- if (!ctx)
- return -ENOENT;
+ if (!ctx) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
data->indir_size = ctx->indir_size;
data->hkey_size = ctx->key_size;
- data->hfunc = ctx->hfunc;
- data->input_xfrm = ctx->input_xfrm;
indir_bytes = data->indir_size * sizeof(u32);
total_size = indir_bytes + data->hkey_size;
rss_config = kzalloc(total_size, GFP_KERNEL);
- if (!rss_config)
- return -ENOMEM;
+ if (!rss_config) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
data->indir_table = (u32 *)rss_config;
- memcpy(data->indir_table, ethtool_rxfh_context_indir(ctx), indir_bytes);
-
- if (data->hkey_size) {
+ if (data->hkey_size)
data->hkey = rss_config + indir_bytes;
- memcpy(data->hkey, ethtool_rxfh_context_key(ctx),
- data->hkey_size);
- }
- return 0;
+ __rss_prepare_ctx(dev, data, ctx);
+
+ ret = 0;
+out_unlock:
+ mutex_unlock(&dev->ethtool->rss_lock);
+ return ret;
+}
+
+static int
+rss_prepare(const struct rss_req_info *request, struct net_device *dev,
+ struct rss_reply_data *data, const struct genl_info *info)
+{
+ rss_prepare_flow_hash(request, dev, data, info);
+
+ /* Coming from RSS_SET, driver may only have flow_hash_fields ops */
+ if (!dev->ethtool_ops->get_rxfh)
+ return 0;
+
+ if (request->rss_context)
+ return rss_prepare_ctx(request, dev, data, info);
+ return rss_prepare_get(request, dev, data, info);
}
static int
@@ -153,14 +267,10 @@ rss_prepare_data(const struct ethnl_req_info *req_base,
return -EOPNOTSUPP;
/* Some drivers don't handle rss_context */
- if (request->rss_context) {
- if (!ops->cap_rss_ctx_supported && !ops->create_rxfh_context)
- return -EOPNOTSUPP;
-
- return rss_prepare_ctx(request, dev, data, info);
- }
+ if (request->rss_context && !ops->create_rxfh_context)
+ return -EOPNOTSUPP;
- return rss_prepare_get(request, dev, data, info);
+ return rss_prepare(request, dev, data, info);
}
static int
@@ -174,7 +284,10 @@ rss_reply_size(const struct ethnl_req_info *req_base,
nla_total_size(sizeof(u32)) + /* _RSS_HFUNC */
nla_total_size(sizeof(u32)) + /* _RSS_INPUT_XFRM */
nla_total_size(sizeof(u32) * data->indir_size) + /* _RSS_INDIR */
- nla_total_size(data->hkey_size); /* _RSS_HKEY */
+ nla_total_size(data->hkey_size) + /* _RSS_HKEY */
+ nla_total_size(0) + /* _RSS_FLOW_HASH */
+ nla_total_size(sizeof(u32)) * ETHTOOL_A_FLOW_MAX +
+ 0;
return len;
}
@@ -195,17 +308,34 @@ rss_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_base,
sizeof(u32) * data->indir_size, data->indir_table)))
return -EMSGSIZE;
- if (data->no_key_fields)
- return 0;
-
- if ((data->hfunc &&
- nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc)) ||
- (data->input_xfrm &&
- nla_put_u32(skb, ETHTOOL_A_RSS_INPUT_XFRM, data->input_xfrm)) ||
- (data->hkey_size &&
- nla_put(skb, ETHTOOL_A_RSS_HKEY, data->hkey_size, data->hkey)))
+ if (!data->no_key_fields &&
+ ((data->hfunc &&
+ nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc)) ||
+ (data->input_xfrm &&
+ nla_put_u32(skb, ETHTOOL_A_RSS_INPUT_XFRM, data->input_xfrm)) ||
+ (data->hkey_size &&
+ nla_put(skb, ETHTOOL_A_RSS_HKEY, data->hkey_size, data->hkey))))
return -EMSGSIZE;
+ if (data->has_flow_hash) {
+ struct nlattr *nest;
+ int i;
+
+ nest = nla_nest_start(skb, ETHTOOL_A_RSS_FLOW_HASH);
+ if (!nest)
+ return -EMSGSIZE;
+
+ for (i = 1; i < __ETHTOOL_A_FLOW_CNT; i++) {
+ if (data->flow_hash[i] >= 0 &&
+ nla_put_uint(skb, i, data->flow_hash[i])) {
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+ }
+ }
+
+ nla_nest_end(skb, nest);
+ }
+
return 0;
}
@@ -213,7 +343,7 @@ static void rss_cleanup_data(struct ethnl_reply_data *reply_base)
{
const struct rss_reply_data *data = RSS_REPDATA(reply_base);
- kfree(data->indir_table);
+ rss_get_data_free(data);
}
struct rss_nl_dump_ctx {
@@ -284,11 +414,7 @@ rss_dump_one_ctx(struct sk_buff *skb, struct netlink_callback *cb,
if (ret < 0)
goto err_cancel;
- /* Context 0 is not currently stored or cached in the XArray */
- if (!rss_context)
- ret = rss_prepare_get(&req, dev, &data, info);
- else
- ret = rss_prepare_ctx(&req, dev, &data, info);
+ ret = rss_prepare(&req, dev, &data, info);
if (ret)
goto err_cancel;
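rss_prepare_flow_hash() above queries the driver once per ETHTOOL_A_FLOW_* value after translating it to the ioctl-era *_FLOW constant via ethtool_rxfh_ft_nl2ioctl[]. A sketch of what a driver's .get_rxfh_fields callback might report for a couple of flow types (hypothetical driver, not part of this diff):

	/* Hypothetical driver callback: report which header fields feed the
	 * RSS hash for a given flow type.  Flow types the driver does not
	 * hash return -EOPNOTSUPP and show up as "Unsupported" (-1) above.
	 */
	static int example_get_rxfh_fields(struct net_device *dev,
					   struct ethtool_rxfh_fields *fields)
	{
		switch (fields->flow_type) {
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
			fields->data = RXH_IP_SRC | RXH_IP_DST |
				       RXH_L4_B_0_1 | RXH_L4_B_2_3;
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}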
@@ -358,6 +484,434 @@ int ethnl_rss_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
return ret;
}
+/* RSS_NTF */
+
+static void ethnl_rss_delete_notify(struct net_device *dev, u32 rss_context)
+{
+ struct sk_buff *ntf;
+ size_t ntf_size;
+ void *hdr;
+
+ ntf_size = ethnl_reply_header_size() +
+ nla_total_size(sizeof(u32)); /* _RSS_CONTEXT */
+
+ ntf = genlmsg_new(ntf_size, GFP_KERNEL);
+ if (!ntf)
+ goto out_warn;
+
+ hdr = ethnl_bcastmsg_put(ntf, ETHTOOL_MSG_RSS_DELETE_NTF);
+ if (!hdr)
+ goto out_free_ntf;
+
+ if (ethnl_fill_reply_header(ntf, dev, ETHTOOL_A_RSS_HEADER) ||
+ nla_put_u32(ntf, ETHTOOL_A_RSS_CONTEXT, rss_context))
+ goto out_free_ntf;
+
+ genlmsg_end(ntf, hdr);
+ if (ethnl_multicast(ntf, dev))
+ goto out_warn;
+
+ return;
+
+out_free_ntf:
+ nlmsg_free(ntf);
+out_warn:
+ pr_warn_once("Failed to send an RSS delete notification");
+}
+
+void ethtool_rss_notify(struct net_device *dev, u32 type, u32 rss_context)
+{
+ struct rss_req_info req_info = {
+ .rss_context = rss_context,
+ };
+
+ if (type == ETHTOOL_MSG_RSS_DELETE_NTF)
+ ethnl_rss_delete_notify(dev, rss_context);
+ else
+ ethnl_notify(dev, type, &req_info.base);
+}
+
+/* RSS_SET */
+
+#define RFH_MASK (RXH_L2DA | RXH_VLAN | RXH_IP_SRC | RXH_IP_DST | \
+ RXH_L3_PROTO | RXH_L4_B_0_1 | RXH_L4_B_2_3 | \
+ RXH_GTP_TEID | RXH_DISCARD)
+#define RFH_MASKv6 (RFH_MASK | RXH_IP6_FL)
+
+static const struct nla_policy ethnl_rss_flows_policy[] = {
+ [ETHTOOL_A_FLOW_ETHER] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK),
+ [ETHTOOL_A_FLOW_IP4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK),
+ [ETHTOOL_A_FLOW_IP6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6),
+ [ETHTOOL_A_FLOW_TCP4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK),
+ [ETHTOOL_A_FLOW_UDP4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK),
+ [ETHTOOL_A_FLOW_SCTP4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK),
+ [ETHTOOL_A_FLOW_AH_ESP4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK),
+ [ETHTOOL_A_FLOW_TCP6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6),
+ [ETHTOOL_A_FLOW_UDP6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6),
+ [ETHTOOL_A_FLOW_SCTP6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6),
+ [ETHTOOL_A_FLOW_AH_ESP6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6),
+ [ETHTOOL_A_FLOW_AH4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK),
+ [ETHTOOL_A_FLOW_ESP4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK),
+ [ETHTOOL_A_FLOW_AH6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6),
+ [ETHTOOL_A_FLOW_ESP6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6),
+ [ETHTOOL_A_FLOW_GTPU4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK),
+ [ETHTOOL_A_FLOW_GTPU6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6),
+ [ETHTOOL_A_FLOW_GTPC4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK),
+ [ETHTOOL_A_FLOW_GTPC6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6),
+ [ETHTOOL_A_FLOW_GTPC_TEID4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK),
+ [ETHTOOL_A_FLOW_GTPC_TEID6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6),
+ [ETHTOOL_A_FLOW_GTPU_EH4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK),
+ [ETHTOOL_A_FLOW_GTPU_EH6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6),
+ [ETHTOOL_A_FLOW_GTPU_UL4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK),
+ [ETHTOOL_A_FLOW_GTPU_UL6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6),
+ [ETHTOOL_A_FLOW_GTPU_DL4] = NLA_POLICY_MASK(NLA_UINT, RFH_MASK),
+ [ETHTOOL_A_FLOW_GTPU_DL6] = NLA_POLICY_MASK(NLA_UINT, RFH_MASKv6),
+};
+
+const struct nla_policy ethnl_rss_set_policy[ETHTOOL_A_RSS_FLOW_HASH + 1] = {
+ [ETHTOOL_A_RSS_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
+ [ETHTOOL_A_RSS_CONTEXT] = { .type = NLA_U32, },
+ [ETHTOOL_A_RSS_HFUNC] = NLA_POLICY_MIN(NLA_U32, 1),
+ [ETHTOOL_A_RSS_INDIR] = { .type = NLA_BINARY, },
+ [ETHTOOL_A_RSS_HKEY] = NLA_POLICY_MIN(NLA_BINARY, 1),
+ [ETHTOOL_A_RSS_INPUT_XFRM] =
+ NLA_POLICY_MAX(NLA_U32, RXH_XFRM_SYM_OR_XOR),
+ [ETHTOOL_A_RSS_FLOW_HASH] = NLA_POLICY_NESTED(ethnl_rss_flows_policy),
+};
+
+static int
+ethnl_rss_set_validate(struct ethnl_req_info *req_info, struct genl_info *info)
+{
+ const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
+ struct rss_req_info *request = RSS_REQINFO(req_info);
+ struct nlattr **tb = info->attrs;
+ struct nlattr *bad_attr = NULL;
+ u32 input_xfrm;
+
+ if (request->rss_context && !ops->create_rxfh_context)
+ bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_CONTEXT];
+
+ if (request->rss_context && !ops->rxfh_per_ctx_key) {
+ bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_HFUNC];
+ bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_HKEY];
+ bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_INPUT_XFRM];
+ }
+
+ input_xfrm = nla_get_u32_default(tb[ETHTOOL_A_RSS_INPUT_XFRM], 0);
+ if (input_xfrm & ~ops->supported_input_xfrm)
+ bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_INPUT_XFRM];
+
+ if (tb[ETHTOOL_A_RSS_FLOW_HASH] && !ops->set_rxfh_fields)
+ bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_FLOW_HASH];
+ if (request->rss_context &&
+ tb[ETHTOOL_A_RSS_FLOW_HASH] && !ops->rxfh_per_ctx_fields)
+ bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_FLOW_HASH];
+
+ if (bad_attr) {
+ NL_SET_BAD_ATTR(info->extack, bad_attr);
+ return -EOPNOTSUPP;
+ }
+
+ return 1;
+}
+
+static int
+rss_set_prep_indir(struct net_device *dev, struct genl_info *info,
+ struct rss_reply_data *data, struct ethtool_rxfh_param *rxfh,
+ bool *reset, bool *mod)
+{
+ struct netlink_ext_ack *extack = info->extack;
+ struct nlattr **tb = info->attrs;
+ size_t alloc_size;
+ int num_rx_rings;
+ u32 user_size;
+ int i, err;
+
+ if (!tb[ETHTOOL_A_RSS_INDIR])
+ return 0;
+ if (!data->indir_size)
+ return -EOPNOTSUPP;
+
+ err = ethtool_get_rx_ring_count(dev);
+ if (err < 0)
+ return err;
+ num_rx_rings = err;
+
+ if (nla_len(tb[ETHTOOL_A_RSS_INDIR]) % 4) {
+ NL_SET_BAD_ATTR(info->extack, tb[ETHTOOL_A_RSS_INDIR]);
+ return -EINVAL;
+ }
+ user_size = nla_len(tb[ETHTOOL_A_RSS_INDIR]) / 4;
+ if (!user_size) {
+ if (rxfh->rss_context) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_RSS_INDIR],
+ "can't reset table for a context");
+ return -EINVAL;
+ }
+ *reset = true;
+ } else if (data->indir_size % user_size) {
+ NL_SET_ERR_MSG_ATTR_FMT(extack, tb[ETHTOOL_A_RSS_INDIR],
+ "size (%d) mismatch with device indir table (%d)",
+ user_size, data->indir_size);
+ return -EINVAL;
+ }
+
+ rxfh->indir_size = data->indir_size;
+ alloc_size = array_size(data->indir_size, sizeof(rxfh->indir[0]));
+ rxfh->indir = kzalloc(alloc_size, GFP_KERNEL);
+ if (!rxfh->indir)
+ return -ENOMEM;
+
+ nla_memcpy(rxfh->indir, tb[ETHTOOL_A_RSS_INDIR], alloc_size);
+ for (i = 0; i < user_size; i++) {
+ if (rxfh->indir[i] < num_rx_rings)
+ continue;
+
+ NL_SET_ERR_MSG_ATTR_FMT(extack, tb[ETHTOOL_A_RSS_INDIR],
+ "entry %d: queue out of range (%d)",
+ i, rxfh->indir[i]);
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ if (user_size) {
+ /* Replicate the user-provided table to fill the device table */
+ for (i = user_size; i < data->indir_size; i++)
+ rxfh->indir[i] = rxfh->indir[i % user_size];
+ } else {
+ for (i = 0; i < data->indir_size; i++)
+ rxfh->indir[i] =
+ ethtool_rxfh_indir_default(i, num_rx_rings);
+ }
+
+ *mod |= memcmp(rxfh->indir, data->indir_table, data->indir_size);
+
+ return 0;
+
+err_free:
+ kfree(rxfh->indir);
+ rxfh->indir = NULL;
+ return err;
+}
+
+static int
+rss_set_prep_hkey(struct net_device *dev, struct genl_info *info,
+ struct rss_reply_data *data, struct ethtool_rxfh_param *rxfh,
+ bool *mod)
+{
+ struct nlattr **tb = info->attrs;
+
+ if (!tb[ETHTOOL_A_RSS_HKEY])
+ return 0;
+
+ if (nla_len(tb[ETHTOOL_A_RSS_HKEY]) != data->hkey_size) {
+ NL_SET_BAD_ATTR(info->extack, tb[ETHTOOL_A_RSS_HKEY]);
+ return -EINVAL;
+ }
+
+ rxfh->key_size = data->hkey_size;
+ rxfh->key = kmemdup(data->hkey, data->hkey_size, GFP_KERNEL);
+ if (!rxfh->key)
+ return -ENOMEM;
+
+ ethnl_update_binary(rxfh->key, rxfh->key_size, tb[ETHTOOL_A_RSS_HKEY],
+ mod);
+ return 0;
+}
+
+static int
+rss_check_rxfh_fields_sym(struct net_device *dev, struct genl_info *info,
+ struct rss_reply_data *data, bool xfrm_sym)
+{
+ struct nlattr **tb = info->attrs;
+ int i;
+
+ if (!xfrm_sym)
+ return 0;
+ if (!data->has_flow_hash) {
+ NL_SET_ERR_MSG_ATTR(info->extack, tb[ETHTOOL_A_RSS_INPUT_XFRM],
+ "hash field config not reported");
+ return -EINVAL;
+ }
+
+ for (i = 1; i < __ETHTOOL_A_FLOW_CNT; i++)
+ if (data->flow_hash[i] >= 0 &&
+ !ethtool_rxfh_config_is_sym(data->flow_hash[i])) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[ETHTOOL_A_RSS_INPUT_XFRM],
+ "hash field config is not symmetric");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+ethnl_set_rss_fields(struct net_device *dev, struct genl_info *info,
+ u32 rss_context, struct rss_reply_data *data,
+ bool xfrm_sym, bool *mod)
+{
+ struct nlattr *flow_nest = info->attrs[ETHTOOL_A_RSS_FLOW_HASH];
+ struct nlattr *flows[ETHTOOL_A_FLOW_MAX + 1];
+ const struct ethtool_ops *ops;
+ int i, ret;
+
+ ops = dev->ethtool_ops;
+
+ ret = rss_check_rxfh_fields_sym(dev, info, data, xfrm_sym);
+ if (ret)
+ return ret;
+
+ if (!flow_nest)
+ return 0;
+
+ ret = nla_parse_nested(flows, ARRAY_SIZE(ethnl_rss_flows_policy) - 1,
+ flow_nest, ethnl_rss_flows_policy, info->extack);
+ if (ret < 0)
+ return ret;
+
+ for (i = 1; i < __ETHTOOL_A_FLOW_CNT; i++) {
+ struct ethtool_rxfh_fields fields = {
+ .flow_type = ethtool_rxfh_ft_nl2ioctl[i],
+ .rss_context = rss_context,
+ };
+
+ if (!flows[i])
+ continue;
+
+ fields.data = nla_get_u32(flows[i]);
+ if (data->has_flow_hash && data->flow_hash[i] == fields.data)
+ continue;
+
+ if (xfrm_sym && !ethtool_rxfh_config_is_sym(fields.data)) {
+ NL_SET_ERR_MSG_ATTR(info->extack, flows[i],
+ "conflict with xfrm-input");
+ return -EINVAL;
+ }
+
+ ret = ops->set_rxfh_fields(dev, &fields, info->extack);
+ if (ret)
+ return ret;
+
+ *mod = true;
+ }
+
+ return 0;
+}
+
+static void
+rss_set_ctx_update(struct ethtool_rxfh_context *ctx, struct nlattr **tb,
+ struct rss_reply_data *data, struct ethtool_rxfh_param *rxfh)
+{
+ int i;
+
+ if (rxfh->indir) {
+ for (i = 0; i < data->indir_size; i++)
+ ethtool_rxfh_context_indir(ctx)[i] = rxfh->indir[i];
+ ctx->indir_configured = !!nla_len(tb[ETHTOOL_A_RSS_INDIR]);
+ }
+ if (rxfh->key) {
+ memcpy(ethtool_rxfh_context_key(ctx), rxfh->key,
+ data->hkey_size);
+ ctx->key_configured = !!rxfh->key_size;
+ }
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE)
+ ctx->hfunc = rxfh->hfunc;
+ if (rxfh->input_xfrm != RXH_XFRM_NO_CHANGE)
+ ctx->input_xfrm = rxfh->input_xfrm;
+}
+
+static int
+ethnl_rss_set(struct ethnl_req_info *req_info, struct genl_info *info)
+{
+ bool indir_reset = false, indir_mod, xfrm_sym = false;
+ struct rss_req_info *request = RSS_REQINFO(req_info);
+ struct ethtool_rxfh_context *ctx = NULL;
+ struct net_device *dev = req_info->dev;
+ bool mod = false, fields_mod = false;
+ struct ethtool_rxfh_param rxfh = {};
+ struct nlattr **tb = info->attrs;
+ struct rss_reply_data data = {};
+ const struct ethtool_ops *ops;
+ int ret;
+
+ ops = dev->ethtool_ops;
+ data.base.dev = dev;
+
+ ret = rss_prepare(request, dev, &data, info);
+ if (ret)
+ return ret;
+
+ rxfh.rss_context = request->rss_context;
+
+ ret = rss_set_prep_indir(dev, info, &data, &rxfh, &indir_reset, &mod);
+ if (ret)
+ goto exit_clean_data;
+ indir_mod = !!tb[ETHTOOL_A_RSS_INDIR];
+
+ rxfh.hfunc = data.hfunc;
+ ethnl_update_u8(&rxfh.hfunc, tb[ETHTOOL_A_RSS_HFUNC], &mod);
+ if (rxfh.hfunc == data.hfunc)
+ rxfh.hfunc = ETH_RSS_HASH_NO_CHANGE;
+
+ ret = rss_set_prep_hkey(dev, info, &data, &rxfh, &mod);
+ if (ret)
+ goto exit_free_indir;
+
+ rxfh.input_xfrm = data.input_xfrm;
+ ethnl_update_u8(&rxfh.input_xfrm, tb[ETHTOOL_A_RSS_INPUT_XFRM], &mod);
+ /* For drivers which don't support input_xfrm it will be set to 0xff
+ * in the RSS context info. In all other cases input_xfrm != 0 means
+ * symmetric hashing is requested.
+ */
+ if (!request->rss_context || ops->rxfh_per_ctx_key)
+ xfrm_sym = rxfh.input_xfrm || data.input_xfrm;
+ if (rxfh.input_xfrm == data.input_xfrm)
+ rxfh.input_xfrm = RXH_XFRM_NO_CHANGE;
+
+ mutex_lock(&dev->ethtool->rss_lock);
+ if (request->rss_context) {
+ ctx = xa_load(&dev->ethtool->rss_ctx, request->rss_context);
+ if (!ctx) {
+ ret = -ENOENT;
+ goto exit_unlock;
+ }
+ }
+
+ ret = ethnl_set_rss_fields(dev, info, request->rss_context,
+ &data, xfrm_sym, &fields_mod);
+ if (ret)
+ goto exit_unlock;
+
+ if (!mod)
+ ret = 0; /* nothing to tell the driver */
+ else if (!ops->set_rxfh)
+ ret = -EOPNOTSUPP;
+ else if (!rxfh.rss_context)
+ ret = ops->set_rxfh(dev, &rxfh, info->extack);
+ else
+ ret = ops->modify_rxfh_context(dev, ctx, &rxfh, info->extack);
+ if (ret)
+ goto exit_unlock;
+
+ if (ctx)
+ rss_set_ctx_update(ctx, tb, &data, &rxfh);
+ else if (indir_reset)
+ dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
+ else if (indir_mod)
+ dev->priv_flags |= IFF_RXFH_CONFIGURED;
+
+exit_unlock:
+ mutex_unlock(&dev->ethtool->rss_lock);
+ kfree(rxfh.key);
+exit_free_indir:
+ kfree(rxfh.indir);
+exit_clean_data:
+ rss_cleanup_data(&data.base);
+
+ return ret ?: mod || fields_mod;
+}
+
const struct ethnl_request_ops ethnl_rss_request_ops = {
.request_cmd = ETHTOOL_MSG_RSS_GET,
.reply_cmd = ETHTOOL_MSG_RSS_GET_REPLY,
@@ -370,4 +924,282 @@ const struct ethnl_request_ops ethnl_rss_request_ops = {
.reply_size = rss_reply_size,
.fill_reply = rss_fill_reply,
.cleanup_data = rss_cleanup_data,
+
+ .set_validate = ethnl_rss_set_validate,
+ .set = ethnl_rss_set,
+ .set_ntf_cmd = ETHTOOL_MSG_RSS_NTF,
+};
+
+/* RSS_CREATE */
+
+const struct nla_policy ethnl_rss_create_policy[ETHTOOL_A_RSS_INPUT_XFRM + 1] = {
+ [ETHTOOL_A_RSS_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
+ [ETHTOOL_A_RSS_CONTEXT] = NLA_POLICY_MIN(NLA_U32, 1),
+ [ETHTOOL_A_RSS_HFUNC] = NLA_POLICY_MIN(NLA_U32, 1),
+ [ETHTOOL_A_RSS_INDIR] = NLA_POLICY_MIN(NLA_BINARY, 1),
+ [ETHTOOL_A_RSS_HKEY] = NLA_POLICY_MIN(NLA_BINARY, 1),
+ [ETHTOOL_A_RSS_INPUT_XFRM] =
+ NLA_POLICY_MAX(NLA_U32, RXH_XFRM_SYM_OR_XOR),
};
+
+static int
+ethnl_rss_create_validate(struct net_device *dev, struct genl_info *info)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct nlattr **tb = info->attrs;
+ struct nlattr *bad_attr = NULL;
+ u32 rss_context, input_xfrm;
+
+ if (!ops->create_rxfh_context)
+ return -EOPNOTSUPP;
+
+ rss_context = nla_get_u32_default(tb[ETHTOOL_A_RSS_CONTEXT], 0);
+ if (ops->rxfh_max_num_contexts &&
+ ops->rxfh_max_num_contexts <= rss_context) {
+ NL_SET_BAD_ATTR(info->extack, tb[ETHTOOL_A_RSS_CONTEXT]);
+ return -ERANGE;
+ }
+
+ if (!ops->rxfh_per_ctx_key) {
+ bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_HFUNC];
+ bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_HKEY];
+ bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_INPUT_XFRM];
+ }
+
+ input_xfrm = nla_get_u32_default(tb[ETHTOOL_A_RSS_INPUT_XFRM], 0);
+ if (input_xfrm & ~ops->supported_input_xfrm)
+ bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_INPUT_XFRM];
+
+ if (bad_attr) {
+ NL_SET_BAD_ATTR(info->extack, bad_attr);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void
+ethnl_rss_create_send_ntf(struct sk_buff *rsp, struct net_device *dev)
+{
+ struct nlmsghdr *nlh = (void *)rsp->data;
+ struct genlmsghdr *genl_hdr;
+
+ /* Convert the reply into a notification */
+ nlh->nlmsg_pid = 0;
+ nlh->nlmsg_seq = ethnl_bcast_seq_next();
+
+ genl_hdr = nlmsg_data(nlh);
+ genl_hdr->cmd = ETHTOOL_MSG_RSS_CREATE_NTF;
+
+ ethnl_multicast(rsp, dev);
+}
+
+int ethnl_rss_create_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ bool indir_dflt = false, mod = false, ntf_fail = false;
+ struct ethtool_rxfh_param rxfh = {};
+ struct ethtool_rxfh_context *ctx;
+ struct nlattr **tb = info->attrs;
+ struct rss_reply_data data = {};
+ const struct ethtool_ops *ops;
+ struct rss_req_info req = {};
+ struct net_device *dev;
+ struct sk_buff *rsp;
+ void *hdr;
+ u32 limit;
+ int ret;
+
+ rsp = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!rsp)
+ return -ENOMEM;
+
+ ret = ethnl_parse_header_dev_get(&req.base, tb[ETHTOOL_A_RSS_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ goto exit_free_rsp;
+
+ dev = req.base.dev;
+ ops = dev->ethtool_ops;
+
+ req.rss_context = nla_get_u32_default(tb[ETHTOOL_A_RSS_CONTEXT], 0);
+
+ ret = ethnl_rss_create_validate(dev, info);
+ if (ret)
+ goto exit_free_dev;
+
+ rtnl_lock();
+ netdev_lock_ops(dev);
+
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ goto exit_dev_unlock;
+
+ ret = rss_get_data_alloc(dev, &data);
+ if (ret)
+ goto exit_ops;
+
+ ret = rss_set_prep_indir(dev, info, &data, &rxfh, &indir_dflt, &mod);
+ if (ret)
+ goto exit_clean_data;
+
+ ethnl_update_u8(&rxfh.hfunc, tb[ETHTOOL_A_RSS_HFUNC], &mod);
+
+ ret = rss_set_prep_hkey(dev, info, &data, &rxfh, &mod);
+ if (ret)
+ goto exit_free_indir;
+
+ rxfh.input_xfrm = RXH_XFRM_NO_CHANGE;
+ ethnl_update_u8(&rxfh.input_xfrm, tb[ETHTOOL_A_RSS_INPUT_XFRM], &mod);
+
+ ctx = ethtool_rxfh_ctx_alloc(ops, data.indir_size, data.hkey_size);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto exit_free_hkey;
+ }
+
+ mutex_lock(&dev->ethtool->rss_lock);
+ if (!req.rss_context) {
+ limit = ops->rxfh_max_num_contexts ?: U32_MAX;
+ ret = xa_alloc(&dev->ethtool->rss_ctx, &req.rss_context, ctx,
+ XA_LIMIT(1, limit - 1), GFP_KERNEL_ACCOUNT);
+ } else {
+ ret = xa_insert(&dev->ethtool->rss_ctx,
+ req.rss_context, ctx, GFP_KERNEL_ACCOUNT);
+ }
+ if (ret < 0) {
+ NL_SET_ERR_MSG_ATTR(info->extack, tb[ETHTOOL_A_RSS_CONTEXT],
+ "error allocating context ID");
+ goto err_unlock_free_ctx;
+ }
+ rxfh.rss_context = req.rss_context;
+
+ ret = ops->create_rxfh_context(dev, ctx, &rxfh, info->extack);
+ if (ret)
+ goto err_ctx_id_free;
+
+ /* Make sure driver populates defaults */
+ WARN_ON_ONCE(!rxfh.key && ops->rxfh_per_ctx_key &&
+ !memchr_inv(ethtool_rxfh_context_key(ctx), 0,
+ ctx->key_size));
+
+ /* Store the config from rxfh to Xarray.. */
+ rss_set_ctx_update(ctx, tb, &data, &rxfh);
+ /* .. copy from Xarray to data. */
+ __rss_prepare_ctx(dev, &data, ctx);
+
+ hdr = ethnl_unicast_put(rsp, info->snd_portid, info->snd_seq,
+ ETHTOOL_MSG_RSS_CREATE_ACT_REPLY);
+ ntf_fail = ethnl_fill_reply_header(rsp, dev, ETHTOOL_A_RSS_HEADER);
+ ntf_fail |= rss_fill_reply(rsp, &req.base, &data.base);
+ if (WARN_ON(!hdr || ntf_fail)) {
+ ret = -EMSGSIZE;
+ goto exit_unlock;
+ }
+
+ genlmsg_end(rsp, hdr);
+
+ /* Use the same skb for the response and the notification,
+ * genlmsg_reply() will copy the skb if it has an elevated user count.
+ */
+ skb_get(rsp);
+ ret = genlmsg_reply(rsp, info);
+ ethnl_rss_create_send_ntf(rsp, dev);
+ rsp = NULL;
+
+exit_unlock:
+ mutex_unlock(&dev->ethtool->rss_lock);
+exit_free_hkey:
+ kfree(rxfh.key);
+exit_free_indir:
+ kfree(rxfh.indir);
+exit_clean_data:
+ rss_get_data_free(&data);
+exit_ops:
+ ethnl_ops_complete(dev);
+exit_dev_unlock:
+ netdev_unlock_ops(dev);
+ rtnl_unlock();
+exit_free_dev:
+ ethnl_parse_header_dev_put(&req.base);
+exit_free_rsp:
+ nlmsg_free(rsp);
+ return ret;
+
+err_ctx_id_free:
+ xa_erase(&dev->ethtool->rss_ctx, req.rss_context);
+err_unlock_free_ctx:
+ kfree(ctx);
+ goto exit_unlock;
+}
+
+/* RSS_DELETE */
+
+const struct nla_policy ethnl_rss_delete_policy[ETHTOOL_A_RSS_CONTEXT + 1] = {
+ [ETHTOOL_A_RSS_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
+ [ETHTOOL_A_RSS_CONTEXT] = NLA_POLICY_MIN(NLA_U32, 1),
+};
+
+int ethnl_rss_delete_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct ethtool_rxfh_context *ctx;
+ struct nlattr **tb = info->attrs;
+ struct ethnl_req_info req = {};
+ const struct ethtool_ops *ops;
+ struct net_device *dev;
+ u32 rss_context;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, ETHTOOL_A_RSS_CONTEXT))
+ return -EINVAL;
+ rss_context = nla_get_u32(tb[ETHTOOL_A_RSS_CONTEXT]);
+
+ ret = ethnl_parse_header_dev_get(&req, tb[ETHTOOL_A_RSS_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ return ret;
+
+ dev = req.dev;
+ ops = dev->ethtool_ops;
+
+ if (!ops->create_rxfh_context)
+ goto exit_free_dev;
+
+ rtnl_lock();
+ netdev_lock_ops(dev);
+
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ goto exit_dev_unlock;
+
+ mutex_lock(&dev->ethtool->rss_lock);
+ ret = ethtool_check_rss_ctx_busy(dev, rss_context);
+ if (ret)
+ goto exit_unlock;
+
+ ctx = xa_load(&dev->ethtool->rss_ctx, rss_context);
+ if (!ctx) {
+ ret = -ENOENT;
+ goto exit_unlock;
+ }
+
+ ret = ops->remove_rxfh_context(dev, ctx, rss_context, info->extack);
+ if (ret)
+ goto exit_unlock;
+
+ WARN_ON(xa_erase(&dev->ethtool->rss_ctx, rss_context) != ctx);
+ kfree(ctx);
+
+ ethnl_rss_delete_notify(dev, rss_context);
+
+exit_unlock:
+ mutex_unlock(&dev->ethtool->rss_lock);
+ ethnl_ops_complete(dev);
+exit_dev_unlock:
+ netdev_unlock_ops(dev);
+ rtnl_unlock();
+exit_free_dev:
+ ethnl_parse_header_dev_put(&req);
+ return ret;
+}
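ethnl_rss_set() and ethnl_rss_create_doit() treat a non-zero input_xfrm as a request for symmetric hashing, and rss_check_rxfh_fields_sym() refuses it unless every reported flow-hash config is itself symmetric. A conceptual sketch of why an XOR-style input transform (in the spirit of RXH_XFRM_SYM_XOR; SYM_OR_XOR may OR instead) makes the hash direction-independent; illustrative only, not how any particular NIC computes it:

	/* Conceptual only: folding source/destination pairs with XOR before
	 * hashing makes the tuple order-independent, so both directions of a
	 * flow land in the same RSS bucket.  This is also why the hash field
	 * config must cover both SRC and DST fields for symmetry to hold.
	 */
	static u32 example_sym_xor_fold(u32 saddr, u32 daddr,
					u16 sport, u16 dport)
	{
		u32 addrs = saddr ^ daddr;
		u32 ports = (u32)sport ^ (u32)dport;

		return addrs ^ ports;	/* value fed into the RSS hash */
	}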
diff --git a/net/ethtool/tsconfig.c b/net/ethtool/tsconfig.c
index 2be356bdfe87..169b413b31fc 100644
--- a/net/ethtool/tsconfig.c
+++ b/net/ethtool/tsconfig.c
@@ -423,13 +423,11 @@ static int ethnl_set_tsconfig(struct ethnl_req_info *req_base,
return ret;
}
- if (hwprov_mod || config_mod) {
- ret = tsconfig_send_reply(dev, info);
- if (ret && ret != -EOPNOTSUPP) {
- NL_SET_ERR_MSG(info->extack,
- "error while reading the new configuration set");
- return ret;
- }
+ ret = tsconfig_send_reply(dev, info);
+ if (ret && ret != -EOPNOTSUPP) {
+ NL_SET_ERR_MSG(info->extack,
+ "error while reading the new configuration set");
+ return ret;
}
/* tsconfig has no notification */
diff --git a/net/handshake/tlshd.c b/net/handshake/tlshd.c
index d6f52839827e..081093dfd553 100644
--- a/net/handshake/tlshd.c
+++ b/net/handshake/tlshd.c
@@ -230,6 +230,12 @@ static int tls_handshake_accept(struct handshake_req *req,
if (ret < 0)
goto out_cancel;
}
+ if (treq->th_keyring) {
+ ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_KEYRING,
+ treq->th_keyring);
+ if (ret < 0)
+ goto out_cancel;
+ }
ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_AUTH_MODE,
treq->th_auth_mode);
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 88657255fec1..fbbc3ccf9df6 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -49,7 +49,7 @@ static bool hsr_check_carrier(struct hsr_port *master)
ASSERT_RTNL();
- hsr_for_each_port(master->hsr, port) {
+ hsr_for_each_port_rtnl(master->hsr, port) {
if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
netif_carrier_on(master->dev);
return true;
@@ -105,7 +105,7 @@ int hsr_get_max_mtu(struct hsr_priv *hsr)
struct hsr_port *port;
mtu_max = ETH_DATA_LEN;
- hsr_for_each_port(hsr, port)
+ hsr_for_each_port_rtnl(hsr, port)
if (port->type != HSR_PT_MASTER)
mtu_max = min(port->dev->mtu, mtu_max);
@@ -139,7 +139,7 @@ static int hsr_dev_open(struct net_device *dev)
hsr = netdev_priv(dev);
- hsr_for_each_port(hsr, port) {
+ hsr_for_each_port_rtnl(hsr, port) {
if (port->type == HSR_PT_MASTER)
continue;
switch (port->type) {
@@ -172,7 +172,7 @@ static int hsr_dev_close(struct net_device *dev)
struct hsr_priv *hsr;
hsr = netdev_priv(dev);
- hsr_for_each_port(hsr, port) {
+ hsr_for_each_port_rtnl(hsr, port) {
if (port->type == HSR_PT_MASTER)
continue;
switch (port->type) {
@@ -205,7 +205,7 @@ static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
* may become enabled.
*/
features &= ~NETIF_F_ONE_FOR_ALL;
- hsr_for_each_port(hsr, port)
+ hsr_for_each_port_rtnl(hsr, port)
features = netdev_increment_features(features,
port->dev->features,
mask);
@@ -226,6 +226,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
struct hsr_priv *hsr = netdev_priv(dev);
struct hsr_port *master;
+ rcu_read_lock();
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
if (master) {
skb->dev = master->dev;
@@ -238,6 +239,8 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
dev_core_stats_tx_dropped_inc(dev);
dev_kfree_skb_any(skb);
}
+ rcu_read_unlock();
+
return NETDEV_TX_OK;
}
@@ -484,7 +487,7 @@ static void hsr_set_rx_mode(struct net_device *dev)
hsr = netdev_priv(dev);
- hsr_for_each_port(hsr, port) {
+ hsr_for_each_port_rtnl(hsr, port) {
if (port->type == HSR_PT_MASTER)
continue;
switch (port->type) {
@@ -506,7 +509,7 @@ static void hsr_change_rx_flags(struct net_device *dev, int change)
hsr = netdev_priv(dev);
- hsr_for_each_port(hsr, port) {
+ hsr_for_each_port_rtnl(hsr, port) {
if (port->type == HSR_PT_MASTER)
continue;
switch (port->type) {
@@ -534,7 +537,7 @@ static int hsr_ndo_vlan_rx_add_vid(struct net_device *dev,
hsr = netdev_priv(dev);
- hsr_for_each_port(hsr, port) {
+ hsr_for_each_port_rtnl(hsr, port) {
if (port->type == HSR_PT_MASTER ||
port->type == HSR_PT_INTERLINK)
continue;
@@ -580,7 +583,7 @@ static int hsr_ndo_vlan_rx_kill_vid(struct net_device *dev,
hsr = netdev_priv(dev);
- hsr_for_each_port(hsr, port) {
+ hsr_for_each_port_rtnl(hsr, port) {
switch (port->type) {
case HSR_PT_SLAVE_A:
case HSR_PT_SLAVE_B:
@@ -672,9 +675,14 @@ struct net_device *hsr_get_port_ndev(struct net_device *ndev,
struct hsr_priv *hsr = netdev_priv(ndev);
struct hsr_port *port;
+ rcu_read_lock();
hsr_for_each_port(hsr, port)
- if (port->type == pt)
+ if (port->type == pt) {
+ dev_hold(port->dev);
+ rcu_read_unlock();
return port->dev;
+ }
+ rcu_read_unlock();
return NULL;
}
EXPORT_SYMBOL(hsr_get_port_ndev);
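With the RCU walk and dev_hold() added above, hsr_get_port_ndev() now returns a referenced netdevice; the caller owns that reference and must drop it. A usage sketch (use_slave() is a hypothetical consumer):

	/* Sketch: the returned slave device is reference-counted now, so the
	 * caller releases it with dev_put() when done.
	 */
	struct net_device *slave;

	slave = hsr_get_port_ndev(hsr_dev, HSR_PT_SLAVE_A);
	if (slave) {
		use_slave(slave);	/* hypothetical consumer */
		dev_put(slave);
	}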
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
index 192893c3f2ec..bc94b07101d8 100644
--- a/net/hsr/hsr_main.c
+++ b/net/hsr/hsr_main.c
@@ -22,7 +22,7 @@ static bool hsr_slave_empty(struct hsr_priv *hsr)
{
struct hsr_port *port;
- hsr_for_each_port(hsr, port)
+ hsr_for_each_port_rtnl(hsr, port)
if (port->type != HSR_PT_MASTER)
return false;
return true;
@@ -134,7 +134,7 @@ struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt)
{
struct hsr_port *port;
- hsr_for_each_port(hsr, port)
+ hsr_for_each_port_rtnl(hsr, port)
if (port->type == pt)
return port;
return NULL;
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
index 135ec5fce019..33b0d2460c9b 100644
--- a/net/hsr/hsr_main.h
+++ b/net/hsr/hsr_main.h
@@ -224,6 +224,9 @@ struct hsr_priv {
#define hsr_for_each_port(hsr, port) \
list_for_each_entry_rcu((port), &(hsr)->ports, port_list)
+#define hsr_for_each_port_rtnl(hsr, port) \
+ list_for_each_entry_rcu((port), &(hsr)->ports, port_list, lockdep_rtnl_is_held())
+
struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt);
/* Caller must ensure skb is a valid HSR frame */
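hsr_for_each_port_rtnl() passes lockdep_rtnl_is_held() as the extra condition to list_for_each_entry_rcu(), so the same RCU-protected port list can be walked either inside rcu_read_lock() or while holding RTNL without an RCU-lockdep splat. A minimal sketch of the two legal calling contexts (use_port() and configure_port() are placeholders):

	/* Data path: pure RCU reader. */
	rcu_read_lock();
	hsr_for_each_port(hsr, port)
		use_port(port);			/* placeholder */
	rcu_read_unlock();

	/* Control path: RTNL is held, no rcu_read_lock() needed. */
	ASSERT_RTNL();
	hsr_for_each_port_rtnl(hsr, port)
		configure_port(port);		/* placeholder */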
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
index b120470246cc..c96b63adf96f 100644
--- a/net/hsr/hsr_netlink.c
+++ b/net/hsr/hsr_netlink.c
@@ -34,12 +34,18 @@ static int hsr_newlink(struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct net *link_net = rtnl_newlink_link_net(params);
+ struct net_device *link[2], *interlink = NULL;
struct nlattr **data = params->data;
enum hsr_version proto_version;
unsigned char multicast_spec;
u8 proto = HSR_PROTOCOL_HSR;
- struct net_device *link[2], *interlink = NULL;
+ if (!net_eq(link_net, dev_net(dev))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "HSR slaves/interlink must be on the same net namespace than HSR link");
+ return -EINVAL;
+ }
+
if (!data) {
NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
return -EINVAL;
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
index b87b6a6fe070..8177ac6c2d26 100644
--- a/net/hsr/hsr_slave.c
+++ b/net/hsr/hsr_slave.c
@@ -63,8 +63,14 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
skb_push(skb, ETH_HLEN);
skb_reset_mac_header(skb);
if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) ||
- protocol == htons(ETH_P_HSR))
+ protocol == htons(ETH_P_HSR)) {
+ if (!pskb_may_pull(skb, ETH_HLEN + HSR_HLEN)) {
+ kfree_skb(skb);
+ goto finish_consume;
+ }
+
skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
+ }
skb_reset_mac_len(skb);
/* Only the frames received over the interlink port will assign a
@@ -137,6 +143,7 @@ static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev,
struct netlink_ext_ack *extack)
{
+ struct netdev_lag_upper_info lag_upper_info;
struct net_device *hsr_dev;
struct hsr_port *master;
int res;
@@ -153,7 +160,9 @@ static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev,
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
hsr_dev = master->dev;
- res = netdev_upper_dev_link(dev, hsr_dev, extack);
+ lag_upper_info.tx_type = NETDEV_LAG_TX_TYPE_BROADCAST;
+ lag_upper_info.hash_type = NETDEV_LAG_HASH_UNKNOWN;
+ res = netdev_master_upper_dev_link(dev, hsr_dev, NULL, &lag_upper_info, extack);
if (res)
goto fail_upper_dev_link;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 76e38092cd8a..3109c5ec38f3 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -102,6 +102,7 @@
#include <net/gro.h>
#include <net/gso.h>
#include <net/tcp.h>
+#include <net/psp.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <net/ping.h>
@@ -158,6 +159,7 @@ void inet_sock_destruct(struct sock *sk)
kfree(rcu_dereference_protected(inet->inet_opt, 1));
dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1));
+ psp_sk_assoc_free(sk);
}
EXPORT_SYMBOL(inet_sock_destruct);
@@ -1393,14 +1395,10 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
segs = ERR_PTR(-EPROTONOSUPPORT);
- if (!skb->encapsulation || encap) {
- udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
- fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);
+ fixedid = !!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCP_FIXEDID << encap));
- /* fixed ID is invalid if DF bit is not set */
- if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF)))
- goto out;
- }
+ if (!skb->encapsulation || encap)
+ udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
ops = rcu_dereference(inet_offloads[proto]);
if (likely(ops && ops->callbacks.gso_segment)) {
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a648fff71ea7..833f2cf97178 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -170,7 +170,7 @@ struct neigh_table arp_tbl = {
[NEIGH_VAR_DELAY_PROBE_TIME] = 5 * HZ,
[NEIGH_VAR_INTERVAL_PROBE_TIME_MS] = 5 * HZ,
[NEIGH_VAR_GC_STALETIME] = 60 * HZ,
- [NEIGH_VAR_QUEUE_LEN_BYTES] = SK_WMEM_MAX,
+ [NEIGH_VAR_QUEUE_LEN_BYTES] = SK_WMEM_DEFAULT,
[NEIGH_VAR_PROXY_QLEN] = 64,
[NEIGH_VAR_ANYCAST_DELAY] = 1 * HZ,
[NEIGH_VAR_PROXY_DELAY] = (8 * HZ) / 10,
@@ -864,7 +864,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
(arp_fwd_proxy(in_dev, dev, rt) ||
arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
(rt->dst.dev != dev &&
- pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))) {
+ pneigh_lookup(&arp_tbl, net, &tip, dev)))) {
n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
if (n)
neigh_release(n);
@@ -966,6 +966,7 @@ static int arp_is_multicast(const void *pkey)
static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
+ enum skb_drop_reason drop_reason;
const struct arphdr *arp;
/* do not tweak dropwatch on an ARP we will ignore */
@@ -979,12 +980,15 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
goto out_of_mem;
/* ARP header, plus 2 device addresses, plus 2 IP addresses. */
- if (!pskb_may_pull(skb, arp_hdr_len(dev)))
+ drop_reason = pskb_may_pull_reason(skb, arp_hdr_len(dev));
+ if (drop_reason != SKB_NOT_DROPPED_YET)
goto freeskb;
arp = arp_hdr(skb);
- if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4)
+ if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4) {
+ drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
goto freeskb;
+ }
memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
@@ -996,7 +1000,7 @@ consumeskb:
consume_skb(skb);
return NET_RX_SUCCESS;
freeskb:
- kfree_skb(skb);
+ kfree_skb_reason(skb, drop_reason);
out_of_mem:
return NET_RX_DROP;
}
@@ -1085,9 +1089,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
if (mask) {
__be32 ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
- if (!pneigh_lookup(&arp_tbl, net, &ip, dev, 1))
- return -ENOBUFS;
- return 0;
+ return pneigh_create(&arp_tbl, net, &ip, dev, 0, 0, false);
}
return arp_req_set_proxy(net, dev, 1);
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 740af8541d2f..709021197e1c 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1715,8 +1715,7 @@ validate_return:
*/
void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
{
- unsigned char optbuf[sizeof(struct ip_options) + 40];
- struct ip_options *opt = (struct ip_options *)optbuf;
+ struct inet_skb_parm parm;
int res;
if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
@@ -1727,19 +1726,19 @@ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
* so we can not use icmp_send and IPCB here.
*/
- memset(opt, 0, sizeof(struct ip_options));
- opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
+ memset(&parm, 0, sizeof(parm));
+ parm.opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
rcu_read_lock();
- res = __ip_options_compile(dev_net(skb->dev), opt, skb, NULL);
+ res = __ip_options_compile(dev_net(skb->dev), &parm.opt, skb, NULL);
rcu_read_unlock();
if (res)
return;
if (gateway)
- __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0, opt);
+ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0, &parm);
else
- __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, opt);
+ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, &parm);
}
/**
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 4b5bc6eb52e7..c2b2cda1a7e5 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -109,7 +109,7 @@ void ip4_datagram_release_cb(struct sock *sk)
rcu_read_lock();
dst = __sk_dst_get(sk);
- if (!dst || !dst->obsolete || dst->ops->check(dst, 0)) {
+ if (!dst || !READ_ONCE(dst->obsolete) || dst->ops->check(dst, 0)) {
rcu_read_unlock();
return;
}
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index c47d3828d4f6..942a887bf089 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -340,14 +340,13 @@ static void inetdev_destroy(struct in_device *in_dev)
static int __init inet_blackhole_dev_init(void)
{
- int err = 0;
+ struct in_device *in_dev;
rtnl_lock();
- if (!inetdev_init(blackhole_netdev))
- err = -ENOMEM;
+ in_dev = inetdev_init(blackhole_netdev);
rtnl_unlock();
- return err;
+ return PTR_ERR_OR_ZERO(in_dev);
}
late_initcall(inet_blackhole_dev_init);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index f14a41ee4aa1..2c922afadb8f 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -132,8 +132,8 @@ static struct sock *esp_find_tcp_sk(struct xfrm_state *x)
dport = encap->encap_dport;
spin_unlock_bh(&x->lock);
- sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, x->id.daddr.a4,
- dport, x->props.saddr.a4, sport, 0);
+ sk = inet_lookup_established(net, x->id.daddr.a4, dport,
+ x->props.saddr.a4, sport, 0);
if (!sk)
return ERR_PTR(-ENOENT);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index fd1e1507a224..1dab44e13d3b 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -32,6 +32,7 @@
#include <linux/list.h>
#include <linux/slab.h>
+#include <net/flow.h>
#include <net/inet_dscp.h>
#include <net/ip.h>
#include <net/protocol.h>
@@ -293,7 +294,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
.flowi4_iif = LOOPBACK_IFINDEX,
.flowi4_l3mdev = l3mdev_master_ifindex_rcu(dev),
.daddr = ip_hdr(skb)->saddr,
- .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip_hdr(skb))),
+ .flowi4_dscp = ip4h_dscp(ip_hdr(skb)),
.flowi4_scope = scope,
.flowi4_mark = vmark ? skb->mark : 0,
};
@@ -358,7 +359,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
fl4.daddr = src;
fl4.saddr = dst;
- fl4.flowi4_tos = inet_dscp_to_dsfield(dscp);
+ fl4.flowi4_dscp = dscp;
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
fl4.flowi4_tun_key.tun_id = 0;
fl4.flowi4_flags = 0;
@@ -1372,7 +1373,7 @@ static void nl_fib_lookup(struct net *net, struct fib_result_nl *frn)
struct flowi4 fl4 = {
.flowi4_mark = frn->fl_mark,
.daddr = frn->fl_addr,
- .flowi4_tos = frn->fl_tos & INET_DSCP_MASK,
+ .flowi4_dscp = inet_dsfield_to_dscp(frn->fl_tos),
.flowi4_scope = frn->fl_scope,
};
struct fib_table *tb;
@@ -1524,7 +1525,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
fib_disable_ip(dev, event, false);
break;
case NETDEV_CHANGE:
- flags = dev_get_flags(dev);
+ flags = netif_get_flags(dev);
if (flags & (IFF_RUNNING | IFF_LOWER_UP))
fib_sync_up(dev, RTNH_F_LINKDOWN);
else
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index fa58d6620ed6..51f0193092f0 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -23,6 +23,7 @@
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
+#include <net/flow.h>
#include <net/inet_dscp.h>
#include <net/ip.h>
#include <net/route.h>
@@ -193,8 +194,7 @@ INDIRECT_CALLABLE_SCOPE int fib4_rule_match(struct fib_rule *rule,
* to mask the upper three DSCP bits prior to matching to maintain
* legacy behavior.
*/
- if (r->dscp_full &&
- (r->dscp ^ inet_dsfield_to_dscp(fl4->flowi4_tos)) & r->dscp_mask)
+ if (r->dscp_full && (r->dscp ^ fl4->flowi4_dscp) & r->dscp_mask)
return 0;
else if (!r->dscp_full && r->dscp &&
!fib_dscp_masked_match(r->dscp, fl4))
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index d643bd1a0d9d..a5f3c8459758 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -625,11 +625,6 @@ int fib_nh_common_init(struct net *net, struct fib_nh_common *nhc,
if (encap) {
struct lwtunnel_state *lwtstate;
- if (encap_type == LWTUNNEL_ENCAP_NONE) {
- NL_SET_ERR_MSG(extack, "LWT encap type not specified");
- err = -EINVAL;
- goto lwt_failure;
- }
err = lwtunnel_build_state(net, encap_type, encap,
nhc->nhc_family, cfg, &lwtstate,
extack);
@@ -1640,8 +1635,7 @@ int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nhc,
nla_put_u32(skb, RTA_OIF, nhc->nhc_dev->ifindex))
goto nla_put_failure;
- if (nhc->nhc_lwtstate &&
- lwtunnel_fill_encap(skb, nhc->nhc_lwtstate,
+ if (lwtunnel_fill_encap(skb, nhc->nhc_lwtstate,
RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
goto nla_put_failure;
@@ -2093,7 +2087,7 @@ int fib_sync_up(struct net_device *dev, unsigned char nh_flags)
return 0;
if (nh_flags & RTNH_F_DEAD) {
- unsigned int flags = dev_get_flags(dev);
+ unsigned int flags = netif_get_flags(dev);
if (flags & (IFF_RUNNING | IFF_LOWER_UP))
nh_flags |= RTNH_F_LINKDOWN;
diff --git a/net/ipv4/fou_core.c b/net/ipv4/fou_core.c
index 3e30745e2c09..3970b6b7ace5 100644
--- a/net/ipv4/fou_core.c
+++ b/net/ipv4/fou_core.c
@@ -228,21 +228,27 @@ drop:
return 0;
}
+static const struct net_offload *fou_gro_ops(const struct sock *sk,
+ int proto)
+{
+ const struct net_offload __rcu **offloads;
+
+ /* FOU doesn't allow IPv4 on IPv6 sockets. */
+ offloads = sk->sk_family == AF_INET6 ? inet6_offloads : inet_offloads;
+ return rcu_dereference(offloads[proto]);
+}
+
static struct sk_buff *fou_gro_receive(struct sock *sk,
struct list_head *head,
struct sk_buff *skb)
{
- const struct net_offload __rcu **offloads;
struct fou *fou = fou_from_sock(sk);
const struct net_offload *ops;
struct sk_buff *pp = NULL;
- u8 proto;
if (!fou)
goto out;
- proto = fou->protocol;
-
/* We can clear the encap_mark for FOU as we are essentially doing
* one of two possible things. We are either adding an L4 tunnel
* header to the outer L3 tunnel header, or we are simply
@@ -254,8 +260,7 @@ static struct sk_buff *fou_gro_receive(struct sock *sk,
/* Flag this frame as already having an outer encap header */
NAPI_GRO_CB(skb)->is_fou = 1;
- offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
- ops = rcu_dereference(offloads[proto]);
+ ops = fou_gro_ops(sk, fou->protocol);
if (!ops || !ops->callbacks.gro_receive)
goto out;
@@ -268,10 +273,8 @@ out:
static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
int nhoff)
{
- const struct net_offload __rcu **offloads;
struct fou *fou = fou_from_sock(sk);
const struct net_offload *ops;
- u8 proto;
int err;
if (!fou) {
@@ -279,10 +282,7 @@ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
goto out;
}
- proto = fou->protocol;
-
- offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
- ops = rcu_dereference(offloads[proto]);
+ ops = fou_gro_ops(sk, fou->protocol);
if (WARN_ON(!ops || !ops->callbacks.gro_complete)) {
err = -ENOSYS;
goto out;
@@ -323,7 +323,6 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
struct list_head *head,
struct sk_buff *skb)
{
- const struct net_offload __rcu **offloads;
const struct net_offload *ops;
struct sk_buff *pp = NULL;
struct sk_buff *p;
@@ -450,8 +449,7 @@ next_proto:
/* Flag this frame as already having an outer encap header */
NAPI_GRO_CB(skb)->is_fou = 1;
- offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
- ops = rcu_dereference(offloads[proto]);
+ ops = fou_gro_ops(sk, proto);
if (!ops || !ops->callbacks.gro_receive)
goto out;
@@ -467,7 +465,6 @@ out:
static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
- const struct net_offload __rcu **offloads;
const struct net_offload *ops;
unsigned int guehlen = 0;
u8 proto;
@@ -494,8 +491,7 @@ static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
return err;
}
- offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
- ops = rcu_dereference(offloads[proto]);
+ ops = fou_gro_ops(sk, proto);
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
goto out;
diff --git a/net/ipv4/fou_nl.c b/net/ipv4/fou_nl.c
index 3d9614609b2d..506260b4a4dc 100644
--- a/net/ipv4/fou_nl.c
+++ b/net/ipv4/fou_nl.c
@@ -18,9 +18,9 @@ const struct nla_policy fou_nl_policy[FOU_ATTR_IFINDEX + 1] = {
[FOU_ATTR_TYPE] = { .type = NLA_U8, },
[FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
[FOU_ATTR_LOCAL_V4] = { .type = NLA_U32, },
- [FOU_ATTR_LOCAL_V6] = { .len = 16, },
+ [FOU_ATTR_LOCAL_V6] = NLA_POLICY_EXACT_LEN(16),
[FOU_ATTR_PEER_V4] = { .type = NLA_U32, },
- [FOU_ATTR_PEER_V6] = { .len = 16, },
+ [FOU_ATTR_PEER_V6] = NLA_POLICY_EXACT_LEN(16),
[FOU_ATTR_PEER_PORT] = { .type = NLA_BE16, },
[FOU_ATTR_IFINDEX] = { .type = NLA_S32, },
};
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 717cb7d3607a..1b7fb5d935ed 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -72,6 +72,7 @@
#include <linux/string.h>
#include <linux/netfilter_ipv4.h>
#include <linux/slab.h>
+#include <net/flow.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/route.h>
@@ -311,22 +312,24 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
{
struct dst_entry *dst = &rt->dst;
struct inet_peer *peer;
+ struct net_device *dev;
bool rc = true;
if (!apply_ratelimit)
return true;
/* No rate limit on loopback */
- if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
+ rcu_read_lock();
+ dev = dst_dev_rcu(dst);
+ if (dev && (dev->flags & IFF_LOOPBACK))
goto out;
- rcu_read_lock();
peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr,
- l3mdev_master_ifindex_rcu(dst->dev));
+ l3mdev_master_ifindex_rcu(dev));
rc = inet_peer_xrlim_allow(peer,
READ_ONCE(net->ipv4.sysctl_icmp_ratelimit));
- rcu_read_unlock();
out:
+ rcu_read_unlock();
if (!rc)
__ICMP_INC_STATS(net, ICMP_MIB_RATELIMITHOST);
else
@@ -442,7 +445,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
fl4.saddr = saddr;
fl4.flowi4_mark = mark;
fl4.flowi4_uid = sock_net_uid(net, NULL);
- fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip_hdr(skb)));
+ fl4.flowi4_dscp = ip4h_dscp(ip_hdr(skb));
fl4.flowi4_proto = IPPROTO_ICMP;
fl4.flowi4_oif = l3mdev_master_ifindex(skb->dev);
security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
@@ -466,13 +469,13 @@ out_bh_enable:
*/
static struct net_device *icmp_get_route_lookup_dev(struct sk_buff *skb)
{
- struct net_device *route_lookup_dev = NULL;
+ struct net_device *dev = skb->dev;
+ const struct dst_entry *dst;
- if (skb->dev)
- route_lookup_dev = skb->dev;
- else if (skb_dst(skb))
- route_lookup_dev = skb_dst(skb)->dev;
- return route_lookup_dev;
+ if (dev)
+ return dev;
+ dst = skb_dst(skb);
+ return dst ? dst_dev(dst) : NULL;
}
static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4,
@@ -493,7 +496,7 @@ static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4,
fl4->saddr = saddr;
fl4->flowi4_mark = mark;
fl4->flowi4_uid = sock_net_uid(net, NULL);
- fl4->flowi4_tos = inet_dscp_to_dsfield(dscp);
+ fl4->flowi4_dscp = dscp;
fl4->flowi4_proto = IPPROTO_ICMP;
fl4->fl4_icmp_type = type;
fl4->fl4_icmp_code = code;
@@ -542,14 +545,15 @@ static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4,
goto relookup_failed;
}
/* Ugh! */
- orefdst = skb_in->_skb_refdst; /* save old refdst */
- skb_dst_set(skb_in, NULL);
+ orefdst = skb_dstref_steal(skb_in);
err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
dscp, rt2->dst.dev) ? -EINVAL : 0;
dst_release(&rt2->dst);
rt2 = skb_rtable(skb_in);
- skb_in->_skb_refdst = orefdst; /* restore old refdst */
+ /* steal dst entry from skb_in, don't drop refcnt */
+ skb_dstref_steal(skb_in);
+ skb_dstref_restore(skb_in, orefdst);
}
if (err)
@@ -590,7 +594,7 @@ relookup_failed:
*/
void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
- const struct ip_options *opt)
+ const struct inet_skb_parm *parm)
{
struct iphdr *iph;
int room;
@@ -706,7 +710,8 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
rcu_read_lock();
if (rt_is_input_route(rt) &&
READ_ONCE(net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr))
- dev = dev_get_by_index_rcu(net, inet_iif(skb_in));
+ dev = dev_get_by_index_rcu(net, parm->iif ? parm->iif :
+ inet_iif(skb_in));
if (dev)
saddr = inet_select_addr(dev, iph->saddr,
@@ -721,7 +726,8 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
iph->tos;
mark = IP4_REPLY_MARK(net, skb_in->mark);
- if (__ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in, opt))
+ if (__ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in,
+ &parm->opt))
goto out_unlock;
@@ -795,14 +801,16 @@ EXPORT_SYMBOL(__icmp_send);
void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
{
struct sk_buff *cloned_skb = NULL;
- struct ip_options opts = { 0 };
enum ip_conntrack_info ctinfo;
+ enum ip_conntrack_dir dir;
+ struct inet_skb_parm parm;
struct nf_conn *ct;
__be32 orig_ip;
+ memset(&parm, 0, sizeof(parm));
ct = nf_ct_get(skb_in, &ctinfo);
- if (!ct || !(ct->status & IPS_SRC_NAT)) {
- __icmp_send(skb_in, type, code, info, &opts);
+ if (!ct || !(READ_ONCE(ct->status) & IPS_NAT_MASK)) {
+ __icmp_send(skb_in, type, code, info, &parm);
return;
}
@@ -816,8 +824,9 @@ void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
goto out;
orig_ip = ip_hdr(skb_in)->saddr;
- ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip;
- __icmp_send(skb_in, type, code, info, &opts);
+ dir = CTINFO2DIR(ctinfo);
+ ip_hdr(skb_in)->saddr = ct->tuplehash[dir].tuple.src.u3.ip;
+ __icmp_send(skb_in, type, code, info, &parm);
ip_hdr(skb_in)->saddr = orig_ip;
out:
consume_skb(cloned_skb);
@@ -869,7 +878,7 @@ static enum skb_drop_reason icmp_unreach(struct sk_buff *skb)
struct net *net;
u32 info = 0;
- net = dev_net_rcu(skb_dst(skb)->dev);
+ net = skb_dst_dev_net_rcu(skb);
/*
* Incomplete header ?
@@ -1012,7 +1021,7 @@ static enum skb_drop_reason icmp_echo(struct sk_buff *skb)
struct icmp_bxm icmp_param;
struct net *net;
- net = dev_net_rcu(skb_dst(skb)->dev);
+ net = skb_dst_dev_net_rcu(skb);
/* should there be an ICMP stat for ignored echos? */
if (READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_all))
return SKB_NOT_DROPPED_YET;
@@ -1182,7 +1191,7 @@ static enum skb_drop_reason icmp_timestamp(struct sk_buff *skb)
return SKB_NOT_DROPPED_YET;
out_err:
- __ICMP_INC_STATS(dev_net_rcu(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
+ __ICMP_INC_STATS(skb_dst_dev_net_rcu(skb), ICMP_MIB_INERRORS);
return SKB_DROP_REASON_PKT_TOO_SMALL;
}
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d1769034b643..7182f1419c2a 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -427,7 +427,7 @@ static int igmpv3_sendpack(struct sk_buff *skb)
pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen);
- return ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
+ return ip_local_out(skb_dst_dev_net(skb), skb->sk, skb);
}
static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 6906bedad19a..cdd1e12aac8c 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -168,7 +168,7 @@ static bool inet_use_bhash2_on_bind(const struct sock *sk)
}
static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
- kuid_t sk_uid, bool relax,
+ kuid_t uid, bool relax,
bool reuseport_cb_ok, bool reuseport_ok)
{
int bound_dev_if2;
@@ -185,12 +185,12 @@ static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
if (!relax || (!reuseport_ok && sk->sk_reuseport &&
sk2->sk_reuseport && reuseport_cb_ok &&
(sk2->sk_state == TCP_TIME_WAIT ||
- uid_eq(sk_uid, sock_i_uid(sk2)))))
+ uid_eq(uid, sk_uid(sk2)))))
return true;
} else if (!reuseport_ok || !sk->sk_reuseport ||
!sk2->sk_reuseport || !reuseport_cb_ok ||
(sk2->sk_state != TCP_TIME_WAIT &&
- !uid_eq(sk_uid, sock_i_uid(sk2)))) {
+ !uid_eq(uid, sk_uid(sk2)))) {
return true;
}
}
@@ -198,7 +198,7 @@ static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
}
static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
- kuid_t sk_uid, bool relax,
+ kuid_t uid, bool relax,
bool reuseport_cb_ok, bool reuseport_ok)
{
if (ipv6_only_sock(sk2)) {
@@ -211,20 +211,20 @@ static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
#endif
}
- return inet_bind_conflict(sk, sk2, sk_uid, relax,
+ return inet_bind_conflict(sk, sk2, uid, relax,
reuseport_cb_ok, reuseport_ok);
}
static bool inet_bhash2_conflict(const struct sock *sk,
const struct inet_bind2_bucket *tb2,
- kuid_t sk_uid,
+ kuid_t uid,
bool relax, bool reuseport_cb_ok,
bool reuseport_ok)
{
struct sock *sk2;
sk_for_each_bound(sk2, &tb2->owners) {
- if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
+ if (__inet_bhash2_conflict(sk, sk2, uid, relax,
reuseport_cb_ok, reuseport_ok))
return true;
}
@@ -242,8 +242,8 @@ static int inet_csk_bind_conflict(const struct sock *sk,
const struct inet_bind2_bucket *tb2, /* may be null */
bool relax, bool reuseport_ok)
{
- kuid_t uid = sock_i_uid((struct sock *)sk);
struct sock_reuseport *reuseport_cb;
+ kuid_t uid = sk_uid(sk);
bool reuseport_cb_ok;
struct sock *sk2;
@@ -287,11 +287,11 @@ static int inet_csk_bind_conflict(const struct sock *sk,
static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev,
bool relax, bool reuseport_ok)
{
- kuid_t uid = sock_i_uid((struct sock *)sk);
const struct net *net = sock_net(sk);
struct sock_reuseport *reuseport_cb;
struct inet_bind_hashbucket *head2;
struct inet_bind2_bucket *tb2;
+ kuid_t uid = sk_uid(sk);
bool conflict = false;
bool reuseport_cb_ok;
@@ -423,17 +423,15 @@ success:
}
static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
- struct sock *sk)
+ const struct sock *sk)
{
- kuid_t uid = sock_i_uid(sk);
-
if (tb->fastreuseport <= 0)
return 0;
if (!sk->sk_reuseport)
return 0;
if (rcu_access_pointer(sk->sk_reuseport_cb))
return 0;
- if (!uid_eq(tb->fastuid, uid))
+ if (!uid_eq(tb->fastuid, sk_uid(sk)))
return 0;
/* We only need to check the rcv_saddr if this tb was once marked
* without fastreuseport and then was reset, as we can only know that
@@ -455,17 +453,17 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
ipv6_only_sock(sk), true, false);
}
-void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
- struct sock *sk)
+void inet_csk_update_fastreuse(const struct sock *sk,
+ struct inet_bind_bucket *tb,
+ struct inet_bind2_bucket *tb2)
{
- kuid_t uid = sock_i_uid(sk);
bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
if (hlist_empty(&tb->bhash2)) {
tb->fastreuse = reuse;
if (sk->sk_reuseport) {
tb->fastreuseport = FASTREUSEPORT_ANY;
- tb->fastuid = uid;
+ tb->fastuid = sk_uid(sk);
tb->fast_rcv_saddr = sk->sk_rcv_saddr;
tb->fast_ipv6_only = ipv6_only_sock(sk);
tb->fast_sk_family = sk->sk_family;
@@ -492,7 +490,7 @@ void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
*/
if (!sk_reuseport_match(tb, sk)) {
tb->fastreuseport = FASTREUSEPORT_STRICT;
- tb->fastuid = uid;
+ tb->fastuid = sk_uid(sk);
tb->fast_rcv_saddr = sk->sk_rcv_saddr;
tb->fast_ipv6_only = ipv6_only_sock(sk);
tb->fast_sk_family = sk->sk_family;
@@ -504,6 +502,9 @@ void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
tb->fastreuseport = 0;
}
}
+
+ tb2->fastreuse = tb->fastreuse;
+ tb2->fastreuseport = tb->fastreuseport;
}
/* Obtain a reference to a local port for the given sock,
@@ -585,7 +586,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
}
success:
- inet_csk_update_fastreuse(tb, sk);
+ inet_csk_update_fastreuse(sk, tb, tb2);
if (!inet_csk(sk)->icsk_bind_hash)
inet_bind_hash(sk, tb, tb2, port);
@@ -709,9 +710,9 @@ struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg)
spin_unlock_bh(&queue->fastopenq.lock);
}
-out:
release_sock(sk);
- if (newsk && mem_cgroup_sockets_enabled) {
+
+ if (mem_cgroup_sockets_enabled) {
gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL;
int amt = 0;
@@ -721,7 +722,7 @@ out:
lock_sock(newsk);
mem_cgroup_sk_alloc(newsk);
- if (newsk->sk_memcg) {
+ if (mem_cgroup_from_sk(newsk)) {
/* The socket has not been accepted yet, no need
* to look at newsk->sk_wmem_queued.
*/
@@ -730,23 +731,22 @@ out:
}
if (amt)
- mem_cgroup_charge_skmem(newsk->sk_memcg, amt, gfp);
+ mem_cgroup_sk_charge(newsk, amt, gfp);
kmem_cache_charge(newsk, gfp);
release_sock(newsk);
}
+
if (req)
reqsk_put(req);
- if (newsk)
- inet_init_csk_locks(newsk);
-
+ inet_init_csk_locks(newsk);
return newsk;
+
out_err:
- newsk = NULL;
- req = NULL;
+ release_sock(sk);
arg->err = error;
- goto out;
+ return NULL;
}
EXPORT_SYMBOL(inet_csk_accept);
@@ -812,7 +812,7 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
sk->sk_protocol, inet_sk_flowi_flags(sk),
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
ireq->ir_loc_addr, ireq->ir_rmt_port,
- htons(ireq->ir_num), sk->sk_uid);
+ htons(ireq->ir_num), sk_uid(sk));
security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
@@ -849,7 +849,7 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
sk->sk_protocol, inet_sk_flowi_flags(sk),
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
ireq->ir_loc_addr, ireq->ir_rmt_port,
- htons(ireq->ir_num), sk->sk_uid);
+ htons(ireq->ir_num), sk_uid(sk));
security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
@@ -887,15 +887,6 @@ static void syn_ack_recalc(struct request_sock *req,
req->num_timeout >= rskq_defer_accept - 1;
}
-int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
-{
- int err = req->rsk_ops->rtx_syn_ack(parent, req);
-
- if (!err)
- req->num_retrans++;
- return err;
-}
-
static struct request_sock *
reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener,
bool attach_listener)
@@ -1135,7 +1126,7 @@ static void reqsk_timer_handler(struct timer_list *t)
req->rsk_ops->syn_ack_timeout(req);
if (!expire &&
(!resend ||
- !inet_rtx_syn_ack(sk_listener, req) ||
+ !tcp_rtx_synack(sk_listener, req) ||
inet_rsk(req)->acked)) {
if (req->num_timeout++ == 0)
atomic_dec(&queue->young);
@@ -1309,12 +1300,19 @@ void inet_csk_destroy_sock(struct sock *sk)
xfrm_sk_free_policy(sk);
- this_cpu_dec(*sk->sk_prot->orphan_count);
+ tcp_orphan_count_dec();
sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);
+void inet_csk_prepare_for_destroy_sock(struct sock *sk)
+{
+ /* The below has to be done to allow calling inet_csk_destroy_sock */
+ sock_set_flag(sk, SOCK_DEAD);
+ tcp_orphan_count_inc();
+}
+
/* This function allows to force a closure of a socket after the call to
* tcp_create_openreq_child().
*/
@@ -1382,7 +1380,7 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
sock_orphan(child);
- this_cpu_inc(*sk->sk_prot->orphan_count);
+ tcp_orphan_count_inc();
if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 1d1d6ad53f4c..f0b6c5a411a2 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -20,9 +20,6 @@
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
-#include <net/inet_hashtables.h>
-#include <net/inet_timewait_sock.h>
-#include <net/inet6_hashtables.h>
#include <net/bpf_sk_storage.h>
#include <net/netlink.h>
@@ -74,54 +71,29 @@ static void inet_diag_unlock_handler(const struct inet_diag_handler *handler)
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
{
- r->idiag_family = sk->sk_family;
+ r->idiag_family = READ_ONCE(sk->sk_family);
- r->id.idiag_sport = htons(sk->sk_num);
- r->id.idiag_dport = sk->sk_dport;
- r->id.idiag_if = sk->sk_bound_dev_if;
+ r->id.idiag_sport = htons(READ_ONCE(sk->sk_num));
+ r->id.idiag_dport = READ_ONCE(sk->sk_dport);
+ r->id.idiag_if = READ_ONCE(sk->sk_bound_dev_if);
sock_diag_save_cookie(sk, r->id.idiag_cookie);
#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == AF_INET6) {
- *(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
- *(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
+ if (r->idiag_family == AF_INET6) {
+ data_race(*(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr);
+ data_race(*(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr);
} else
#endif
{
memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
- r->id.idiag_src[0] = sk->sk_rcv_saddr;
- r->id.idiag_dst[0] = sk->sk_daddr;
+ r->id.idiag_src[0] = READ_ONCE(sk->sk_rcv_saddr);
+ r->id.idiag_dst[0] = READ_ONCE(sk->sk_daddr);
}
}
EXPORT_SYMBOL_GPL(inet_diag_msg_common_fill);
-static size_t inet_sk_attr_size(struct sock *sk,
- const struct inet_diag_req_v2 *req,
- bool net_admin)
-{
- const struct inet_diag_handler *handler;
- size_t aux = 0;
-
- rcu_read_lock();
- handler = rcu_dereference(inet_diag_table[req->sdiag_protocol]);
- DEBUG_NET_WARN_ON_ONCE(!handler);
- if (handler && handler->idiag_get_aux_size)
- aux = handler->idiag_get_aux_size(sk, net_admin);
- rcu_read_unlock();
-
- return nla_total_size(sizeof(struct tcp_info))
- + nla_total_size(sizeof(struct inet_diag_msg))
- + inet_diag_msg_attrs_size()
- + nla_total_size(sizeof(struct inet_diag_meminfo))
- + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
- + nla_total_size(TCP_CA_NAME_MAX)
- + nla_total_size(sizeof(struct tcpvegas_info))
- + aux
- + 64;
-}
-
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
struct inet_diag_msg *r, int ext,
struct user_namespace *user_ns,
@@ -181,7 +153,7 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
goto errout;
#endif
- r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
+ r->idiag_uid = from_kuid_munged(user_ns, sk_uid(sk));
r->idiag_inode = sock_i_ino(sk);
memset(&inet_sockopt, 0, sizeof(inet_sockopt));
@@ -313,17 +285,17 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk_pending == ICSK_TIME_LOSS_PROBE) {
r->idiag_timer = 1;
- r->idiag_retrans = icsk->icsk_retransmits;
+ r->idiag_retrans = READ_ONCE(icsk->icsk_retransmits);
r->idiag_expires =
jiffies_delta_to_msecs(icsk_timeout(icsk) - jiffies);
} else if (icsk_pending == ICSK_TIME_PROBE0) {
r->idiag_timer = 4;
- r->idiag_retrans = icsk->icsk_probes_out;
+ r->idiag_retrans = READ_ONCE(icsk->icsk_probes_out);
r->idiag_expires =
jiffies_delta_to_msecs(icsk_timeout(icsk) - jiffies);
} else if (timer_pending(&sk->sk_timer)) {
r->idiag_timer = 2;
- r->idiag_retrans = icsk->icsk_probes_out;
+ r->idiag_retrans = READ_ONCE(icsk->icsk_probes_out);
r->idiag_expires =
jiffies_delta_to_msecs(sk->sk_timer.expires - jiffies);
}
@@ -422,183 +394,6 @@ errout:
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
-static int inet_twsk_diag_fill(struct sock *sk,
- struct sk_buff *skb,
- struct netlink_callback *cb,
- u16 nlmsg_flags, bool net_admin)
-{
- struct inet_timewait_sock *tw = inet_twsk(sk);
- struct inet_diag_msg *r;
- struct nlmsghdr *nlh;
- long tmo;
-
- nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type,
- sizeof(*r), nlmsg_flags);
- if (!nlh)
- return -EMSGSIZE;
-
- r = nlmsg_data(nlh);
- BUG_ON(tw->tw_state != TCP_TIME_WAIT);
-
- inet_diag_msg_common_fill(r, sk);
- r->idiag_retrans = 0;
-
- r->idiag_state = READ_ONCE(tw->tw_substate);
- r->idiag_timer = 3;
- tmo = tw->tw_timer.expires - jiffies;
- r->idiag_expires = jiffies_delta_to_msecs(tmo);
- r->idiag_rqueue = 0;
- r->idiag_wqueue = 0;
- r->idiag_uid = 0;
- r->idiag_inode = 0;
-
- if (net_admin && nla_put_u32(skb, INET_DIAG_MARK,
- tw->tw_mark)) {
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
- }
-
- nlmsg_end(skb, nlh);
- return 0;
-}
-
-static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
- struct netlink_callback *cb,
- u16 nlmsg_flags, bool net_admin)
-{
- struct request_sock *reqsk = inet_reqsk(sk);
- struct inet_diag_msg *r;
- struct nlmsghdr *nlh;
- long tmo;
-
- nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
- cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags);
- if (!nlh)
- return -EMSGSIZE;
-
- r = nlmsg_data(nlh);
- inet_diag_msg_common_fill(r, sk);
- r->idiag_state = TCP_SYN_RECV;
- r->idiag_timer = 1;
- r->idiag_retrans = reqsk->num_retrans;
-
- BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
- offsetof(struct sock, sk_cookie));
-
- tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
- r->idiag_expires = jiffies_delta_to_msecs(tmo);
- r->idiag_rqueue = 0;
- r->idiag_wqueue = 0;
- r->idiag_uid = 0;
- r->idiag_inode = 0;
-
- if (net_admin && nla_put_u32(skb, INET_DIAG_MARK,
- inet_rsk(reqsk)->ir_mark)) {
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
- }
-
- nlmsg_end(skb, nlh);
- return 0;
-}
-
-static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
- struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r,
- u16 nlmsg_flags, bool net_admin)
-{
- if (sk->sk_state == TCP_TIME_WAIT)
- return inet_twsk_diag_fill(sk, skb, cb, nlmsg_flags, net_admin);
-
- if (sk->sk_state == TCP_NEW_SYN_RECV)
- return inet_req_diag_fill(sk, skb, cb, nlmsg_flags, net_admin);
-
- return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, r, nlmsg_flags,
- net_admin);
-}
-
-struct sock *inet_diag_find_one_icsk(struct net *net,
- struct inet_hashinfo *hashinfo,
- const struct inet_diag_req_v2 *req)
-{
- struct sock *sk;
-
- rcu_read_lock();
- if (req->sdiag_family == AF_INET)
- sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[0],
- req->id.idiag_dport, req->id.idiag_src[0],
- req->id.idiag_sport, req->id.idiag_if);
-#if IS_ENABLED(CONFIG_IPV6)
- else if (req->sdiag_family == AF_INET6) {
- if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
- ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
- sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[3],
- req->id.idiag_dport, req->id.idiag_src[3],
- req->id.idiag_sport, req->id.idiag_if);
- else
- sk = inet6_lookup(net, hashinfo, NULL, 0,
- (struct in6_addr *)req->id.idiag_dst,
- req->id.idiag_dport,
- (struct in6_addr *)req->id.idiag_src,
- req->id.idiag_sport,
- req->id.idiag_if);
- }
-#endif
- else {
- rcu_read_unlock();
- return ERR_PTR(-EINVAL);
- }
- rcu_read_unlock();
- if (!sk)
- return ERR_PTR(-ENOENT);
-
- if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
- sock_gen_put(sk);
- return ERR_PTR(-ENOENT);
- }
-
- return sk;
-}
-EXPORT_SYMBOL_GPL(inet_diag_find_one_icsk);
-
-int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
- struct netlink_callback *cb,
- const struct inet_diag_req_v2 *req)
-{
- struct sk_buff *in_skb = cb->skb;
- bool net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN);
- struct net *net = sock_net(in_skb->sk);
- struct sk_buff *rep;
- struct sock *sk;
- int err;
-
- sk = inet_diag_find_one_icsk(net, hashinfo, req);
- if (IS_ERR(sk))
- return PTR_ERR(sk);
-
- rep = nlmsg_new(inet_sk_attr_size(sk, req, net_admin), GFP_KERNEL);
- if (!rep) {
- err = -ENOMEM;
- goto out;
- }
-
- err = sk_diag_fill(sk, rep, cb, req, 0, net_admin);
- if (err < 0) {
- WARN_ON(err == -EMSGSIZE);
- nlmsg_free(rep);
- goto out;
- }
- err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
-
-out:
- if (sk)
- sock_gen_put(sk);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);
-
static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb,
const struct nlmsghdr *nlh,
int hdrlen,
@@ -785,7 +580,7 @@ static void entry_fill_addrs(struct inet_diag_entry *entry,
const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == AF_INET6) {
+ if (entry->family == AF_INET6) {
entry->saddr = sk->sk_v6_rcv_saddr.s6_addr32;
entry->daddr = sk->sk_v6_daddr.s6_addr32;
} else
@@ -796,31 +591,36 @@ static void entry_fill_addrs(struct inet_diag_entry *entry,
}
}
-int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
+int inet_diag_bc_sk(const struct inet_diag_dump_data *cb_data, struct sock *sk)
{
- struct inet_sock *inet = inet_sk(sk);
+ const struct nlattr *bc = cb_data->inet_diag_nla_bc;
+ const struct inet_sock *inet = inet_sk(sk);
struct inet_diag_entry entry;
if (!bc)
return 1;
- entry.family = sk->sk_family;
+ entry.family = READ_ONCE(sk->sk_family);
entry_fill_addrs(&entry, sk);
- entry.sport = inet->inet_num;
- entry.dport = ntohs(inet->inet_dport);
- entry.ifindex = sk->sk_bound_dev_if;
- entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;
- if (sk_fullsock(sk))
- entry.mark = READ_ONCE(sk->sk_mark);
- else if (sk->sk_state == TCP_NEW_SYN_RECV)
- entry.mark = inet_rsk(inet_reqsk(sk))->ir_mark;
- else if (sk->sk_state == TCP_TIME_WAIT)
- entry.mark = inet_twsk(sk)->tw_mark;
- else
- entry.mark = 0;
+ entry.sport = READ_ONCE(inet->inet_num);
+ entry.dport = ntohs(READ_ONCE(inet->inet_dport));
+ entry.ifindex = READ_ONCE(sk->sk_bound_dev_if);
+ if (cb_data->userlocks_needed)
+ entry.userlocks = sk_fullsock(sk) ? READ_ONCE(sk->sk_userlocks) : 0;
+ if (cb_data->mark_needed) {
+ if (sk_fullsock(sk))
+ entry.mark = READ_ONCE(sk->sk_mark);
+ else if (sk->sk_state == TCP_NEW_SYN_RECV)
+ entry.mark = inet_rsk(inet_reqsk(sk))->ir_mark;
+ else if (sk->sk_state == TCP_TIME_WAIT)
+ entry.mark = inet_twsk(sk)->tw_mark;
+ else
+ entry.mark = 0;
+ }
#ifdef CONFIG_SOCK_CGROUP_DATA
- entry.cgroup_id = sk_fullsock(sk) ?
- cgroup_id(sock_cgroup_ptr(&sk->sk_cgrp_data)) : 0;
+ if (cb_data->cgroup_needed)
+ entry.cgroup_id = sk_fullsock(sk) ?
+ cgroup_id(sock_cgroup_ptr(&sk->sk_cgrp_data)) : 0;
#endif
return inet_diag_bc_run(bc, &entry);
@@ -920,16 +720,21 @@ static bool valid_cgroupcond(const struct inet_diag_bc_op *op, int len,
}
#endif
-static int inet_diag_bc_audit(const struct nlattr *attr,
+static int inet_diag_bc_audit(struct inet_diag_dump_data *cb_data,
const struct sk_buff *skb)
{
- bool net_admin = netlink_net_capable(skb, CAP_NET_ADMIN);
+ const struct nlattr *attr = cb_data->inet_diag_nla_bc;
const void *bytecode, *bc;
int bytecode_len, len;
+ bool net_admin;
+
+ if (!attr)
+ return 0;
- if (!attr || nla_len(attr) < sizeof(struct inet_diag_bc_op))
+ if (nla_len(attr) < sizeof(struct inet_diag_bc_op))
return -EINVAL;
+ net_admin = netlink_net_capable(skb, CAP_NET_ADMIN);
bytecode = bc = nla_data(attr);
len = bytecode_len = nla_len(attr);
@@ -961,14 +766,18 @@ static int inet_diag_bc_audit(const struct nlattr *attr,
return -EPERM;
if (!valid_markcond(bc, len, &min_len))
return -EINVAL;
+ cb_data->mark_needed = true;
break;
#ifdef CONFIG_SOCK_CGROUP_DATA
case INET_DIAG_BC_CGROUP_COND:
if (!valid_cgroupcond(bc, len, &min_len))
return -EINVAL;
+ cb_data->cgroup_needed = true;
break;
#endif
case INET_DIAG_BC_AUTO:
+ cb_data->userlocks_needed = true;
+ fallthrough;
case INET_DIAG_BC_JMP:
case INET_DIAG_BC_NOP:
break;
@@ -992,280 +801,6 @@ static int inet_diag_bc_audit(const struct nlattr *attr,
return len == 0 ? 0 : -EINVAL;
}
-static void twsk_build_assert(void)
-{
- BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) !=
- offsetof(struct sock, sk_family));
-
- BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_num) !=
- offsetof(struct inet_sock, inet_num));
-
- BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_dport) !=
- offsetof(struct inet_sock, inet_dport));
-
- BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_rcv_saddr) !=
- offsetof(struct inet_sock, inet_rcv_saddr));
-
- BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_daddr) !=
- offsetof(struct inet_sock, inet_daddr));
-
-#if IS_ENABLED(CONFIG_IPV6)
- BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_rcv_saddr) !=
- offsetof(struct sock, sk_v6_rcv_saddr));
-
- BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_daddr) !=
- offsetof(struct sock, sk_v6_daddr));
-#endif
-}
-
-void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
- struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r)
-{
- bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
- struct inet_diag_dump_data *cb_data = cb->data;
- struct net *net = sock_net(skb->sk);
- u32 idiag_states = r->idiag_states;
- int i, num, s_i, s_num;
- struct nlattr *bc;
- struct sock *sk;
-
- bc = cb_data->inet_diag_nla_bc;
- if (idiag_states & TCPF_SYN_RECV)
- idiag_states |= TCPF_NEW_SYN_RECV;
- s_i = cb->args[1];
- s_num = num = cb->args[2];
-
- if (cb->args[0] == 0) {
- if (!(idiag_states & TCPF_LISTEN) || r->id.idiag_dport)
- goto skip_listen_ht;
-
- for (i = s_i; i <= hashinfo->lhash2_mask; i++) {
- struct inet_listen_hashbucket *ilb;
- struct hlist_nulls_node *node;
-
- num = 0;
- ilb = &hashinfo->lhash2[i];
-
- if (hlist_nulls_empty(&ilb->nulls_head)) {
- s_num = 0;
- continue;
- }
- spin_lock(&ilb->lock);
- sk_nulls_for_each(sk, node, &ilb->nulls_head) {
- struct inet_sock *inet = inet_sk(sk);
-
- if (!net_eq(sock_net(sk), net))
- continue;
-
- if (num < s_num) {
- num++;
- continue;
- }
-
- if (r->sdiag_family != AF_UNSPEC &&
- sk->sk_family != r->sdiag_family)
- goto next_listen;
-
- if (r->id.idiag_sport != inet->inet_sport &&
- r->id.idiag_sport)
- goto next_listen;
-
- if (!inet_diag_bc_sk(bc, sk))
- goto next_listen;
-
- if (inet_sk_diag_fill(sk, inet_csk(sk), skb,
- cb, r, NLM_F_MULTI,
- net_admin) < 0) {
- spin_unlock(&ilb->lock);
- goto done;
- }
-
-next_listen:
- ++num;
- }
- spin_unlock(&ilb->lock);
-
- s_num = 0;
- }
-skip_listen_ht:
- cb->args[0] = 1;
- s_i = num = s_num = 0;
- }
-
-/* Process a maximum of SKARR_SZ sockets at a time when walking hash buckets
- * with bh disabled.
- */
-#define SKARR_SZ 16
-
- /* Dump bound but inactive (not listening, connecting, etc.) sockets */
- if (cb->args[0] == 1) {
- if (!(idiag_states & TCPF_BOUND_INACTIVE))
- goto skip_bind_ht;
-
- for (i = s_i; i < hashinfo->bhash_size; i++) {
- struct inet_bind_hashbucket *ibb;
- struct inet_bind2_bucket *tb2;
- struct sock *sk_arr[SKARR_SZ];
- int num_arr[SKARR_SZ];
- int idx, accum, res;
-
-resume_bind_walk:
- num = 0;
- accum = 0;
- ibb = &hashinfo->bhash2[i];
-
- if (hlist_empty(&ibb->chain)) {
- s_num = 0;
- continue;
- }
- spin_lock_bh(&ibb->lock);
- inet_bind_bucket_for_each(tb2, &ibb->chain) {
- if (!net_eq(ib2_net(tb2), net))
- continue;
-
- sk_for_each_bound(sk, &tb2->owners) {
- struct inet_sock *inet = inet_sk(sk);
-
- if (num < s_num)
- goto next_bind;
-
- if (sk->sk_state != TCP_CLOSE ||
- !inet->inet_num)
- goto next_bind;
-
- if (r->sdiag_family != AF_UNSPEC &&
- r->sdiag_family != sk->sk_family)
- goto next_bind;
-
- if (!inet_diag_bc_sk(bc, sk))
- goto next_bind;
-
- sock_hold(sk);
- num_arr[accum] = num;
- sk_arr[accum] = sk;
- if (++accum == SKARR_SZ)
- goto pause_bind_walk;
-next_bind:
- num++;
- }
- }
-pause_bind_walk:
- spin_unlock_bh(&ibb->lock);
-
- res = 0;
- for (idx = 0; idx < accum; idx++) {
- if (res >= 0) {
- res = inet_sk_diag_fill(sk_arr[idx],
- NULL, skb, cb,
- r, NLM_F_MULTI,
- net_admin);
- if (res < 0)
- num = num_arr[idx];
- }
- sock_put(sk_arr[idx]);
- }
- if (res < 0)
- goto done;
-
- cond_resched();
-
- if (accum == SKARR_SZ) {
- s_num = num + 1;
- goto resume_bind_walk;
- }
-
- s_num = 0;
- }
-skip_bind_ht:
- cb->args[0] = 2;
- s_i = num = s_num = 0;
- }
-
- if (!(idiag_states & ~TCPF_LISTEN))
- goto out;
-
- for (i = s_i; i <= hashinfo->ehash_mask; i++) {
- struct inet_ehash_bucket *head = &hashinfo->ehash[i];
- spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
- struct hlist_nulls_node *node;
- struct sock *sk_arr[SKARR_SZ];
- int num_arr[SKARR_SZ];
- int idx, accum, res;
-
- if (hlist_nulls_empty(&head->chain))
- continue;
-
- if (i > s_i)
- s_num = 0;
-
-next_chunk:
- num = 0;
- accum = 0;
- spin_lock_bh(lock);
- sk_nulls_for_each(sk, node, &head->chain) {
- int state;
-
- if (!net_eq(sock_net(sk), net))
- continue;
- if (num < s_num)
- goto next_normal;
- state = (sk->sk_state == TCP_TIME_WAIT) ?
- READ_ONCE(inet_twsk(sk)->tw_substate) : sk->sk_state;
- if (!(idiag_states & (1 << state)))
- goto next_normal;
- if (r->sdiag_family != AF_UNSPEC &&
- sk->sk_family != r->sdiag_family)
- goto next_normal;
- if (r->id.idiag_sport != htons(sk->sk_num) &&
- r->id.idiag_sport)
- goto next_normal;
- if (r->id.idiag_dport != sk->sk_dport &&
- r->id.idiag_dport)
- goto next_normal;
- twsk_build_assert();
-
- if (!inet_diag_bc_sk(bc, sk))
- goto next_normal;
-
- if (!refcount_inc_not_zero(&sk->sk_refcnt))
- goto next_normal;
-
- num_arr[accum] = num;
- sk_arr[accum] = sk;
- if (++accum == SKARR_SZ)
- break;
-next_normal:
- ++num;
- }
- spin_unlock_bh(lock);
- res = 0;
- for (idx = 0; idx < accum; idx++) {
- if (res >= 0) {
- res = sk_diag_fill(sk_arr[idx], skb, cb, r,
- NLM_F_MULTI, net_admin);
- if (res < 0)
- num = num_arr[idx];
- }
- sock_gen_put(sk_arr[idx]);
- }
- if (res < 0)
- break;
- cond_resched();
- if (accum == SKARR_SZ) {
- s_num = num + 1;
- goto next_chunk;
- }
- }
-
-done:
- cb->args[1] = i;
- cb->args[2] = num;
-out:
- ;
-}
-EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
-
static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
const struct inet_diag_req_v2 *r)
{
@@ -1319,13 +854,10 @@ static int __inet_diag_dump_start(struct netlink_callback *cb, int hdrlen)
kfree(cb_data);
return err;
}
- nla = cb_data->inet_diag_nla_bc;
- if (nla) {
- err = inet_diag_bc_audit(nla, skb);
- if (err) {
- kfree(cb_data);
- return err;
- }
+ err = inet_diag_bc_audit(cb_data, skb);
+ if (err) {
+ kfree(cb_data);
+ return err;
}
nla = cb_data->inet_diag_nla_bpf_stgs;
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 470ab17ceb51..025895eb6ec5 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -183,7 +183,7 @@ static void fqdir_work_fn(struct work_struct *work)
rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);
if (llist_add(&fqdir->free_list, &fqdir_free_list))
- queue_delayed_work(system_wq, &fqdir_free_work, HZ);
+ queue_delayed_work(system_percpu_wq, &fqdir_free_work, HZ);
}
int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net)
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 77a0b52b2eab..b7024e3d9ac3 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -58,6 +58,14 @@ static u32 sk_ehashfn(const struct sock *sk)
sk->sk_daddr, sk->sk_dport);
}
+static bool sk_is_connect_bind(const struct sock *sk)
+{
+ if (sk->sk_state == TCP_TIME_WAIT)
+ return inet_twsk(sk)->tw_connect_bind;
+ else
+ return sk->sk_userlocks & SOCK_CONNECT_BIND;
+}
+
/*
* Allocate and initialize a new local port bind bucket.
* The bindhash mutex for snum's hash chain must be held here.
@@ -87,10 +95,22 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
*/
void inet_bind_bucket_destroy(struct inet_bind_bucket *tb)
{
+ const struct inet_bind2_bucket *tb2;
+
if (hlist_empty(&tb->bhash2)) {
hlist_del_rcu(&tb->node);
kfree_rcu(tb, rcu);
+ return;
+ }
+
+ if (tb->fastreuse == -1 && tb->fastreuseport == -1)
+ return;
+ hlist_for_each_entry(tb2, &tb->bhash2, bhash_node) {
+ if (tb2->fastreuse != -1 || tb2->fastreuseport != -1)
+ return;
}
+ tb->fastreuse = -1;
+ tb->fastreuseport = -1;
}
bool inet_bind_bucket_match(const struct inet_bind_bucket *tb, const struct net *net,
@@ -121,6 +141,8 @@ static void inet_bind2_bucket_init(struct inet_bind2_bucket *tb2,
#else
tb2->rcv_saddr = sk->sk_rcv_saddr;
#endif
+ tb2->fastreuse = 0;
+ tb2->fastreuseport = 0;
INIT_HLIST_HEAD(&tb2->owners);
hlist_add_head(&tb2->node, &head->chain);
hlist_add_head(&tb2->bhash_node, &tb->bhash2);
@@ -143,11 +165,23 @@ struct inet_bind2_bucket *inet_bind2_bucket_create(struct kmem_cache *cachep,
/* Caller must hold hashbucket lock for this tb with local BH disabled */
void inet_bind2_bucket_destroy(struct kmem_cache *cachep, struct inet_bind2_bucket *tb)
{
+ const struct sock *sk;
+
if (hlist_empty(&tb->owners)) {
__hlist_del(&tb->node);
__hlist_del(&tb->bhash_node);
kmem_cache_free(cachep, tb);
+ return;
+ }
+
+ if (tb->fastreuse == -1 && tb->fastreuseport == -1)
+ return;
+ sk_for_each_bound(sk, &tb->owners) {
+ if (!sk_is_connect_bind(sk))
+ return;
}
+ tb->fastreuse = -1;
+ tb->fastreuseport = -1;
}
static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2,
@@ -191,6 +225,7 @@ static void __inet_put_port(struct sock *sk)
tb = inet_csk(sk)->icsk_bind_hash;
inet_csk(sk)->icsk_bind_hash = NULL;
inet_sk(sk)->inet_num = 0;
+ sk->sk_userlocks &= ~SOCK_CONNECT_BIND;
spin_lock(&head2->lock);
if (inet_csk(sk)->icsk_bind2_hash) {
@@ -277,7 +312,7 @@ bhash2_find:
}
}
if (update_fastreuse)
- inet_csk_update_fastreuse(tb, child);
+ inet_csk_update_fastreuse(child, tb, tb2);
inet_bind_hash(child, tb, tb2, port);
spin_unlock(&head2->lock);
spin_unlock(&head->lock);
@@ -425,19 +460,18 @@ struct sock *inet_lookup_run_sk_lookup(const struct net *net,
}
struct sock *__inet_lookup_listener(const struct net *net,
- struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const __be32 saddr, __be16 sport,
const __be32 daddr, const unsigned short hnum,
const int dif, const int sdif)
{
struct inet_listen_hashbucket *ilb2;
+ struct inet_hashinfo *hashinfo;
struct sock *result = NULL;
unsigned int hash2;
/* Lookup redirect from BPF */
- if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
- hashinfo == net->ipv4.tcp_death_row.hashinfo) {
+ if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
result = inet_lookup_run_sk_lookup(net, IPPROTO_TCP, skb, doff,
saddr, sport, daddr, hnum, dif,
inet_ehashfn);
@@ -445,6 +479,7 @@ struct sock *__inet_lookup_listener(const struct net *net,
goto done;
}
+ hashinfo = net->ipv4.tcp_death_row.hashinfo;
hash2 = ipv4_portaddr_hash(net, daddr, hnum);
ilb2 = inet_lhash2_bucket(hashinfo, hash2);
@@ -490,21 +525,22 @@ void sock_edemux(struct sk_buff *skb)
EXPORT_SYMBOL(sock_edemux);
struct sock *__inet_lookup_established(const struct net *net,
- struct inet_hashinfo *hashinfo,
- const __be32 saddr, const __be16 sport,
- const __be32 daddr, const u16 hnum,
- const int dif, const int sdif)
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr, const u16 hnum,
+ const int dif, const int sdif)
{
- INET_ADDR_COOKIE(acookie, saddr, daddr);
const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
- struct sock *sk;
+ INET_ADDR_COOKIE(acookie, saddr, daddr);
const struct hlist_nulls_node *node;
- /* Optimize here for direct hit, only listening connections can
- * have wildcards anyways.
- */
- unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
- unsigned int slot = hash & hashinfo->ehash_mask;
- struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
+ struct inet_ehash_bucket *head;
+ struct inet_hashinfo *hashinfo;
+ unsigned int hash, slot;
+ struct sock *sk;
+
+ hashinfo = net->ipv4.tcp_death_row.hashinfo;
+ hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
+ slot = hash & hashinfo->ehash_mask;
+ head = &hashinfo->ehash[slot];
begin:
sk_nulls_for_each_rcu(sk, node, &head->chain) {
@@ -579,8 +615,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
if (likely(inet_match(net, sk2, acookie, ports, dif, sdif))) {
if (sk2->sk_state == TCP_TIME_WAIT) {
tw = inet_twsk(sk2);
- if (sk->sk_protocol == IPPROTO_TCP &&
- tcp_twsk_unique(sk, sk2, twp))
+ if (tcp_twsk_unique(sk, sk2, twp))
break;
}
goto not_unique;
@@ -707,7 +742,7 @@ bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk)
if (ok) {
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
} else {
- this_cpu_inc(*sk->sk_prot->orphan_count);
+ tcp_orphan_count_inc();
inet_sk_set_state(sk, TCP_CLOSE);
sock_set_flag(sk, SOCK_DEAD);
inet_csk_destroy_sock(sk);
@@ -721,8 +756,8 @@ static int inet_reuseport_add_sock(struct sock *sk,
{
struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
const struct hlist_nulls_node *node;
+ kuid_t uid = sk_uid(sk);
struct sock *sk2;
- kuid_t uid = sock_i_uid(sk);
sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
if (sk2 != sk &&
@@ -730,7 +765,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
inet_csk(sk2)->icsk_bind_hash == tb &&
- sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
+ sk2->sk_reuseport && uid_eq(uid, sk_uid(sk2)) &&
inet_rcv_saddr_equal(sk, sk2, false))
return reuseport_add_sock(sk, sk2,
inet_rcv_saddr_any(sk));
@@ -739,15 +774,18 @@ static int inet_reuseport_add_sock(struct sock *sk,
return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}
-int __inet_hash(struct sock *sk, struct sock *osk)
+int inet_hash(struct sock *sk)
{
struct inet_hashinfo *hashinfo = tcp_get_hashinfo(sk);
struct inet_listen_hashbucket *ilb2;
int err = 0;
+ if (sk->sk_state == TCP_CLOSE)
+ return 0;
+
if (sk->sk_state != TCP_LISTEN) {
local_bh_disable();
- inet_ehash_nolisten(sk, osk, NULL);
+ inet_ehash_nolisten(sk, NULL, NULL);
local_bh_enable();
return 0;
}
@@ -772,17 +810,7 @@ unlock:
return err;
}
-EXPORT_IPV6_MOD(__inet_hash);
-
-int inet_hash(struct sock *sk)
-{
- int err = 0;
-
- if (sk->sk_state != TCP_CLOSE)
- err = __inet_hash(sk, NULL);
-
- return err;
-}
+EXPORT_IPV6_MOD(inet_hash);
void inet_unhash(struct sock *sk)
{
@@ -800,11 +828,6 @@ void inet_unhash(struct sock *sk)
* avoid circular locking dependency on PREEMPT_RT.
*/
spin_lock(&ilb2->lock);
- if (sk_unhashed(sk)) {
- spin_unlock(&ilb2->lock);
- return;
- }
-
if (rcu_access_pointer(sk->sk_reuseport_cb))
reuseport_stop_listen_sock(sk);
@@ -815,10 +838,6 @@ void inet_unhash(struct sock *sk)
spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
spin_lock_bh(lock);
- if (sk_unhashed(sk)) {
- spin_unlock_bh(lock);
- return;
- }
__sk_nulls_del_node_init_rcu(sk);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
spin_unlock_bh(lock);
@@ -966,6 +985,10 @@ static int __inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family,
if (!tb2) {
tb2 = new_tb2;
inet_bind2_bucket_init(tb2, net, head2, inet_csk(sk)->icsk_bind_hash, sk);
+ if (sk_is_connect_bind(sk)) {
+ tb2->fastreuse = -1;
+ tb2->fastreuseport = -1;
+ }
}
inet_csk(sk)->icsk_bind2_hash = tb2;
sk_add_bind_node(sk, &tb2->owners);
@@ -1136,6 +1159,8 @@ ok:
head2, tb, sk);
if (!tb2)
goto error;
+ tb2->fastreuse = -1;
+ tb2->fastreuseport = -1;
}
/* Here we want to add a little bit of randomness to the next source
@@ -1148,6 +1173,7 @@ ok:
/* Head lock still held and bh's disabled */
inet_bind_hash(sk, tb, tb2, port);
+ sk->sk_userlocks |= SOCK_CONNECT_BIND;
if (sk_unhashed(sk)) {
inet_sk(sk)->inet_sport = htons(port);
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 875ff923a8ed..c96d61d08854 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -15,7 +15,8 @@
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
-
+#include <net/tcp.h>
+#include <net/psp.h>
/**
* inet_twsk_bind_unhash - unhash a timewait socket from bind hash
@@ -74,7 +75,8 @@ static void inet_twsk_kill(struct inet_timewait_sock *tw)
void inet_twsk_free(struct inet_timewait_sock *tw)
{
struct module *owner = tw->tw_prot->owner;
- twsk_destructor((struct sock *)tw);
+
+ tcp_twsk_destructor((struct sock *)tw);
kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
module_put(owner);
}
@@ -206,10 +208,14 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
tw->tw_hash = sk->sk_hash;
tw->tw_ipv6only = 0;
tw->tw_transparent = inet_test_bit(TRANSPARENT, sk);
+ tw->tw_connect_bind = !!(sk->sk_userlocks & SOCK_CONNECT_BIND);
tw->tw_prot = sk->sk_prot_creator;
atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
twsk_net_set(tw, sock_net(sk));
timer_setup(&tw->tw_timer, tw_timer_handler, 0);
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+ tw->tw_validate_xmit_skb = NULL;
+#endif
/*
* Because we use RCU lookups, we should not set tw_refcnt
* to a non null value before everything is setup for this
@@ -218,6 +224,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
refcount_set(&tw->tw_refcnt, 0);
__module_get(tw->tw_prot->owner);
+ psp_twsk_init(tw, sk);
}
return tw;
@@ -329,13 +336,13 @@ restart:
TCPF_NEW_SYN_RECV))
continue;
- if (refcount_read(&sock_net(sk)->ns.count))
+ if (check_net(sock_net(sk)))
continue;
if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
continue;
- if (refcount_read(&sock_net(sk)->ns.count)) {
+ if (check_net(sock_net(sk))) {
sock_gen_put(sk);
goto restart;
}
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 64b3fb3208af..f7012479713b 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -476,14 +476,16 @@ out_fail:
/* Process an incoming IP datagram fragment. */
int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
- struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
- int vif = l3mdev_master_ifindex_rcu(dev);
+ struct net_device *dev;
struct ipq *qp;
+ int vif;
__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
/* Lookup (or create) queue header */
rcu_read_lock();
+ dev = skb->dev ? : skb_dst_dev_rcu(skb);
+ vif = l3mdev_master_ifindex_rcu(dev);
qp = ip_find(net, ip_hdr(skb), user, vif);
if (qp) {
int ret, refs = 0;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f5b9004d6938..761a53c6a89a 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -28,6 +28,7 @@
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
+#include <net/flow.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
@@ -44,7 +45,6 @@
#include <net/gre.h>
#include <net/dst_metadata.h>
#include <net/erspan.h>
-#include <net/inet_dscp.h>
/*
Problems & solutions
@@ -930,7 +930,7 @@ static int ipgre_open(struct net_device *dev)
if (ipv4_is_multicast(t->parms.iph.daddr)) {
struct flowi4 fl4 = {
.flowi4_oif = t->parms.link,
- .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(&t->parms.iph)),
+ .flowi4_dscp = ip4h_dscp(&t->parms.iph),
.flowi4_scope = RT_SCOPE_UNIVERSE,
.flowi4_proto = IPPROTO_GRE,
.saddr = t->parms.iph.saddr,
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 30a5e9460d00..273578579a6b 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -226,6 +226,12 @@ resubmit:
static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
+ if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) {
+ __IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
+ kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
+ return 0;
+ }
+
skb_clear_delivery_time(skb);
__skb_pull(skb, skb_network_header_len(skb));
@@ -257,10 +263,11 @@ int ip_local_deliver(struct sk_buff *skb)
}
EXPORT_SYMBOL(ip_local_deliver);
-static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
+static inline enum skb_drop_reason
+ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
{
- struct ip_options *opt;
const struct iphdr *iph;
+ struct ip_options *opt;
/* It looks as overkill, because not all
IP options require packet mangling.
@@ -271,7 +278,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
*/
if (skb_cow(skb, skb_headroom(skb))) {
__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INDISCARDS);
- goto drop;
+ return SKB_DROP_REASON_NOMEM;
}
iph = ip_hdr(skb);
@@ -280,7 +287,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
if (ip_options_compile(dev_net(dev), opt, skb)) {
__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
- goto drop;
+ return SKB_DROP_REASON_IP_INHDR;
}
if (unlikely(opt->srr)) {
@@ -292,17 +299,15 @@ static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
net_info_ratelimited("source route option %pI4 -> %pI4\n",
&iph->saddr,
&iph->daddr);
- goto drop;
+ return SKB_DROP_REASON_NOT_SPECIFIED;
}
}
if (ip_options_rcv_srr(skb, dev))
- goto drop;
+ return SKB_DROP_REASON_NOT_SPECIFIED;
}
- return false;
-drop:
- return true;
+ return SKB_NOT_DROPPED_YET;
}
static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph,
@@ -313,14 +318,14 @@ static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph,
}
int tcp_v4_early_demux(struct sk_buff *skb);
-int udp_v4_early_demux(struct sk_buff *skb);
+enum skb_drop_reason udp_v4_early_demux(struct sk_buff *skb);
static int ip_rcv_finish_core(struct net *net,
struct sk_buff *skb, struct net_device *dev,
const struct sk_buff *hint)
{
const struct iphdr *iph = ip_hdr(skb);
- int err, drop_reason;
struct rtable *rt;
+ int drop_reason;
if (ip_can_use_hint(skb, iph, hint)) {
drop_reason = ip_route_use_hint(skb, iph->daddr, iph->saddr,
@@ -329,7 +334,6 @@ static int ip_rcv_finish_core(struct net *net,
goto drop_error;
}
- drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
!skb_dst(skb) &&
!skb->sk &&
@@ -345,8 +349,8 @@ static int ip_rcv_finish_core(struct net *net,
break;
case IPPROTO_UDP:
if (READ_ONCE(net->ipv4.sysctl_udp_early_demux)) {
- err = udp_v4_early_demux(skb);
- if (unlikely(err))
+ drop_reason = udp_v4_early_demux(skb);
+ if (unlikely(drop_reason))
goto drop_error;
/* must reload iph, skb->head might have changed */
@@ -365,7 +369,6 @@ static int ip_rcv_finish_core(struct net *net,
ip4h_dscp(iph), dev);
if (unlikely(drop_reason))
goto drop_error;
- drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
} else {
struct in_device *in_dev = __in_dev_get_rcu(dev);
@@ -384,8 +387,11 @@ static int ip_rcv_finish_core(struct net *net,
}
#endif
- if (iph->ihl > 5 && ip_rcv_options(skb, dev))
- goto drop;
+ if (iph->ihl > 5) {
+ drop_reason = ip_rcv_options(skb, dev);
+ if (drop_reason)
+ goto drop;
+ }
rt = skb_rtable(skb);
if (rt->rt_type == RTN_MULTICAST) {
@@ -580,9 +586,13 @@ static void ip_sublist_rcv_finish(struct list_head *head)
}
static struct sk_buff *ip_extract_route_hint(const struct net *net,
- struct sk_buff *skb, int rt_type)
+ struct sk_buff *skb)
{
- if (fib4_has_custom_rules(net) || rt_type == RTN_BROADCAST ||
+ const struct iphdr *iph = ip_hdr(skb);
+
+ if (fib4_has_custom_rules(net) ||
+ ipv4_is_lbcast(iph->daddr) ||
+ ipv4_is_zeronet(iph->daddr) ||
IPCB(skb)->flags & IPSKB_MULTIPATH)
return NULL;
@@ -611,8 +621,7 @@ static void ip_list_rcv_finish(struct net *net, struct list_head *head)
dst = skb_dst(skb);
if (curr_dst != dst) {
- hint = ip_extract_route_hint(net, skb,
- dst_rtable(dst)->rt_type);
+ hint = ip_extract_route_hint(net, skb);
/* dispatch old sublist */
if (!list_empty(&sublist))
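The ip_rcv_options() hunk above converts a bool return into an skb drop reason, where zero still means "not dropped" so callers can keep testing the result directly. A minimal userspace sketch of that convention, with made-up reason values rather than the kernel's skb_drop_reason enum:

	#include <stdio.h>

	enum drop_reason {
		NOT_DROPPED_YET = 0,	/* zero keeps "if (reason)" working */
		DROP_NOMEM,
		DROP_BAD_HDR,
	};

	/* stand-in for an option parser: returns why the packet would be
	 * dropped, or NOT_DROPPED_YET on success
	 */
	static enum drop_reason parse_options(int header_ok, int mem_ok)
	{
		if (!mem_ok)
			return DROP_NOMEM;
		if (!header_ok)
			return DROP_BAD_HDR;
		return NOT_DROPPED_YET;
	}

	int main(void)
	{
		enum drop_reason reason = parse_options(0, 1);

		if (reason)	/* non-zero: packet would be dropped */
			printf("drop, reason=%d\n", reason);
		return 0;
	}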
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index e3321932bec0..be8815ce3ac2 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -615,14 +615,13 @@ int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
}
memcpy(&nexthop, &optptr[srrptr-1], 4);
- orefdst = skb->_skb_refdst;
- skb_dst_set(skb, NULL);
+ orefdst = skb_dstref_steal(skb);
err = ip_route_input(skb, nexthop, iph->saddr, ip4h_dscp(iph),
dev) ? -EINVAL : 0;
rt2 = skb_rtable(skb);
if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
skb_dst_drop(skb);
- skb->_skb_refdst = orefdst;
+ skb_dstref_restore(skb, orefdst);
return -EINVAL;
}
refdst_drop(orefdst);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index a2705d454fd6..ff11d3a85a36 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -63,6 +63,7 @@
#include <linux/stat.h>
#include <linux/init.h>
+#include <net/flow.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
@@ -83,6 +84,7 @@
#include <linux/netfilter_bridge.h>
#include <linux/netlink.h>
#include <linux/tcp.h>
+#include <net/psp.h>
static int
ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
@@ -116,7 +118,7 @@ int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
skb->protocol = htons(ETH_P_IP);
return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
- net, sk, skb, NULL, skb_dst(skb)->dev,
+ net, sk, skb, NULL, skb_dst_dev(skb),
dst_output);
}
@@ -199,7 +201,7 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s
{
struct dst_entry *dst = skb_dst(skb);
struct rtable *rt = dst_rtable(dst);
- struct net_device *dev = dst->dev;
+ struct net_device *dev = dst_dev(dst);
unsigned int hh_len = LL_RESERVED_SPACE(dev);
struct neighbour *neigh;
bool is_v6gw = false;
@@ -425,15 +427,20 @@ int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
+ struct net_device *dev, *indev = skb->dev;
+ int ret_val;
+ rcu_read_lock();
+ dev = skb_dst_dev_rcu(skb);
skb->dev = dev;
skb->protocol = htons(ETH_P_IP);
- return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
- net, sk, skb, indev, dev,
- ip_finish_output,
- !(IPCB(skb)->flags & IPSKB_REROUTED));
+ ret_val = NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
+ net, sk, skb, indev, dev,
+ ip_finish_output,
+ !(IPCB(skb)->flags & IPSKB_REROUTED));
+ rcu_read_unlock();
+ return ret_val;
}
EXPORT_SYMBOL(ip_output);
@@ -480,7 +487,7 @@ int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
inet_sk_init_flowi4(inet, fl4);
/* sctp_v4_xmit() uses its own DSCP value */
- fl4->flowi4_tos = tos & INET_DSCP_MASK;
+ fl4->flowi4_dscp = inet_dsfield_to_dscp(tos);
/* If this fails, retransmit mechanism of transport layer will
* keep trying until route appears or the connection times
@@ -1222,8 +1229,7 @@ alloc_new_skb:
if (WARN_ON_ONCE(copy > msg->msg_iter.count))
goto error;
- err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
- sk->sk_allocation);
+ err = skb_splice_from_iter(skb, &msg->msg_iter, copy);
if (err < 0)
goto error;
copy = err;
@@ -1660,8 +1666,10 @@ void ip_send_unicast_reply(struct sock *sk, const struct sock *orig_sk,
arg->csumoffset) = csum_fold(csum_add(nskb->csum,
arg->csum));
nskb->ip_summed = CHECKSUM_NONE;
- if (orig_sk)
+ if (orig_sk) {
skb_set_owner_edemux(nskb, (struct sock *)orig_sk);
+ psp_reply_set_decrypted(orig_sk, nskb);
+ }
if (transmit_time)
nskb->tstamp_type = SKB_CLOCK_MONOTONIC;
if (txhash)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 678b8f96e3e9..158a30ae7c5f 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -568,20 +568,6 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
return 0;
}
-static void ip_tunnel_adj_headroom(struct net_device *dev, unsigned int headroom)
-{
- /* we must cap headroom to some upperlimit, else pskb_expand_head
- * will overflow header offsets in skb_headers_offset_update().
- */
- static const unsigned int max_allowed = 512;
-
- if (headroom > max_allowed)
- headroom = max_allowed;
-
- if (headroom > READ_ONCE(dev->needed_headroom))
- WRITE_ONCE(dev->needed_headroom, headroom);
-}
-
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
u8 proto, int tunnel_hlen)
{
@@ -668,7 +654,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
ip_tunnel_adj_headroom(dev, headroom);
iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
- df, !net_eq(tunnel->net, dev_net(dev)));
+ df, !net_eq(tunnel->net, dev_net(dev)), 0);
return;
tx_error:
DEV_STATS_INC(dev, tx_errors);
@@ -857,7 +843,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
ip_tunnel_adj_headroom(dev, max_headroom);
iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
- df, !net_eq(tunnel->net, dev_net(dev)));
+ df, !net_eq(tunnel->net, dev_net(dev)), 0);
return;
#if IS_ENABLED(CONFIG_IPV6)
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index f65d2f727381..2e61ac137128 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -49,7 +49,8 @@ EXPORT_SYMBOL(ip6tun_encaps);
void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 proto,
- __u8 tos, __u8 ttl, __be16 df, bool xnet)
+ __u8 tos, __u8 ttl, __be16 df, bool xnet,
+ u16 ipcb_flags)
{
int pkt_len = skb->len - skb_inner_network_offset(skb);
struct net *net = dev_net(rt->dst.dev);
@@ -62,6 +63,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
skb_clear_hash_if_not_l4(skb);
skb_dst_set(skb, &rt->dst);
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ IPCB(skb)->flags = ipcb_flags;
/* Push down and install the IP header. */
skb_push(skb, sizeof(struct iphdr));
@@ -204,6 +206,9 @@ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu)
if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
return -EINVAL;
+ if (skb_is_gso(skb))
+ skb_gso_reset(skb);
+
skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
pskb_pull(skb, ETH_HLEN);
skb_reset_network_header(skb);
@@ -298,6 +303,9 @@ static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu)
if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
return -EINVAL;
+ if (skb_is_gso(skb))
+ skb_gso_reset(skb);
+
skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
pskb_pull(skb, ETH_HLEN);
skb_reset_network_header(skb);
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 686e4f3d83aa..95b6bb78fcd2 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -229,7 +229,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
goto tx_error_icmp;
}
- tdev = dst->dev;
+ tdev = dst_dev(dst);
if (tdev == dev) {
dst_release(dst);
@@ -259,7 +259,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
xmit:
skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
skb_dst_set(skb, dst);
- skb->dev = skb_dst(skb)->dev;
+ skb->dev = skb_dst_dev(skb);
err = dst_output(tunnel->net, skb->sk, skb);
if (net_xmit_eval(err) == 0)
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 5a4fb2539b08..9a45aed508d1 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -54,6 +54,7 @@ static int ipcomp4_err(struct sk_buff *skb, u32 info)
}
/* We always hold one tunnel user reference to indicate a tunnel */
+static struct lock_class_key xfrm_state_lock_key;
static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
{
struct net *net = xs_net(x);
@@ -62,6 +63,7 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
t = xfrm_state_alloc(net);
if (!t)
goto out;
+ lockdep_set_class(&t->lock, &xfrm_state_lock_key);
t->id.proto = IPPROTO_IPIP;
t->id.spi = x->props.saddr.a4;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index c56b6fe6f0d7..22a7889876c1 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -274,9 +274,9 @@ static int __init ic_open_devs(void)
/* wait for a carrier on at least one device */
start = jiffies;
- next_msg = start + msecs_to_jiffies(20000);
+ next_msg = start + secs_to_jiffies(20);
while (time_before(jiffies, start +
- msecs_to_jiffies(carrier_timeout * 1000))) {
+ secs_to_jiffies(carrier_timeout))) {
int wait, elapsed;
rtnl_lock();
@@ -295,7 +295,7 @@ static int __init ic_open_devs(void)
elapsed = jiffies_to_msecs(jiffies - start);
wait = (carrier_timeout * 1000 - elapsed + 500) / 1000;
pr_info("Waiting up to %d more seconds for network.\n", wait);
- next_msg = jiffies + msecs_to_jiffies(20000);
+ next_msg = jiffies + secs_to_jiffies(20);
}
have_carrier:
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index a7d09ae9d761..ca9eaee4c2ef 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -42,6 +42,7 @@
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
+#include <net/flow.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
@@ -901,7 +902,7 @@ static int vif_add(struct net *net, struct mr_table *mrt,
vifc->vifc_flags | (!mrtsock ? VIFF_STATIC : 0),
(VIFF_TUNNEL | VIFF_REGISTER));
- err = dev_get_port_parent_id(dev, &ppid, true);
+ err = netif_get_port_parent_id(dev, &ppid, true);
if (err == 0) {
memcpy(v->dev_parent_id.id, ppid.id, ppid.id_len);
v->dev_parent_id.id_len = ppid.id_len;
@@ -1853,20 +1854,19 @@ static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
/* Processing handlers for ipmr_forward, under rcu_read_lock() */
-static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
- int in_vifi, struct sk_buff *skb, int vifi)
+static int ipmr_prepare_xmit(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, int vifi)
{
const struct iphdr *iph = ip_hdr(skb);
struct vif_device *vif = &mrt->vif_table[vifi];
struct net_device *vif_dev;
- struct net_device *dev;
struct rtable *rt;
struct flowi4 fl4;
int encap = 0;
vif_dev = vif_dev_read(vif);
if (!vif_dev)
- goto out_free;
+ return -1;
if (vif->flags & VIFF_REGISTER) {
WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
@@ -1874,12 +1874,9 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
DEV_STATS_INC(vif_dev, tx_packets);
ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
- goto out_free;
+ return -1;
}
- if (ipmr_forward_offloaded(skb, mrt, in_vifi, vifi))
- goto out_free;
-
if (vif->flags & VIFF_TUNNEL) {
rt = ip_route_output_ports(net, &fl4, NULL,
vif->remote, vif->local,
@@ -1887,7 +1884,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
IPPROTO_IPIP,
iph->tos & INET_DSCP_MASK, vif->link);
if (IS_ERR(rt))
- goto out_free;
+ return -1;
encap = sizeof(struct iphdr);
} else {
rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
@@ -1895,11 +1892,9 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
IPPROTO_IPIP,
iph->tos & INET_DSCP_MASK, vif->link);
if (IS_ERR(rt))
- goto out_free;
+ return -1;
}
- dev = rt->dst.dev;
-
if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
/* Do not fragment multicasts. Alas, IPv4 does not
* allow to send ICMP, so that packets will disappear
@@ -1907,14 +1902,14 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
*/
IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
ip_rt_put(rt);
- goto out_free;
+ return -1;
}
- encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;
+ encap += LL_RESERVED_SPACE(dst_dev_rcu(&rt->dst)) + rt->dst.header_len;
if (skb_cow(skb, encap)) {
ip_rt_put(rt);
- goto out_free;
+ return -1;
}
WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
@@ -1934,6 +1929,22 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
}
+ return 0;
+}
+
+static void ipmr_queue_fwd_xmit(struct net *net, struct mr_table *mrt,
+ int in_vifi, struct sk_buff *skb, int vifi)
+{
+ struct rtable *rt;
+
+ if (ipmr_forward_offloaded(skb, mrt, in_vifi, vifi))
+ goto out_free;
+
+ if (ipmr_prepare_xmit(net, mrt, skb, vifi))
+ goto out_free;
+
+ rt = skb_rtable(skb);
+
IPCB(skb)->flags |= IPSKB_FORWARDED;
/* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
@@ -1947,7 +1958,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
* result in receiving multiple packets.
*/
NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
- net, NULL, skb, skb->dev, dev,
+ net, NULL, skb, skb->dev, dst_dev_rcu(&rt->dst),
ipmr_forward_finish);
return;
@@ -1955,6 +1966,19 @@ out_free:
kfree_skb(skb);
}
+static void ipmr_queue_output_xmit(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, int vifi)
+{
+ if (ipmr_prepare_xmit(net, mrt, skb, vifi))
+ goto out_free;
+
+ ip_mc_output(net, NULL, skb);
+ return;
+
+out_free:
+ kfree_skb(skb);
+}
+
/* Called with mrt_lock or rcu_read_lock() */
static int ipmr_find_vif(const struct mr_table *mrt, struct net_device *dev)
{
@@ -2065,8 +2089,8 @@ forward:
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2)
- ipmr_queue_xmit(net, mrt, true_vifi,
- skb2, psend);
+ ipmr_queue_fwd_xmit(net, mrt, true_vifi,
+ skb2, psend);
}
psend = ct;
}
@@ -2077,10 +2101,10 @@ last_forward:
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2)
- ipmr_queue_xmit(net, mrt, true_vifi, skb2,
- psend);
+ ipmr_queue_fwd_xmit(net, mrt, true_vifi, skb2,
+ psend);
} else {
- ipmr_queue_xmit(net, mrt, true_vifi, skb, psend);
+ ipmr_queue_fwd_xmit(net, mrt, true_vifi, skb, psend);
return;
}
}
@@ -2097,7 +2121,7 @@ static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
struct flowi4 fl4 = {
.daddr = iph->daddr,
.saddr = iph->saddr,
- .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph)),
+ .flowi4_dscp = ip4h_dscp(iph),
.flowi4_oif = (rt_is_output_route(rt) ?
skb->dev->ifindex : 0),
.flowi4_iif = (rt_is_output_route(rt) ?
@@ -2214,6 +2238,110 @@ dont_forward:
return 0;
}
+static void ip_mr_output_finish(struct net *net, struct mr_table *mrt,
+ struct net_device *dev, struct sk_buff *skb,
+ struct mfc_cache *c)
+{
+ int psend = -1;
+ int ct;
+
+ atomic_long_inc(&c->_c.mfc_un.res.pkt);
+ atomic_long_add(skb->len, &c->_c.mfc_un.res.bytes);
+ WRITE_ONCE(c->_c.mfc_un.res.lastuse, jiffies);
+
+ /* Forward the frame */
+ if (c->mfc_origin == htonl(INADDR_ANY) &&
+ c->mfc_mcastgrp == htonl(INADDR_ANY)) {
+ if (ip_hdr(skb)->ttl >
+ c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
+ /* It's an (*,*) entry and the packet is not coming from
+ * the upstream: forward the packet to the upstream
+ * only.
+ */
+ psend = c->_c.mfc_parent;
+ goto last_xmit;
+ }
+ goto dont_xmit;
+ }
+
+ for (ct = c->_c.mfc_un.res.maxvif - 1;
+ ct >= c->_c.mfc_un.res.minvif; ct--) {
+ if (ip_hdr(skb)->ttl > c->_c.mfc_un.res.ttls[ct]) {
+ if (psend != -1) {
+ struct sk_buff *skb2;
+
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (skb2)
+ ipmr_queue_output_xmit(net, mrt,
+ skb2, psend);
+ }
+ psend = ct;
+ }
+ }
+
+last_xmit:
+ if (psend != -1) {
+ ipmr_queue_output_xmit(net, mrt, skb, psend);
+ return;
+ }
+
+dont_xmit:
+ kfree_skb(skb);
+}
+
+/* Multicast packets for forwarding arrive here
+ * Called with rcu_read_lock();
+ */
+int ip_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ struct rtable *rt = skb_rtable(skb);
+ struct mfc_cache *cache;
+ struct net_device *dev;
+ struct mr_table *mrt;
+ int vif;
+
+ guard(rcu)();
+
+ dev = dst_dev_rcu(&rt->dst);
+
+ if (IPCB(skb)->flags & IPSKB_FORWARDED)
+ goto mc_output;
+ if (!(IPCB(skb)->flags & IPSKB_MCROUTE))
+ goto mc_output;
+
+ skb->dev = dev;
+
+ mrt = ipmr_rt_fib_lookup(net, skb);
+ if (IS_ERR(mrt))
+ goto mc_output;
+
+ cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
+ if (!cache) {
+ vif = ipmr_find_vif(mrt, dev);
+ if (vif >= 0)
+ cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
+ vif);
+ }
+
+ /* No usable cache entry */
+ if (!cache) {
+ vif = ipmr_find_vif(mrt, dev);
+ if (vif >= 0)
+ return ipmr_cache_unresolved(mrt, vif, skb, dev);
+ goto mc_output;
+ }
+
+ vif = cache->_c.mfc_parent;
+ if (rcu_access_pointer(mrt->vif_table[vif].dev) != dev)
+ goto mc_output;
+
+ ip_mr_output_finish(net, mrt, dev, skb, cache);
+ return 0;
+
+mc_output:
+ return ip_mc_output(net, sk, skb);
+}
+
#ifdef CONFIG_IP_PIMSM_V1
/* Handle IGMP messages of PIMv1 */
int pim_rcv_v1(struct sk_buff *skb)
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 08bc3f2c0078..ce310eb779e0 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -11,21 +11,21 @@
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <linux/export.h>
+#include <net/flow.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <net/ip.h>
-#include <net/inet_dscp.h>
#include <net/netfilter/nf_queue.h>
/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned int addr_type)
{
+ struct net_device *dev = skb_dst_dev(skb);
const struct iphdr *iph = ip_hdr(skb);
struct rtable *rt;
struct flowi4 fl4 = {};
__be32 saddr = iph->saddr;
__u8 flags;
- struct net_device *dev = skb_dst(skb)->dev;
struct flow_keys flkeys;
unsigned int hh_len;
@@ -44,7 +44,7 @@ int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, un
*/
fl4.daddr = iph->daddr;
fl4.saddr = saddr;
- fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph));
+ fl4.flowi4_dscp = ip4h_dscp(iph);
fl4.flowi4_oif = sk ? sk->sk_bound_dev_if : 0;
fl4.flowi4_l3mdev = l3mdev_master_ifindex(dev);
fl4.flowi4_mark = skb->mark;
@@ -65,7 +65,10 @@ int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, un
if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
xfrm_decode_session(net, skb, flowi4_to_flowi(&fl4), AF_INET) == 0) {
struct dst_entry *dst = skb_dst(skb);
- skb_dst_set(skb, NULL);
+ /* ignore return value from skb_dstref_steal, xfrm_lookup takes
+ * care of dropping the refcnt if needed.
+ */
+ skb_dstref_steal(skb);
dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), sk, 0);
if (IS_ERR(dst))
return PTR_ERR(dst);
@@ -74,7 +77,7 @@ int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, un
#endif
/* Change in oif may mean change in hh_len. */
- hh_len = skb_dst(skb)->dev->hard_header_len;
+ hh_len = skb_dst_dev(skb)->hard_header_len;
if (skb_headroom(skb) < hh_len &&
pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
0, GFP_ATOMIC))
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index ef8009281da5..7dc9772fe2d8 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -13,8 +13,9 @@ config NF_DEFRAG_IPV4
# old sockopt interface and eval loop
config IP_NF_IPTABLES_LEGACY
tristate "Legacy IP tables support"
- default n
- select NETFILTER_XTABLES
+ depends on NETFILTER_XTABLES_LEGACY
+ depends on NETFILTER_XTABLES
+ default m if NETFILTER_XTABLES_LEGACY
help
iptables is a legacy packet classifier.
This is not needed if you are using iptables over nftables
@@ -182,8 +183,8 @@ config IP_NF_MATCH_TTL
# `filter', generic and specific targets
config IP_NF_FILTER
tristate "Packet filtering"
- default m if NETFILTER_ADVANCED=n
- select IP_NF_IPTABLES_LEGACY
+ default m if NETFILTER_ADVANCED=n || IP_NF_IPTABLES_LEGACY
+ depends on IP_NF_IPTABLES_LEGACY
help
Packet filtering defines a table `filter', which has a series of
rules for simple packet filtering at local input, forwarding and
@@ -220,10 +221,10 @@ config IP_NF_TARGET_SYNPROXY
config IP_NF_NAT
tristate "iptables NAT support"
depends on NF_CONNTRACK
+ depends on IP_NF_IPTABLES_LEGACY
default m if NETFILTER_ADVANCED=n
select NF_NAT
select NETFILTER_XT_NAT
- select IP_NF_IPTABLES_LEGACY
help
This enables the `nat' table in iptables. This allows masquerading,
port forwarding and other forms of full Network Address Port
@@ -263,8 +264,8 @@ endif # IP_NF_NAT
# mangle + specific targets
config IP_NF_MANGLE
tristate "Packet mangling"
- default m if NETFILTER_ADVANCED=n
- select IP_NF_IPTABLES_LEGACY
+ default m if NETFILTER_ADVANCED=n || IP_NF_IPTABLES_LEGACY
+ depends on IP_NF_IPTABLES_LEGACY
help
This option adds a `mangle' table to iptables: see the man page for
iptables(8). This table is used for various packet alterations
@@ -299,7 +300,7 @@ config IP_NF_TARGET_TTL
# raw + specific targets
config IP_NF_RAW
tristate 'raw table support (required for NOTRACK/TRACE)'
- select IP_NF_IPTABLES_LEGACY
+ depends on IP_NF_IPTABLES_LEGACY
help
This option adds a `raw' table to iptables. This table is the very
first in the netfilter framework and hooks in at the PREROUTING
@@ -313,7 +314,7 @@ config IP_NF_SECURITY
tristate "Security table"
depends on SECURITY
depends on NETFILTER_ADVANCED
- select IP_NF_IPTABLES_LEGACY
+ depends on IP_NF_IPTABLES_LEGACY
help
This option adds a `security' table to iptables, for use
with Mandatory Access Control (MAC) policy.
@@ -325,8 +326,9 @@ endif # IP_NF_IPTABLES
# ARP tables
config IP_NF_ARPTABLES
tristate "Legacy ARPTABLES support"
+ depends on NETFILTER_XTABLES_LEGACY
depends on NETFILTER_XTABLES
- default n
+ default n
help
arptables is a legacy packet classifier.
This is not needed if you are using arptables over nftables
@@ -342,6 +344,7 @@ config IP_NF_ARPFILTER
tristate "arptables-legacy packet filtering support"
select IP_NF_ARPTABLES
select NETFILTER_FAMILY_ARP
+ depends on NETFILTER_XTABLES_LEGACY
depends on NETFILTER_XTABLES
help
ARP packet filtering defines a table `filter', which has a series of
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index a27782d7653e..6d9bf5106868 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -8,8 +8,8 @@
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#include <net/inet_dscp.h>
#include <linux/ip.h>
+#include <net/flow.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/route.h>
@@ -76,7 +76,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
flow.daddr = iph->saddr;
flow.saddr = rpfilter_get_saddr(iph->daddr);
flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
- flow.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph));
+ flow.flowi4_dscp = ip4h_dscp(iph);
flow.flowi4_scope = RT_SCOPE_UNIVERSE;
flow.flowi4_l3mdev = l3mdev_master_ifindex_rcu(xt_in(par));
flow.flowi4_uid = sock_net_uid(xt_net(par), NULL);
diff --git a/net/ipv4/netfilter/nf_dup_ipv4.c b/net/ipv4/netfilter/nf_dup_ipv4.c
index ed08fb78cfa8..9a773502f10a 100644
--- a/net/ipv4/netfilter/nf_dup_ipv4.c
+++ b/net/ipv4/netfilter/nf_dup_ipv4.c
@@ -12,10 +12,10 @@
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <net/checksum.h>
+#include <net/flow.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/route.h>
-#include <net/inet_dscp.h>
#include <net/netfilter/ipv4/nf_dup_ipv4.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
@@ -33,7 +33,7 @@ static bool nf_dup_ipv4_route(struct net *net, struct sk_buff *skb,
fl4.flowi4_oif = oif;
fl4.daddr = gw->s_addr;
- fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph));
+ fl4.flowi4_dscp = ip4h_dscp(iph);
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
rt = ip_route_output_key(net, &fl4);
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 87fd945a0d27..fae4aa4a5f09 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -12,6 +12,15 @@
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
+static struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
+ const struct sk_buff *oldskb,
+ __u8 protocol, int ttl);
+static void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
+ const struct tcphdr *oth);
+static const struct tcphdr *
+nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
+ struct tcphdr *_oth, int hook);
+
static int nf_reject_iphdr_validate(struct sk_buff *skb)
{
struct iphdr *iph;
@@ -71,6 +80,27 @@ struct sk_buff *nf_reject_skb_v4_tcp_reset(struct net *net,
}
EXPORT_SYMBOL_GPL(nf_reject_skb_v4_tcp_reset);
+static bool nf_skb_is_icmp_unreach(const struct sk_buff *skb)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ u8 *tp, _type;
+ int thoff;
+
+ if (iph->protocol != IPPROTO_ICMP)
+ return false;
+
+ thoff = skb_network_offset(skb) + sizeof(*iph);
+
+ tp = skb_header_pointer(skb,
+ thoff + offsetof(struct icmphdr, type),
+ sizeof(_type), &_type);
+
+ if (!tp)
+ return false;
+
+ return *tp == ICMP_DEST_UNREACH;
+}
+
struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
@@ -91,6 +121,10 @@ struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
return NULL;
+ /* don't reply to ICMP_DEST_UNREACH with ICMP_DEST_UNREACH. */
+ if (nf_skb_is_icmp_unreach(oldskb))
+ return NULL;
+
/* RFC says return as much as we can without exceeding 576 bytes. */
len = min_t(unsigned int, 536, oldskb->len);
@@ -136,8 +170,9 @@ struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
}
EXPORT_SYMBOL_GPL(nf_reject_skb_v4_unreach);
-const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
- struct tcphdr *_oth, int hook)
+static const struct tcphdr *
+nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
+ struct tcphdr *_oth, int hook)
{
const struct tcphdr *oth;
@@ -163,11 +198,10 @@ const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
return oth;
}
-EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_get);
-struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
- const struct sk_buff *oldskb,
- __u8 protocol, int ttl)
+static struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
+ const struct sk_buff *oldskb,
+ __u8 protocol, int ttl)
{
struct iphdr *niph, *oiph = ip_hdr(oldskb);
@@ -188,10 +222,9 @@ struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
return niph;
}
-EXPORT_SYMBOL_GPL(nf_reject_iphdr_put);
-void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
- const struct tcphdr *oth)
+static void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
+ const struct tcphdr *oth)
{
struct iphdr *niph = ip_hdr(nskb);
struct tcphdr *tcph;
@@ -218,7 +251,6 @@ void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
nskb->csum_start = (unsigned char *)tcph - nskb->head;
nskb->csum_offset = offsetof(struct tcphdr, check);
}
-EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put);
static int nf_reject_fill_skb_dst(struct sk_buff *skb_in)
{
@@ -247,8 +279,7 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
if (!oth)
return;
- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
- nf_reject_fill_skb_dst(oldskb) < 0)
+ if (!skb_dst(oldskb) && nf_reject_fill_skb_dst(oldskb) < 0)
return;
if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -321,8 +352,7 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
if (iph->frag_off & htons(IP_OFFSET))
return;
- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
- nf_reject_fill_skb_dst(skb_in) < 0)
+ if (!skb_dst(skb_in) && nf_reject_fill_skb_dst(skb_in) < 0)
return;
if (skb_csum_unnecessary(skb_in) ||
diff --git a/net/ipv4/netfilter/nf_socket_ipv4.c b/net/ipv4/netfilter/nf_socket_ipv4.c
index a1350fc25838..5080fa5fbf6a 100644
--- a/net/ipv4/netfilter/nf_socket_ipv4.c
+++ b/net/ipv4/netfilter/nf_socket_ipv4.c
@@ -71,8 +71,7 @@ nf_socket_get_sock_v4(struct net *net, struct sk_buff *skb, const int doff,
{
switch (protocol) {
case IPPROTO_TCP:
- return inet_lookup(net, net->ipv4.tcp_death_row.hashinfo,
- skb, doff, saddr, sport, daddr, dport,
+ return inet_lookup(net, skb, doff, saddr, sport, daddr, dport,
in->ifindex);
case IPPROTO_UDP:
return udp4_lib_lookup(net, saddr, sport, daddr, dport,
diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c
index 73e66a088e25..041c3f37f237 100644
--- a/net/ipv4/netfilter/nf_tproxy_ipv4.c
+++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c
@@ -81,7 +81,6 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
const struct net_device *in,
const enum nf_tproxy_lookup_t lookup_type)
{
- struct inet_hashinfo *hinfo = net->ipv4.tcp_death_row.hashinfo;
struct sock *sk;
switch (protocol) {
@@ -95,7 +94,7 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
switch (lookup_type) {
case NF_TPROXY_LOOKUP_LISTENER:
- sk = inet_lookup_listener(net, hinfo, skb,
+ sk = inet_lookup_listener(net, skb,
ip_hdrlen(skb) + __tcp_hdrlen(hp),
saddr, sport, daddr, dport,
in->ifindex, 0);
@@ -109,7 +108,7 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
*/
break;
case NF_TPROXY_LOOKUP_ESTABLISHED:
- sk = inet_lookup_established(net, hinfo, saddr, sport,
+ sk = inet_lookup_established(net, saddr, sport,
daddr, dport, in->ifindex);
break;
default:
diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c
index 7e7c49535e3f..82af6cd76d13 100644
--- a/net/ipv4/netfilter/nft_fib_ipv4.c
+++ b/net/ipv4/netfilter/nft_fib_ipv4.c
@@ -10,7 +10,7 @@
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_fib.h>
-#include <net/inet_dscp.h>
+#include <net/flow.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/route.h>
@@ -114,7 +114,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
if (priv->flags & NFTA_FIB_F_MARK)
fl4.flowi4_mark = pkt->skb->mark;
- fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph));
+ fl4.flowi4_dscp = ip4h_dscp(iph);
if (priv->flags & NFTA_FIB_F_DADDR) {
fl4.daddr = iph->daddr;
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index 4397e89d3123..7b9d70f9b31c 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -985,8 +985,7 @@ static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
break;
}
- if (nhi->fib_nhc.nhc_lwtstate &&
- lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
+ if (lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
goto nla_put_failure;
@@ -2088,6 +2087,12 @@ static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
{
struct nh_grp_entry *nhge, *tmp;
+ /* If there is nothing to do, let's avoid the costly call to
+ * synchronize_net()
+ */
+ if (list_empty(&nh->grp_list))
+ return;
+
list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
remove_nh_grp_entry(net, nhge, nlinfo);
@@ -2400,6 +2405,13 @@ static int replace_nexthop_single(struct net *net, struct nexthop *old,
return -EINVAL;
}
+ if (!list_empty(&old->grp_list) &&
+ rtnl_dereference(new->nh_info)->fdb_nh !=
+ rtnl_dereference(old->nh_info)->fdb_nh) {
+ NL_SET_ERR_MSG(extack, "Cannot change nexthop FDB status while in a group");
+ return -EINVAL;
+ }
+
err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
if (err)
return err;
@@ -3512,12 +3524,42 @@ static int rtm_dump_walk_nexthops(struct sk_buff *skb,
int err;
s_idx = ctx->idx;
- for (node = rb_first(root); node; node = rb_next(node)) {
+
+ /* If this is not the first invocation, ctx->idx will contain the id of
+ * the last nexthop we processed. Instead of starting from the very
+ * first element of the red/black tree again and linearly skipping the
+ * (potentially large) set of nodes with an id smaller than s_idx, walk
+ * the tree and find the left-most node whose id is >= s_idx. This
+ * provides an efficient O(log n) starting point for the dump
+ * continuation.
+ */
+ if (s_idx != 0) {
+ struct rb_node *tmp = root->rb_node;
+
+ node = NULL;
+ while (tmp) {
+ struct nexthop *nh;
+
+ nh = rb_entry(tmp, struct nexthop, rb_node);
+ if (nh->id < s_idx) {
+ tmp = tmp->rb_right;
+ } else {
+ /* Track current candidate and keep looking on
+ * the left side to find the left-most
+ * (smallest id) that is still >= s_idx.
+ */
+ node = tmp;
+ tmp = tmp->rb_left;
+ }
+ }
+ } else {
+ node = rb_first(root);
+ }
+
+ for (; node; node = rb_next(node)) {
struct nexthop *nh;
nh = rb_entry(node, struct nexthop, rb_node);
- if (nh->id < s_idx)
- continue;
ctx->idx = nh->id;
err = nh_cb(skb, cb, nh, data);
@@ -3885,7 +3927,7 @@ static int nh_netdev_event(struct notifier_block *this,
nexthop_flush_dev(dev, event);
break;
case NETDEV_CHANGE:
- if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
+ if (!(netif_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
nexthop_flush_dev(dev, event);
break;
case NETDEV_CHANGEMTU:
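The rtm_dump_walk_nexthops() hunk above resumes a dump at the first nexthop whose id is >= the saved ctx->idx by descending the red/black tree instead of skipping entries linearly. A minimal userspace sketch of that lower-bound lookup, using a plain (unbalanced) binary search tree in place of the kernel rb-tree; all names are illustrative and error handling is omitted:

	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		unsigned int id;
		struct node *left, *right;
	};

	static struct node *insert(struct node *root, unsigned int id)
	{
		if (!root) {
			struct node *n = calloc(1, sizeof(*n));

			n->id = id;
			return n;
		}
		if (id < root->id)
			root->left = insert(root->left, id);
		else
			root->right = insert(root->right, id);
		return root;
	}

	/* Find the left-most node whose id is >= s_idx: remember the current
	 * candidate and keep looking on the left side, O(log n) on a balanced
	 * tree.
	 */
	static struct node *lower_bound(struct node *root, unsigned int s_idx)
	{
		struct node *candidate = NULL;

		while (root) {
			if (root->id < s_idx) {
				root = root->right;
			} else {
				candidate = root;
				root = root->left;
			}
		}
		return candidate;
	}

	int main(void)
	{
		unsigned int ids[] = { 40, 10, 30, 20, 50 };
		struct node *root = NULL, *n;
		unsigned int i;

		for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
			root = insert(root, ids[i]);

		n = lower_bound(root, 25);
		printf("first id >= 25: %u\n", n ? n->id : 0);	/* prints 30 */
		return 0;
	}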
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index c14baa6589c7..5321c5801c64 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -56,9 +56,7 @@ struct ping_table {
static struct ping_table ping_table;
struct pingv6_ops pingv6_ops;
-EXPORT_SYMBOL_GPL(pingv6_ops);
-
-static u16 ping_port_rover;
+EXPORT_IPV6_MOD_GPL(pingv6_ops);
static inline u32 ping_hashfn(const struct net *net, u32 num, u32 mask)
{
@@ -67,7 +65,6 @@ static inline u32 ping_hashfn(const struct net *net, u32 num, u32 mask)
pr_debug("hash(%u) = %u\n", num, res);
return res;
}
-EXPORT_SYMBOL_GPL(ping_hash);
static inline struct hlist_head *ping_hashslot(struct ping_table *table,
struct net *net, unsigned int num)
@@ -77,6 +74,7 @@ static inline struct hlist_head *ping_hashslot(struct ping_table *table,
int ping_get_port(struct sock *sk, unsigned short ident)
{
+ struct net *net = sock_net(sk);
struct inet_sock *isk, *isk2;
struct hlist_head *hlist;
struct sock *sk2 = NULL;
@@ -84,15 +82,16 @@ int ping_get_port(struct sock *sk, unsigned short ident)
isk = inet_sk(sk);
spin_lock(&ping_table.lock);
if (ident == 0) {
+ u16 result = net->ipv4.ping_port_rover + 1;
u32 i;
- u16 result = ping_port_rover + 1;
for (i = 0; i < (1L << 16); i++, result++) {
if (!result)
- result++; /* avoid zero */
- hlist = ping_hashslot(&ping_table, sock_net(sk),
- result);
+ continue; /* avoid zero */
+ hlist = ping_hashslot(&ping_table, net, result);
sk_for_each(sk2, hlist) {
+ if (!net_eq(sock_net(sk2), net))
+ continue;
isk2 = inet_sk(sk2);
if (isk2->inet_num == result)
@@ -100,7 +99,7 @@ int ping_get_port(struct sock *sk, unsigned short ident)
}
/* found */
- ping_port_rover = ident = result;
+ net->ipv4.ping_port_rover = ident = result;
break;
next_port:
;
@@ -108,8 +107,10 @@ next_port:
if (i >= (1L << 16))
goto fail;
} else {
- hlist = ping_hashslot(&ping_table, sock_net(sk), ident);
+ hlist = ping_hashslot(&ping_table, net, ident);
sk_for_each(sk2, hlist) {
+ if (!net_eq(sock_net(sk2), net))
+ continue;
isk2 = inet_sk(sk2);
/* BUG? Why is this reuse and not reuseaddr? ping.c
@@ -129,7 +130,7 @@ next_port:
pr_debug("was not hashed\n");
sk_add_node_rcu(sk, hlist);
sock_set_flag(sk, SOCK_RCU_FREE);
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ sock_prot_inuse_add(net, sk->sk_prot, 1);
}
spin_unlock(&ping_table.lock);
return 0;
@@ -138,15 +139,7 @@ fail:
spin_unlock(&ping_table.lock);
return -EADDRINUSE;
}
-EXPORT_SYMBOL_GPL(ping_get_port);
-
-int ping_hash(struct sock *sk)
-{
- pr_debug("ping_hash(sk->port=%u)\n", inet_sk(sk)->inet_num);
- BUG(); /* "Please do not press this button again." */
-
- return 0;
-}
+EXPORT_IPV6_MOD_GPL(ping_get_port);
void ping_unhash(struct sock *sk)
{
@@ -161,7 +154,7 @@ void ping_unhash(struct sock *sk)
}
spin_unlock(&ping_table.lock);
}
-EXPORT_SYMBOL_GPL(ping_unhash);
+EXPORT_IPV6_MOD_GPL(ping_unhash);
/* Called under rcu_read_lock() */
static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
@@ -188,6 +181,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
}
sk_for_each_rcu(sk, hslot) {
+ if (!net_eq(sock_net(sk), net))
+ continue;
isk = inet_sk(sk);
pr_debug("iterate\n");
@@ -279,7 +274,7 @@ out_release_group:
put_group_info(group_info);
return ret;
}
-EXPORT_SYMBOL_GPL(ping_init_sock);
+EXPORT_IPV6_MOD_GPL(ping_init_sock);
void ping_close(struct sock *sk, long timeout)
{
@@ -289,7 +284,7 @@ void ping_close(struct sock *sk, long timeout)
sk_common_release(sk);
}
-EXPORT_SYMBOL_GPL(ping_close);
+EXPORT_IPV6_MOD_GPL(ping_close);
static int ping_pre_connect(struct sock *sk, struct sockaddr *uaddr,
int addr_len)
@@ -467,7 +462,7 @@ out:
pr_debug("ping_v4_bind -> %d\n", err);
return err;
}
-EXPORT_SYMBOL_GPL(ping_bind);
+EXPORT_IPV6_MOD_GPL(ping_bind);
/*
* Is this a supported type of ICMP message?
@@ -600,7 +595,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
out:
return;
}
-EXPORT_SYMBOL_GPL(ping_err);
+EXPORT_IPV6_MOD_GPL(ping_err);
/*
* Copy and checksum an ICMP Echo packet from user space into a buffer
@@ -630,7 +625,7 @@ int ping_getfrag(void *from, char *to,
return 0;
}
-EXPORT_SYMBOL_GPL(ping_getfrag);
+EXPORT_IPV6_MOD_GPL(ping_getfrag);
static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
struct flowi4 *fl4)
@@ -691,7 +686,7 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
return 0;
}
-EXPORT_SYMBOL_GPL(ping_common_sendmsg);
+EXPORT_IPV6_MOD_GPL(ping_common_sendmsg);
static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
@@ -781,7 +776,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark,
ipc.tos & INET_DSCP_MASK, scope,
sk->sk_protocol, inet_sk_flowi_flags(sk), faddr,
- saddr, 0, 0, sk->sk_uid);
+ saddr, 0, 0, sk_uid(sk));
fl4.fl4_icmp_type = user_icmph.type;
fl4.fl4_icmp_code = user_icmph.code;
@@ -936,7 +931,7 @@ out:
pr_debug("ping_recvmsg -> %d\n", err);
return err;
}
-EXPORT_SYMBOL_GPL(ping_recvmsg);
+EXPORT_IPV6_MOD_GPL(ping_recvmsg);
static enum skb_drop_reason __ping_queue_rcv_skb(struct sock *sk,
struct sk_buff *skb)
@@ -957,7 +952,7 @@ int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
return __ping_queue_rcv_skb(sk, skb) ? -1 : 0;
}
-EXPORT_SYMBOL_GPL(ping_queue_rcv_skb);
+EXPORT_IPV6_MOD_GPL(ping_queue_rcv_skb);
/*
@@ -985,7 +980,7 @@ enum skb_drop_reason ping_rcv(struct sk_buff *skb)
kfree_skb_reason(skb, SKB_DROP_REASON_NO_SOCKET);
return SKB_DROP_REASON_NO_SOCKET;
}
-EXPORT_SYMBOL_GPL(ping_rcv);
+EXPORT_IPV6_MOD_GPL(ping_rcv);
struct proto ping_prot = {
.name = "PING",
@@ -1002,13 +997,12 @@ struct proto ping_prot = {
.bind = ping_bind,
.backlog_rcv = ping_queue_rcv_skb,
.release_cb = ip4_datagram_release_cb,
- .hash = ping_hash,
.unhash = ping_unhash,
.get_port = ping_get_port,
.put_port = ping_unhash,
.obj_size = sizeof(struct inet_sock),
};
-EXPORT_SYMBOL(ping_prot);
+EXPORT_IPV6_MOD(ping_prot);
#ifdef CONFIG_PROC_FS
@@ -1073,7 +1067,7 @@ void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family)
return *pos ? ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}
-EXPORT_SYMBOL_GPL(ping_seq_start);
+EXPORT_IPV6_MOD_GPL(ping_seq_start);
static void *ping_v4_seq_start(struct seq_file *seq, loff_t *pos)
{
@@ -1092,14 +1086,14 @@ void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos)
++*pos;
return sk;
}
-EXPORT_SYMBOL_GPL(ping_seq_next);
+EXPORT_IPV6_MOD_GPL(ping_seq_next);
void ping_seq_stop(struct seq_file *seq, void *v)
__releases(ping_table.lock)
{
spin_unlock(&ping_table.lock);
}
-EXPORT_SYMBOL_GPL(ping_seq_stop);
+EXPORT_IPV6_MOD_GPL(ping_seq_stop);
static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
int bucket)
@@ -1116,10 +1110,10 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0,
- from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
+ from_kuid_munged(seq_user_ns(f), sk_uid(sp)),
0, sock_i_ino(sp),
refcount_read(&sp->sk_refcnt), sp,
- atomic_read(&sp->sk_drops));
+ sk_drops_read(sp));
}
static int ping_v4_seq_show(struct seq_file *seq, void *v)
@@ -1150,6 +1144,8 @@ static int __net_init ping_v4_proc_init_net(struct net *net)
if (!proc_create_net("icmp", 0444, net->proc_net, &ping_v4_seq_ops,
sizeof(struct ping_iter_state)))
return -ENOMEM;
+
+ net->ipv4.ping_port_rover = get_random_u16();
return 0;
}
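The ping.c hunks above make the ident rover per netns, seed it randomly, and skip ident 0 with a continue rather than a bare increment. A minimal sketch of that rover search, with a stand-in in_use() check instead of the real per-netns hash-bucket walk; names and values are illustrative only:

	#include <stdio.h>

	static unsigned short port_rover;

	/* stand-in for the hash-bucket walk over already-bound sockets */
	static int in_use(unsigned short ident)
	{
		return ident == 1000 || ident == 1001;	/* pretend these are taken */
	}

	static int get_ident(void)
	{
		unsigned short result = port_rover + 1;
		unsigned int i;

		/* scan at most 2^16 values, wrapping as a 16-bit counter */
		for (i = 0; i < (1u << 16); i++, result++) {
			if (!result)
				continue;	/* avoid zero, keep scanning */
			if (!in_use(result)) {
				port_rover = result;	/* next search starts here */
				return result;
			}
		}
		return -1;	/* every ident is busy */
	}

	int main(void)
	{
		port_rover = 999;
		printf("picked ident %d\n", get_ident());	/* prints 1002 */
		return 0;
	}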
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index ea2f01584379..974afc4ecbe2 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -95,7 +95,6 @@ static const struct snmp_mib snmp4_ipstats_list[] = {
SNMP_MIB_ITEM("FragFails", IPSTATS_MIB_FRAGFAILS),
SNMP_MIB_ITEM("FragCreates", IPSTATS_MIB_FRAGCREATES),
SNMP_MIB_ITEM("OutTransmits", IPSTATS_MIB_OUTPKTS),
- SNMP_MIB_SENTINEL
};
/* Following items are displayed in /proc/net/netstat */
@@ -119,7 +118,6 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS),
SNMP_MIB_ITEM("ReasmOverlaps", IPSTATS_MIB_REASM_OVERLAPS),
- SNMP_MIB_SENTINEL
};
static const struct {
@@ -157,7 +155,6 @@ static const struct snmp_mib snmp4_tcp_list[] = {
SNMP_MIB_ITEM("InErrs", TCP_MIB_INERRS),
SNMP_MIB_ITEM("OutRsts", TCP_MIB_OUTRSTS),
SNMP_MIB_ITEM("InCsumErrors", TCP_MIB_CSUMERRORS),
- SNMP_MIB_SENTINEL
};
static const struct snmp_mib snmp4_udp_list[] = {
@@ -170,7 +167,6 @@ static const struct snmp_mib snmp4_udp_list[] = {
SNMP_MIB_ITEM("InCsumErrors", UDP_MIB_CSUMERRORS),
SNMP_MIB_ITEM("IgnoredMulti", UDP_MIB_IGNOREDMULTI),
SNMP_MIB_ITEM("MemErrors", UDP_MIB_MEMERRORS),
- SNMP_MIB_SENTINEL
};
static const struct snmp_mib snmp4_net_list[] = {
@@ -189,6 +185,7 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TWKilled", LINUX_MIB_TIMEWAITKILLED),
SNMP_MIB_ITEM("PAWSActive", LINUX_MIB_PAWSACTIVEREJECTED),
SNMP_MIB_ITEM("PAWSEstab", LINUX_MIB_PAWSESTABREJECTED),
+ SNMP_MIB_ITEM("BeyondWindow", LINUX_MIB_BEYOND_WINDOW),
SNMP_MIB_ITEM("TSEcrRejected", LINUX_MIB_TSECRREJECTED),
SNMP_MIB_ITEM("PAWSOldAck", LINUX_MIB_PAWS_OLD_ACK),
SNMP_MIB_ITEM("PAWSTimewait", LINUX_MIB_PAWS_TW_REJECTED),
@@ -308,7 +305,6 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPAOKeyNotFound", LINUX_MIB_TCPAOKEYNOTFOUND),
SNMP_MIB_ITEM("TCPAOGood", LINUX_MIB_TCPAOGOOD),
SNMP_MIB_ITEM("TCPAODroppedIcmps", LINUX_MIB_TCPAODROPPEDICMPS),
- SNMP_MIB_SENTINEL
};
static void icmpmsg_put_line(struct seq_file *seq, unsigned long *vals,
@@ -388,14 +384,15 @@ static void icmp_put(struct seq_file *seq)
*/
static int snmp_seq_show_ipstats(struct seq_file *seq, void *v)
{
+ const int cnt = ARRAY_SIZE(snmp4_ipstats_list);
+ u64 buff64[ARRAY_SIZE(snmp4_ipstats_list)];
struct net *net = seq->private;
- u64 buff64[IPSTATS_MIB_MAX];
int i;
- memset(buff64, 0, IPSTATS_MIB_MAX * sizeof(u64));
+ memset(buff64, 0, sizeof(buff64));
seq_puts(seq, "Ip: Forwarding DefaultTTL");
- for (i = 0; snmp4_ipstats_list[i].name; i++)
+ for (i = 0; i < cnt; i++)
seq_printf(seq, " %s", snmp4_ipstats_list[i].name);
seq_printf(seq, "\nIp: %d %d",
@@ -403,10 +400,10 @@ static int snmp_seq_show_ipstats(struct seq_file *seq, void *v)
READ_ONCE(net->ipv4.sysctl_ip_default_ttl));
BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0);
- snmp_get_cpu_field64_batch(buff64, snmp4_ipstats_list,
- net->mib.ip_statistics,
- offsetof(struct ipstats_mib, syncp));
- for (i = 0; snmp4_ipstats_list[i].name; i++)
+ snmp_get_cpu_field64_batch_cnt(buff64, snmp4_ipstats_list, cnt,
+ net->mib.ip_statistics,
+ offsetof(struct ipstats_mib, syncp));
+ for (i = 0; i < cnt; i++)
seq_printf(seq, " %llu", buff64[i]);
return 0;
@@ -414,20 +411,23 @@ static int snmp_seq_show_ipstats(struct seq_file *seq, void *v)
static int snmp_seq_show_tcp_udp(struct seq_file *seq, void *v)
{
+ const int udp_cnt = ARRAY_SIZE(snmp4_udp_list);
+ const int tcp_cnt = ARRAY_SIZE(snmp4_tcp_list);
unsigned long buff[TCPUDP_MIB_MAX];
struct net *net = seq->private;
int i;
- memset(buff, 0, TCPUDP_MIB_MAX * sizeof(unsigned long));
+ memset(buff, 0, tcp_cnt * sizeof(unsigned long));
seq_puts(seq, "\nTcp:");
- for (i = 0; snmp4_tcp_list[i].name; i++)
+ for (i = 0; i < tcp_cnt; i++)
seq_printf(seq, " %s", snmp4_tcp_list[i].name);
seq_puts(seq, "\nTcp:");
- snmp_get_cpu_field_batch(buff, snmp4_tcp_list,
- net->mib.tcp_statistics);
- for (i = 0; snmp4_tcp_list[i].name; i++) {
+ snmp_get_cpu_field_batch_cnt(buff, snmp4_tcp_list,
+ tcp_cnt,
+ net->mib.tcp_statistics);
+ for (i = 0; i < tcp_cnt; i++) {
/* MaxConn field is signed, RFC 2012 */
if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN)
seq_printf(seq, " %ld", buff[i]);
@@ -435,27 +435,29 @@ static int snmp_seq_show_tcp_udp(struct seq_file *seq, void *v)
seq_printf(seq, " %lu", buff[i]);
}
- memset(buff, 0, TCPUDP_MIB_MAX * sizeof(unsigned long));
+ memset(buff, 0, udp_cnt * sizeof(unsigned long));
- snmp_get_cpu_field_batch(buff, snmp4_udp_list,
- net->mib.udp_statistics);
+ snmp_get_cpu_field_batch_cnt(buff, snmp4_udp_list,
+ udp_cnt,
+ net->mib.udp_statistics);
seq_puts(seq, "\nUdp:");
- for (i = 0; snmp4_udp_list[i].name; i++)
+ for (i = 0; i < udp_cnt; i++)
seq_printf(seq, " %s", snmp4_udp_list[i].name);
seq_puts(seq, "\nUdp:");
- for (i = 0; snmp4_udp_list[i].name; i++)
+ for (i = 0; i < udp_cnt; i++)
seq_printf(seq, " %lu", buff[i]);
- memset(buff, 0, TCPUDP_MIB_MAX * sizeof(unsigned long));
+ memset(buff, 0, udp_cnt * sizeof(unsigned long));
/* the UDP and UDP-Lite MIBs are the same */
seq_puts(seq, "\nUdpLite:");
- snmp_get_cpu_field_batch(buff, snmp4_udp_list,
- net->mib.udplite_statistics);
- for (i = 0; snmp4_udp_list[i].name; i++)
+ snmp_get_cpu_field_batch_cnt(buff, snmp4_udp_list,
+ udp_cnt,
+ net->mib.udplite_statistics);
+ for (i = 0; i < udp_cnt; i++)
seq_printf(seq, " %s", snmp4_udp_list[i].name);
seq_puts(seq, "\nUdpLite:");
- for (i = 0; snmp4_udp_list[i].name; i++)
+ for (i = 0; i < udp_cnt; i++)
seq_printf(seq, " %lu", buff[i]);
seq_putc(seq, '\n');
@@ -479,8 +481,8 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
*/
static int netstat_seq_show(struct seq_file *seq, void *v)
{
- const int ip_cnt = ARRAY_SIZE(snmp4_ipextstats_list) - 1;
- const int tcp_cnt = ARRAY_SIZE(snmp4_net_list) - 1;
+ const int ip_cnt = ARRAY_SIZE(snmp4_ipextstats_list);
+ const int tcp_cnt = ARRAY_SIZE(snmp4_net_list);
struct net *net = seq->private;
unsigned long *buff;
int i;
@@ -493,8 +495,8 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
buff = kzalloc(max(tcp_cnt * sizeof(long), ip_cnt * sizeof(u64)),
GFP_KERNEL);
if (buff) {
- snmp_get_cpu_field_batch(buff, snmp4_net_list,
- net->mib.net_statistics);
+ snmp_get_cpu_field_batch_cnt(buff, snmp4_net_list, tcp_cnt,
+ net->mib.net_statistics);
for (i = 0; i < tcp_cnt; i++)
seq_printf(seq, " %lu", buff[i]);
} else {
@@ -512,7 +514,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
u64 *buff64 = (u64 *)buff;
memset(buff64, 0, ip_cnt * sizeof(u64));
- snmp_get_cpu_field64_batch(buff64, snmp4_ipextstats_list,
+ snmp_get_cpu_field64_batch_cnt(buff64, snmp4_ipextstats_list, ip_cnt,
net->mib.ip_statistics,
offsetof(struct ipstats_mib, syncp));
for (i = 0; i < ip_cnt; i++)
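The proc.c hunks above drop the SNMP_MIB_SENTINEL terminators and iterate the MIB tables by an explicit ARRAY_SIZE() count instead, which also lets the stack buffers be sized to the table rather than to the MIB maximum. A minimal sketch of that pattern, with an illustrative table rather than the kernel's definitions:

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	struct mib_item {
		const char *name;
		unsigned long value;
	};

	static const struct mib_item items[] = {
		{ "InReceives",  100 },
		{ "OutRequests",  42 },
		/* no { NULL, 0 } sentinel entry needed any more */
	};

	int main(void)
	{
		const int cnt = ARRAY_SIZE(items);
		int i;

		/* bounded loop replaces "for (i = 0; items[i].name; i++)" */
		for (i = 0; i < cnt; i++)
			printf("%s %lu\n", items[i].name, items[i].value);
		return 0;
	}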
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 6aace4d55733..d54ebb7df966 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -178,7 +178,7 @@ static int raw_v4_input(struct net *net, struct sk_buff *skb,
if (atomic_read(&sk->sk_rmem_alloc) >=
READ_ONCE(sk->sk_rcvbuf)) {
- atomic_inc(&sk->sk_drops);
+ sk_drops_inc(sk);
continue;
}
@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
int raw_rcv(struct sock *sk, struct sk_buff *skb)
{
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
- atomic_inc(&sk->sk_drops);
+ sk_drops_inc(sk);
sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_XFRM_POLICY);
return NET_RX_DROP;
}
@@ -610,7 +610,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
hdrincl ? ipc.protocol : sk->sk_protocol,
inet_sk_flowi_flags(sk) |
(hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
- daddr, saddr, 0, 0, sk->sk_uid);
+ daddr, saddr, 0, 0, sk_uid(sk));
fl4.fl4_icmp_type = 0;
fl4.fl4_icmp_code = 0;
@@ -793,6 +793,7 @@ static int raw_sk_init(struct sock *sk)
{
struct raw_sock *rp = raw_sk(sk);
+ sk->sk_drop_counters = &rp->drop_counters;
if (inet_sk(sk)->inet_num == IPPROTO_ICMP)
memset(&rp->filter, 0, sizeof(rp->filter));
return 0;
@@ -1043,9 +1044,9 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0,
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+ from_kuid_munged(seq_user_ns(seq), sk_uid(sp)),
0, sock_i_ino(sp),
- refcount_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
+ refcount_read(&sp->sk_refcnt), sp, sk_drops_read(sp));
}
static int raw_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c
index cc793bd8de25..943e5998e0ad 100644
--- a/net/ipv4/raw_diag.c
+++ b/net/ipv4/raw_diag.c
@@ -126,9 +126,9 @@ static int raw_diag_dump_one(struct netlink_callback *cb,
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *r,
- struct nlattr *bc, bool net_admin)
+ bool net_admin)
{
- if (!inet_diag_bc_sk(bc, sk))
+ if (!inet_diag_bc_sk(cb->data, sk))
return 0;
return inet_sk_diag_fill(sk, NULL, skb, cb, r, NLM_F_MULTI, net_admin);
@@ -140,17 +140,13 @@ static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
struct net *net = sock_net(skb->sk);
- struct inet_diag_dump_data *cb_data;
int num, s_num, slot, s_slot;
struct hlist_head *hlist;
struct sock *sk = NULL;
- struct nlattr *bc;
if (IS_ERR(hashinfo))
return;
- cb_data = cb->data;
- bc = cb_data->inet_diag_nla_bc;
s_slot = cb->args[0];
num = s_num = cb->args[1];
@@ -174,7 +170,7 @@ static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
if (r->id.idiag_dport != inet->inet_dport &&
r->id.idiag_dport)
goto next;
- if (sk_diag_dump(sk, skb, cb, r, bc, net_admin) < 0)
+ if (sk_diag_dump(sk, skb, cb, r, net_admin) < 0)
goto out_unlock;
next:
num++;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index fccb05fb3a79..6d27d3610c1c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -84,6 +84,7 @@
#include <linux/jhash.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
+#include <net/flow.h>
#include <net/inet_dscp.h>
#include <net/net_namespace.h>
#include <net/ip.h>
@@ -413,11 +414,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
const void *daddr)
{
const struct rtable *rt = container_of(dst, struct rtable, dst);
- struct net_device *dev = dst->dev;
+ struct net_device *dev;
struct neighbour *n;
rcu_read_lock();
-
+ dev = dst_dev_rcu(dst);
if (likely(rt->rt_gw_family == AF_INET)) {
n = ip_neigh_gw4(dev, rt->rt_gw4);
} else if (rt->rt_gw_family == AF_INET6) {
@@ -440,7 +441,7 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
const struct rtable *rt = container_of(dst, struct rtable, dst);
- struct net_device *dev = dst->dev;
+ struct net_device *dev = dst_dev(dst);
const __be32 *pkey = daddr;
if (rt->rt_gw_family == AF_INET) {
@@ -556,7 +557,8 @@ static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
inet_test_bit(HDRINCL, sk) ?
IPPROTO_RAW : sk->sk_protocol,
inet_sk_flowi_flags(sk),
- daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
+ daddr, inet->inet_saddr, 0, 0,
+ sk_uid(sk));
rcu_read_unlock();
}
@@ -716,7 +718,7 @@ static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
*/
rt = rcu_dereference(nhc->nhc_rth_input);
if (rt)
- rt->dst.obsolete = DST_OBSOLETE_KILL;
+ WRITE_ONCE(rt->dst.obsolete, DST_OBSOLETE_KILL);
for_each_possible_cpu(i) {
struct rtable __rcu **prt;
@@ -724,7 +726,7 @@ static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
rt = rcu_dereference(*prt);
if (rt)
- rt->dst.obsolete = DST_OBSOLETE_KILL;
+ WRITE_ONCE(rt->dst.obsolete, DST_OBSOLETE_KILL);
}
}
@@ -796,7 +798,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
jiffies + ip_rt_gc_timeout);
}
if (kill_route)
- rt->dst.obsolete = DST_OBSOLETE_KILL;
+ WRITE_ONCE(rt->dst.obsolete, DST_OBSOLETE_KILL);
call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
}
neigh_release(n);
@@ -841,9 +843,9 @@ static void ipv4_negative_advice(struct sock *sk,
{
struct rtable *rt = dst_rtable(dst);
- if ((dst->obsolete > 0) ||
+ if ((READ_ONCE(dst->obsolete) > 0) ||
(rt->rt_flags & RTCF_REDIRECTED) ||
- rt->dst.expires)
+ READ_ONCE(rt->dst.expires))
sk_dst_reset(sk);
}
@@ -1025,14 +1027,15 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
return;
rcu_read_lock();
- net = dev_net_rcu(dst->dev);
+ net = dst_dev_net_rcu(dst);
if (mtu < net->ipv4.ip_rt_min_pmtu) {
lock = true;
mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu);
}
if (rt->rt_pmtu == mtu && !lock &&
- time_before(jiffies, dst->expires - net->ipv4.ip_rt_mtu_expires / 2))
+ time_before(jiffies, READ_ONCE(dst->expires) -
+ net->ipv4.ip_rt_mtu_expires / 2))
goto out;
if (fib_lookup(net, fl4, &res, 0) == 0) {
@@ -1135,7 +1138,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
rt = dst_rtable(odst);
- if (odst->obsolete && !odst->ops->check(odst, 0)) {
+ if (READ_ONCE(odst->obsolete) && !odst->ops->check(odst, 0)) {
rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
if (IS_ERR(rt))
goto out;
@@ -1210,7 +1213,8 @@ INDIRECT_CALLABLE_SCOPE struct dst_entry *ipv4_dst_check(struct dst_entry *dst,
* this is indicated by setting obsolete to DST_OBSOLETE_KILL or
* DST_OBSOLETE_DEAD.
*/
- if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
+ if (READ_ONCE(dst->obsolete) != DST_OBSOLETE_FORCE_CHK ||
+ rt_is_expired(rt))
return NULL;
return dst;
}
@@ -1218,8 +1222,8 @@ EXPORT_INDIRECT_CALLABLE(ipv4_dst_check);
static void ipv4_send_dest_unreach(struct sk_buff *skb)
{
+ struct inet_skb_parm parm;
struct net_device *dev;
- struct ip_options opt;
int res;
/* Recompile ip options since IPCB may not be valid anymore.
@@ -1229,21 +1233,21 @@ static void ipv4_send_dest_unreach(struct sk_buff *skb)
ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
return;
- memset(&opt, 0, sizeof(opt));
+ memset(&parm, 0, sizeof(parm));
if (ip_hdr(skb)->ihl > 5) {
if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
return;
- opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
+ parm.opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
rcu_read_lock();
dev = skb->dev ? skb->dev : skb_rtable(skb)->dst.dev;
- res = __ip_options_compile(dev_net(dev), &opt, skb, NULL);
+ res = __ip_options_compile(dev_net(dev), &parm.opt, skb, NULL);
rcu_read_unlock();
if (res)
return;
}
- __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
+ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &parm);
}
static void ipv4_link_failure(struct sk_buff *skb)
@@ -1288,7 +1292,7 @@ void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
struct flowi4 fl4 = {
.daddr = iph->daddr,
.saddr = iph->saddr,
- .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph)),
+ .flowi4_dscp = ip4h_dscp(iph),
.flowi4_oif = rt->dst.dev->ifindex,
.flowi4_iif = skb->dev->ifindex,
.flowi4_mark = skb->mark,
@@ -1323,7 +1327,7 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
struct net *net;
rcu_read_lock();
- net = dev_net_rcu(dst->dev);
+ net = dst_dev_net_rcu(dst);
advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
net->ipv4.ip_rt_min_advmss);
rcu_read_unlock();
@@ -1570,7 +1574,7 @@ void rt_flush_dev(struct net_device *dev)
static bool rt_cache_valid(const struct rtable *rt)
{
return rt &&
- rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
+ READ_ONCE(rt->dst.obsolete) == DST_OBSOLETE_FORCE_CHK &&
!rt_is_expired(rt);
}
@@ -1684,8 +1688,8 @@ struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
else if (rt->rt_gw_family == AF_INET6)
new_rt->rt_gw6 = rt->rt_gw6;
- new_rt->dst.input = rt->dst.input;
- new_rt->dst.output = rt->dst.output;
+ new_rt->dst.input = READ_ONCE(rt->dst.input);
+ new_rt->dst.output = READ_ONCE(rt->dst.output);
new_rt->dst.error = rt->dst.error;
new_rt->dst.lastuse = jiffies;
new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
@@ -2207,7 +2211,7 @@ ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
goto martian_source;
}
- if (rt->rt_type != RTN_LOCAL)
+ if (!(rt->rt_flags & RTCF_LOCAL))
goto skip_validate_source;
reason = fib_validate_source_reason(skb, saddr, daddr, dscp, 0, dev,
@@ -2328,7 +2332,7 @@ ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
fl4.flowi4_oif = 0;
fl4.flowi4_iif = dev->ifindex;
fl4.flowi4_mark = skb->mark;
- fl4.flowi4_tos = inet_dscp_to_dsfield(dscp);
+ fl4.flowi4_dscp = dscp;
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
fl4.flowi4_flags = 0;
fl4.daddr = daddr;
@@ -2572,12 +2576,16 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
!netif_is_l3_master(dev_out))
return ERR_PTR(-EINVAL);
- if (ipv4_is_lbcast(fl4->daddr))
+ if (ipv4_is_lbcast(fl4->daddr)) {
type = RTN_BROADCAST;
- else if (ipv4_is_multicast(fl4->daddr))
+
+ /* reset fi to prevent gateway resolution */
+ fi = NULL;
+ } else if (ipv4_is_multicast(fl4->daddr)) {
type = RTN_MULTICAST;
- else if (ipv4_is_zeronet(fl4->daddr))
+ } else if (ipv4_is_zeronet(fl4->daddr)) {
return ERR_PTR(-EINVAL);
+ }
if (dev_out->flags & IFF_LOOPBACK)
flags |= RTCF_LOCAL;
@@ -2585,7 +2593,6 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
do_cache = true;
if (type == RTN_BROADCAST) {
flags |= RTCF_BROADCAST | RTCF_LOCAL;
- fi = NULL;
} else if (type == RTN_MULTICAST) {
flags |= RTCF_MULTICAST | RTCF_LOCAL;
if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
@@ -2660,7 +2667,7 @@ add:
if (IN_DEV_MFORWARD(in_dev) &&
!ipv4_is_local_multicast(fl4->daddr)) {
rth->dst.input = ip_mr_input;
- rth->dst.output = ip_mc_output;
+ rth->dst.output = ip_mr_output;
}
}
#endif
@@ -2688,7 +2695,6 @@ struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
struct rtable *rth;
fl4->flowi4_iif = LOOPBACK_IFINDEX;
- fl4->flowi4_tos &= INET_DSCP_MASK;
rcu_read_lock();
rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
@@ -2977,8 +2983,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
if (rt->dst.dev &&
nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
goto nla_put_failure;
- if (rt->dst.lwtstate &&
- lwtunnel_fill_encap(skb, rt->dst.lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
+ if (lwtunnel_fill_encap(skb, rt->dst.lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
if (rt->dst.tclassid &&
@@ -3009,7 +3014,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
}
}
- expires = rt->dst.expires;
+ expires = READ_ONCE(rt->dst.expires);
if (expires) {
unsigned long now = jiffies;
@@ -3332,7 +3337,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
fl4.daddr = dst;
fl4.saddr = src;
- fl4.flowi4_tos = inet_dscp_to_dsfield(dscp);
+ fl4.flowi4_dscp = dscp;
fl4.flowi4_oif = nla_get_u32_default(tb[RTA_OIF], 0);
fl4.flowi4_mark = mark;
fl4.flowi4_uid = uid;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 5459a78b9809..569befcf021b 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -12,6 +12,7 @@
#include <linux/export.h>
#include <net/secure_seq.h>
#include <net/tcp.h>
+#include <net/tcp_ecn.h>
#include <net/route.h>
static siphash_aligned_key_t syncookie_secret[2];
@@ -403,6 +404,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
struct tcp_sock *tp = tcp_sk(sk);
struct inet_request_sock *ireq;
struct net *net = sock_net(sk);
+ struct tcp_request_sock *treq;
struct request_sock *req;
struct sock *ret = sk;
struct flowi4 fl4;
@@ -428,6 +430,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
}
ireq = inet_rsk(req);
+ treq = tcp_rsk(req);
sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
@@ -454,7 +457,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
IPPROTO_TCP, inet_sk_flowi_flags(sk),
opt->srr ? opt->faddr : ireq->ir_rmt_addr,
- ireq->ir_loc_addr, th->source, th->dest, sk->sk_uid);
+ ireq->ir_loc_addr, th->source, th->dest,
+ sk_uid(sk));
security_req_classify_flow(req, flowi4_to_flowi_common(&fl4));
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt)) {
@@ -482,6 +486,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
if (!req->syncookie)
ireq->rcv_wscale = rcv_wscale;
ireq->ecn_ok &= cookie_ecn_ok(net, &rt->dst);
+ treq->accecn_ok = ireq->ecn_ok && cookie_accecn_ok(th);
ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst);
/* ip_queue_xmit() depends on our flow being setup
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 3a43010d726f..24dbc603cc44 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -47,6 +47,7 @@ static unsigned int udp_child_hash_entries_max = UDP_HTABLE_SIZE_MAX;
static int tcp_plb_max_rounds = 31;
static int tcp_plb_max_cong_thresh = 256;
static unsigned int tcp_tw_reuse_delay_max = TCP_PAWS_MSL * MSEC_PER_SEC;
+static int tcp_ecn_mode_max = 2;
/* obsolete */
static int sysctl_tcp_low_latency __read_mostly;
@@ -728,9 +729,27 @@ static struct ctl_table ipv4_net_table[] = {
.mode = 0644,
.proc_handler = proc_dou8vec_minmax,
.extra1 = SYSCTL_ZERO,
+ .extra2 = &tcp_ecn_mode_max,
+ },
+ {
+ .procname = "tcp_ecn_option",
+ .data = &init_net.ipv4.sysctl_tcp_ecn_option,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_TWO,
},
{
+ .procname = "tcp_ecn_option_beacon",
+ .data = &init_net.ipv4.sysctl_tcp_ecn_option_beacon,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_THREE,
+ },
+ {
.procname = "tcp_ecn_fallback",
.data = &init_net.ipv4.sysctl_tcp_ecn_fallback,
.maxlen = sizeof(u8),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f64f8276a73c..8a18aeca7ab0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -270,11 +270,14 @@
#include <net/icmp.h>
#include <net/inet_common.h>
+#include <net/inet_ecn.h>
#include <net/tcp.h>
+#include <net/tcp_ecn.h>
#include <net/mptcp.h>
#include <net/proto_memory.h>
#include <net/xfrm.h>
#include <net/ip.h>
+#include <net/psp.h>
#include <net/sock.h>
#include <net/rstreason.h>
@@ -302,8 +305,6 @@ EXPORT_PER_CPU_SYMBOL_GPL(tcp_tw_isn);
long sysctl_tcp_mem[3] __read_mostly;
EXPORT_IPV6_MOD(sysctl_tcp_mem);
-atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp; /* Current allocated memory. */
-EXPORT_IPV6_MOD(tcp_memory_allocated);
DEFINE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_memory_per_cpu_fw_alloc);
@@ -414,6 +415,22 @@ static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
return rate64;
}
+#ifdef CONFIG_TCP_MD5SIG
+void tcp_md5_destruct_sock(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (tp->md5sig_info) {
+ tcp_clear_md5_list(sk);
+ kfree(rcu_replace_pointer(tp->md5sig_info, NULL, 1));
+ static_branch_slow_dec_deferred(&tcp_md5_needed);
+ tcp_md5_release_sigpool();
+ }
+}
+EXPORT_IPV6_MOD_GPL(tcp_md5_destruct_sock);
+#endif
+
/* Address-family independent initialization for a tcp_sock.
*
* NOTE: A lot of things set to zero explicitly by call to
@@ -689,6 +706,7 @@ void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
tcb->seq = tcb->end_seq = tp->write_seq;
tcb->tcp_flags = TCPHDR_ACK;
__skb_header_release(skb);
+ psp_enqueue_set_decrypted(sk, skb);
tcp_add_write_queue_tail(sk, skb);
sk_wmem_queued_add(sk, skb->truesize);
sk_mem_charge(sk, skb->truesize);
@@ -1176,7 +1194,7 @@ restart:
goto do_error;
while (msg_data_left(msg)) {
- ssize_t copy = 0;
+ int copy = 0;
skb = tcp_write_queue_tail(sk);
if (skb)
@@ -1297,8 +1315,7 @@ new_segment:
if (!copy)
goto wait_for_space;
- err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
- sk->sk_allocation);
+ err = skb_splice_from_iter(skb, &msg->msg_iter, copy);
if (err < 0) {
if (err == -EMSGSIZE) {
tcp_mark_push(tp, skb);
@@ -1774,6 +1791,7 @@ EXPORT_IPV6_MOD(tcp_peek_len);
/* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */
int tcp_set_rcvlowat(struct sock *sk, int val)
{
+ struct tcp_sock *tp = tcp_sk(sk);
int space, cap;
if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
@@ -1792,7 +1810,9 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
space = tcp_space_from_win(sk, val);
if (space > sk->sk_rcvbuf) {
WRITE_ONCE(sk->sk_rcvbuf, space);
- WRITE_ONCE(tcp_sk(sk)->window_clamp, val);
+
+ if (tp->window_clamp && tp->window_clamp < val)
+ WRITE_ONCE(tp->window_clamp, val);
}
return 0;
}
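As a side note on the hunk above: tcp_set_rcvlowat() is reached from an ordinary SO_RCVLOWAT setsockopt(). A minimal userspace sketch, illustrative only and not part of the patch:

/* Illustration only: the userspace call that ends up in tcp_set_rcvlowat()
 * above. The watermark value passed by a caller is arbitrary/hypothetical.
 */
#include <stdio.h>
#include <sys/socket.h>

static int set_rcv_lowat(int fd, int lowat_bytes)
{
	/* Do not wake readers until lowat_bytes are queued; the kernel may
	 * grow sk_rcvbuf (and, after this change, window_clamp) so that the
	 * watermark stays reachable.
	 */
	if (setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT,
		       &lowat_bytes, sizeof(lowat_bytes)) < 0) {
		perror("setsockopt(SO_RCVLOWAT)");
		return -1;
	}
	return 0;
}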
@@ -2821,9 +2841,9 @@ found_ok_skb:
err = tcp_recvmsg_dmabuf(sk, skb, offset, msg,
used);
- if (err <= 0) {
+ if (err < 0) {
if (!copied)
- copied = -EFAULT;
+ copied = err;
break;
}
@@ -3102,8 +3122,8 @@ bool tcp_check_oom(const struct sock *sk, int shift)
void __tcp_close(struct sock *sk, long timeout)
{
+ bool data_was_unread = false;
struct sk_buff *skb;
- int data_was_unread = 0;
int state;
WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
@@ -3121,13 +3141,14 @@ void __tcp_close(struct sock *sk, long timeout)
* descriptor close, not protocol-sourced closes, because the
* reader process may not have drained the data yet!
*/
- while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
- u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
+ while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
+ u32 end_seq = TCP_SKB_CB(skb)->end_seq;
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
- len--;
- data_was_unread += len;
- __kfree_skb(skb);
+ end_seq--;
+ if (after(end_seq, tcp_sk(sk)->copied_seq))
+ data_was_unread = true;
+ tcp_eat_recv_skb(sk, skb);
}
/* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
@@ -3198,7 +3219,7 @@ adjudge_to_death:
/* remove backlog if any, without releasing ownership. */
__release_sock(sk);
- this_cpu_inc(tcp_orphan_count);
+ tcp_orphan_count_inc();
/* Have we already been destroyed by a softirq or backlog? */
if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
@@ -3330,6 +3351,7 @@ int tcp_disconnect(struct sock *sk, int flags)
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int old_state = sk->sk_state;
+ struct request_sock *req;
u32 seq;
if (old_state != TCP_CLOSE)
@@ -3379,7 +3401,7 @@ int tcp_disconnect(struct sock *sk, int flags)
WRITE_ONCE(tp->write_seq, seq);
icsk->icsk_backoff = 0;
- icsk->icsk_probes_out = 0;
+ WRITE_ONCE(icsk->icsk_probes_out, 0);
icsk->icsk_probes_tstamp = 0;
icsk->icsk_rto = TCP_TIMEOUT_INIT;
WRITE_ONCE(icsk->icsk_rto_min, TCP_RTO_MIN);
@@ -3392,6 +3414,11 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->window_clamp = 0;
tp->delivered = 0;
tp->delivered_ce = 0;
+ tp->accecn_fail_mode = 0;
+ tp->saw_accecn_opt = TCP_ACCECN_OPT_NOT_SEEN;
+ tcp_accecn_init_counters(tp);
+ tp->prev_ecnfield = 0;
+ tp->accecn_opt_tstamp = 0;
if (icsk->icsk_ca_initialized && icsk->icsk_ca_ops->release)
icsk->icsk_ca_ops->release(sk);
memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
@@ -3445,6 +3472,10 @@ int tcp_disconnect(struct sock *sk, int flags)
/* Clean up fastopen related fields */
+ req = rcu_dereference_protected(tp->fastopen_rsk,
+ lockdep_sock_is_held(sk));
+ if (req)
+ reqsk_fastopen_remove(sk, req, false);
tcp_free_fastopen_req(tp);
inet_clear_bit(DEFER_CONNECT, sk);
tp->fastopen_client_fail = 0;
@@ -3754,6 +3785,19 @@ int tcp_set_window_clamp(struct sock *sk, int val)
return 0;
}
+int tcp_sock_set_maxseg(struct sock *sk, int val)
+{
+ /* Values greater than interface MTU won't take effect. However
+ * at the point when this call is done we typically don't yet
+ * know which interface is going to be used
+ */
+ if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW))
+ return -EINVAL;
+
+ WRITE_ONCE(tcp_sk(sk)->rx_opt.user_mss, val);
+ return 0;
+}
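For orientation, the TCP_MAXSEG fast path added above is driven by the usual setsockopt() call. A minimal userspace sketch, illustrative only and with a hypothetical MSS value:

/* Illustration only: setting TCP_MAXSEG from userspace, now handled without
 * taking the socket lock via tcp_sock_set_maxseg(). A non-zero value must
 * lie within [TCP_MIN_MSS, MAX_TCP_WINDOW] or the kernel returns -EINVAL.
 */
#include <stdio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int set_user_mss(int fd, int mss)
{
	if (setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss)) < 0) {
		perror("setsockopt(TCP_MAXSEG)");
		return -1;
	}
	return 0;
}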
+
/*
* Socket option code for TCP.
*/
@@ -3880,23 +3924,13 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
WRITE_ONCE(inet_csk(sk)->icsk_delack_max, delack_max);
return 0;
}
+ case TCP_MAXSEG:
+ return tcp_sock_set_maxseg(sk, val);
}
sockopt_lock_sock(sk);
switch (optname) {
- case TCP_MAXSEG:
- /* Values greater than interface MTU won't take effect. However
- * at the point when this call is done we typically don't yet
- * know which interface is going to be used
- */
- if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) {
- err = -EINVAL;
- break;
- }
- tp->rx_opt.user_mss = val;
- break;
-
case TCP_NODELAY:
__tcp_sock_set_nodelay(sk, val);
break;
@@ -4135,6 +4169,9 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
const struct inet_connection_sock *icsk = inet_csk(sk);
+ const u8 ect1_idx = INET_ECN_ECT_1 - 1;
+ const u8 ect0_idx = INET_ECN_ECT_0 - 1;
+ const u8 ce_idx = INET_ECN_CE - 1;
unsigned long rate;
u32 now;
u64 rate64;
@@ -4261,6 +4298,16 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
if (tp->rto_stamp)
info->tcpi_total_rto_time += tcp_clock_ms() - tp->rto_stamp;
+ info->tcpi_accecn_fail_mode = tp->accecn_fail_mode;
+ info->tcpi_accecn_opt_seen = tp->saw_accecn_opt;
+ info->tcpi_received_ce = tp->received_ce;
+ info->tcpi_delivered_e1_bytes = tp->delivered_ecn_bytes[ect1_idx];
+ info->tcpi_delivered_e0_bytes = tp->delivered_ecn_bytes[ect0_idx];
+ info->tcpi_delivered_ce_bytes = tp->delivered_ecn_bytes[ce_idx];
+ info->tcpi_received_e1_bytes = tp->received_ecn_bytes[ect1_idx];
+ info->tcpi_received_e0_bytes = tp->received_ecn_bytes[ect0_idx];
+ info->tcpi_received_ce_bytes = tp->received_ecn_bytes[ce_idx];
+
unlock_sock_fast(sk, slow);
}
EXPORT_SYMBOL_GPL(tcp_get_info);
@@ -4346,7 +4393,8 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);
nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp));
- nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits);
+ nla_put_u8(stats, TCP_NLA_RECUR_RETRANS,
+ READ_ONCE(inet_csk(sk)->icsk_retransmits));
nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited);
nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh);
nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered);
@@ -4381,6 +4429,7 @@ int do_tcp_getsockopt(struct sock *sk, int level,
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct net *net = sock_net(sk);
+ int user_mss;
int val, len;
if (copy_from_sockptr(&len, optlen, sizeof(int)))
@@ -4394,9 +4443,10 @@ int do_tcp_getsockopt(struct sock *sk, int level,
switch (optname) {
case TCP_MAXSEG:
val = tp->mss_cache;
- if (tp->rx_opt.user_mss &&
+ user_mss = READ_ONCE(tp->rx_opt.user_mss);
+ if (user_mss &&
((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
- val = tp->rx_opt.user_mss;
+ val = user_mss;
if (tp->repair)
val = tp->rx_opt.mss_clamp;
break;
@@ -5053,9 +5103,10 @@ static void __init tcp_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, reordering);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, notsent_lowat);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, gso_segs);
- CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, lost_skb_hint);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, retransmit_skb_hint);
- CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_tx, 40);
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+ CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, tcp_clean_acked);
+#endif
/* TXRX read-mostly hotpath cache lines */
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, tsoffset);
@@ -5066,11 +5117,9 @@ static void __init tcp_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, lost_out);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, sacked_out);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, scaling_ratio);
- CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_txrx, 32);
/* RX read-mostly hotpath cache lines */
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, copied_seq);
- CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rcv_tstamp);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_wl1);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, tlp_high_seq);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rttvar_us);
@@ -5081,12 +5130,6 @@ static void __init tcp_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rtt_min);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, out_of_order_queue);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_ssthresh);
-#if IS_ENABLED(CONFIG_TLS_DEVICE)
- CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, tcp_clean_acked);
- CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_rx, 77);
-#else
- CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_rx, 69);
-#endif
/* TX read-write hotpath cache lines */
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, segs_out);
@@ -5100,11 +5143,11 @@ static void __init tcp_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, lsndtime);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, mdev_us);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_wstamp_ns);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, accecn_opt_tstamp);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, rtt_seq);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tsorted_sent_queue);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, highest_sack);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, ecn_flags);
- CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 89);
/* TXRX read-write hotpath cache lines */
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, pred_flags);
@@ -5119,15 +5162,13 @@ static void __init tcp_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_up);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, delivered);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, delivered_ce);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, received_ce);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, received_ecn_bytes);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, app_limited);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_wnd);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_tstamp);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rx_opt);
- /* 32bit arches with 8byte alignment on u64 fields might need padding
- * before tcp_clock_cache.
- */
- CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_txrx, 92 + 4);
-
/* RX read-write hotpath cache lines */
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_received);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, segs_in);
@@ -5138,12 +5179,12 @@ static void __init tcp_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rate_delivered);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rate_interval_us);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_rtt_last_tsecr);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, delivered_ecn_bytes);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, first_tx_mstamp);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, delivered_mstamp);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_acked);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_rtt_est);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcvq_space);
- CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_rx, 99);
}
void __init tcp_init(void)
@@ -5243,6 +5284,6 @@ void __init tcp_init(void)
tcp_v4_init();
tcp_metrics_init();
BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
- tcp_tasklet_init();
+ tcp_tsq_work_init();
mptcp_init();
}
diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
index bbb8d5f0eae7..34b8450829d0 100644
--- a/net/ipv4/tcp_ao.c
+++ b/net/ipv4/tcp_ao.c
@@ -268,9 +268,8 @@ static void tcp_ao_key_free_rcu(struct rcu_head *head)
kfree_sensitive(key);
}
-static void tcp_ao_info_free_rcu(struct rcu_head *head)
+static void tcp_ao_info_free(struct tcp_ao_info *ao)
{
- struct tcp_ao_info *ao = container_of(head, struct tcp_ao_info, rcu);
struct tcp_ao_key *key;
struct hlist_node *n;
@@ -310,7 +309,7 @@ void tcp_ao_destroy_sock(struct sock *sk, bool twsk)
if (!twsk)
tcp_ao_sk_omem_free(sk, ao);
- call_rcu(&ao->rcu, tcp_ao_info_free_rcu);
+ tcp_ao_info_free(ao);
}
void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw, struct tcp_sock *tp)
@@ -1178,7 +1177,9 @@ void tcp_ao_finish_connect(struct sock *sk, struct sk_buff *skb)
if (!ao)
return;
- WRITE_ONCE(ao->risn, tcp_hdr(skb)->seq);
+ /* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */
+ if (skb)
+ WRITE_ONCE(ao->risn, tcp_hdr(skb)->seq);
ao->rcv_sne = 0;
hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk))
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index ba581785adb4..a268e1595b22 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -408,8 +408,11 @@ more_data:
if (!psock->cork) {
psock->cork = kzalloc(sizeof(*psock->cork),
GFP_ATOMIC | __GFP_NOWARN);
- if (!psock->cork)
+ if (!psock->cork) {
+ sk_msg_free(sk, msg);
+ *copied = 0;
return -ENOMEM;
+ }
}
memcpy(psock->cork, msg, sizeof(*msg));
return 0;
diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c
index ba4d98e510e0..fbad6c35dee9 100644
--- a/net/ipv4/tcp_cdg.c
+++ b/net/ipv4/tcp_cdg.c
@@ -379,7 +379,7 @@ static void tcp_cdg_init(struct sock *sk)
/* We silently fall back to window = 1 if allocation fails. */
if (window > 1)
ca->gradients = kcalloc(window, sizeof(ca->gradients[0]),
- GFP_NOWAIT | __GFP_NOWARN);
+ GFP_NOWAIT);
ca->rtt_seq = tp->snd_nxt;
ca->shadow_wnd = tcp_snd_cwnd(tp);
}
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 45e174b8cd22..d83efd91f461 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -12,6 +12,9 @@
#include <linux/tcp.h>
+#include <net/inet_hashtables.h>
+#include <net/inet6_hashtables.h>
+#include <net/inet_timewait_sock.h>
#include <net/netlink.h>
#include <net/tcp.h>
@@ -174,27 +177,465 @@ static size_t tcp_diag_get_aux_size(struct sock *sk, bool net_admin)
size += ulp_ops->get_info_size(sk, net_admin);
}
}
- return size;
+
+ return size
+ + nla_total_size(sizeof(struct tcp_info))
+ + nla_total_size(sizeof(struct inet_diag_msg))
+ + inet_diag_msg_attrs_size()
+ + nla_total_size(sizeof(struct inet_diag_meminfo))
+ + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
+ + nla_total_size(TCP_CA_NAME_MAX)
+ + nla_total_size(sizeof(struct tcpvegas_info))
+ + 64;
+}
+
+static int tcp_twsk_diag_fill(struct sock *sk,
+ struct sk_buff *skb,
+ struct netlink_callback *cb,
+ u16 nlmsg_flags, bool net_admin)
+{
+ struct inet_timewait_sock *tw = inet_twsk(sk);
+ struct inet_diag_msg *r;
+ struct nlmsghdr *nlh;
+ long tmo;
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type,
+ sizeof(*r), nlmsg_flags);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ r = nlmsg_data(nlh);
+ DEBUG_NET_WARN_ON_ONCE(tw->tw_state != TCP_TIME_WAIT);
+
+ inet_diag_msg_common_fill(r, sk);
+ r->idiag_retrans = 0;
+
+ r->idiag_state = READ_ONCE(tw->tw_substate);
+ r->idiag_timer = 3;
+ tmo = tw->tw_timer.expires - jiffies;
+ r->idiag_expires = jiffies_delta_to_msecs(tmo);
+ r->idiag_rqueue = 0;
+ r->idiag_wqueue = 0;
+ r->idiag_uid = 0;
+ r->idiag_inode = 0;
+
+ if (net_admin && nla_put_u32(skb, INET_DIAG_MARK,
+ tw->tw_mark)) {
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+ }
+
+ nlmsg_end(skb, nlh);
+ return 0;
+}
+
+static int tcp_req_diag_fill(struct sock *sk, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ u16 nlmsg_flags, bool net_admin)
+{
+ struct request_sock *reqsk = inet_reqsk(sk);
+ struct inet_diag_msg *r;
+ struct nlmsghdr *nlh;
+ long tmo;
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ r = nlmsg_data(nlh);
+ inet_diag_msg_common_fill(r, sk);
+ r->idiag_state = TCP_SYN_RECV;
+ r->idiag_timer = 1;
+ r->idiag_retrans = READ_ONCE(reqsk->num_retrans);
+
+ BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
+ offsetof(struct sock, sk_cookie));
+
+ tmo = READ_ONCE(inet_reqsk(sk)->rsk_timer.expires) - jiffies;
+ r->idiag_expires = jiffies_delta_to_msecs(tmo);
+ r->idiag_rqueue = 0;
+ r->idiag_wqueue = 0;
+ r->idiag_uid = 0;
+ r->idiag_inode = 0;
+
+ if (net_admin && nla_put_u32(skb, INET_DIAG_MARK,
+ inet_rsk(reqsk)->ir_mark)) {
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+ }
+
+ nlmsg_end(skb, nlh);
+ return 0;
+}
+
+static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *r,
+ u16 nlmsg_flags, bool net_admin)
+{
+ if (sk->sk_state == TCP_TIME_WAIT)
+ return tcp_twsk_diag_fill(sk, skb, cb, nlmsg_flags, net_admin);
+
+ if (sk->sk_state == TCP_NEW_SYN_RECV)
+ return tcp_req_diag_fill(sk, skb, cb, nlmsg_flags, net_admin);
+
+ return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, r, nlmsg_flags,
+ net_admin);
+}
+
+static void twsk_build_assert(void)
+{
+ BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) !=
+ offsetof(struct sock, sk_family));
+
+ BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_num) !=
+ offsetof(struct inet_sock, inet_num));
+
+ BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_dport) !=
+ offsetof(struct inet_sock, inet_dport));
+
+ BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_rcv_saddr) !=
+ offsetof(struct inet_sock, inet_rcv_saddr));
+
+ BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_daddr) !=
+ offsetof(struct inet_sock, inet_daddr));
+
+#if IS_ENABLED(CONFIG_IPV6)
+ BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_rcv_saddr) !=
+ offsetof(struct sock, sk_v6_rcv_saddr));
+
+ BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_daddr) !=
+ offsetof(struct sock, sk_v6_daddr));
+#endif
}
static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
const struct inet_diag_req_v2 *r)
{
- struct inet_hashinfo *hinfo;
+ bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
+ struct inet_diag_dump_data *cb_data = cb->data;
+ struct net *net = sock_net(skb->sk);
+ u32 idiag_states = r->idiag_states;
+ struct inet_hashinfo *hashinfo;
+ int i, num, s_i, s_num;
+ struct sock *sk;
- hinfo = sock_net(cb->skb->sk)->ipv4.tcp_death_row.hashinfo;
+ hashinfo = net->ipv4.tcp_death_row.hashinfo;
+ if (idiag_states & TCPF_SYN_RECV)
+ idiag_states |= TCPF_NEW_SYN_RECV;
+ s_i = cb->args[1];
+ s_num = num = cb->args[2];
+
+ if (cb->args[0] == 0) {
+ if (!(idiag_states & TCPF_LISTEN) || r->id.idiag_dport)
+ goto skip_listen_ht;
+
+ for (i = s_i; i <= hashinfo->lhash2_mask; i++) {
+ struct inet_listen_hashbucket *ilb;
+ struct hlist_nulls_node *node;
+
+ num = 0;
+ ilb = &hashinfo->lhash2[i];
+
+ if (hlist_nulls_empty(&ilb->nulls_head)) {
+ s_num = 0;
+ continue;
+ }
+ spin_lock(&ilb->lock);
+ sk_nulls_for_each(sk, node, &ilb->nulls_head) {
+ struct inet_sock *inet = inet_sk(sk);
+
+ if (!net_eq(sock_net(sk), net))
+ continue;
+
+ if (num < s_num) {
+ num++;
+ continue;
+ }
+
+ if (r->sdiag_family != AF_UNSPEC &&
+ sk->sk_family != r->sdiag_family)
+ goto next_listen;
+
+ if (r->id.idiag_sport != inet->inet_sport &&
+ r->id.idiag_sport)
+ goto next_listen;
+
+ if (!inet_diag_bc_sk(cb_data, sk))
+ goto next_listen;
+
+ if (inet_sk_diag_fill(sk, inet_csk(sk), skb,
+ cb, r, NLM_F_MULTI,
+ net_admin) < 0) {
+ spin_unlock(&ilb->lock);
+ goto done;
+ }
+
+next_listen:
+ ++num;
+ }
+ spin_unlock(&ilb->lock);
+
+ s_num = 0;
+ }
+skip_listen_ht:
+ cb->args[0] = 1;
+ s_i = num = s_num = 0;
+ }
+
+/* Process a maximum of SKARR_SZ sockets at a time when walking hash buckets
+ * with bh disabled.
+ */
+#define SKARR_SZ 16
+
+ /* Dump bound but inactive (not listening, connecting, etc.) sockets */
+ if (cb->args[0] == 1) {
+ if (!(idiag_states & TCPF_BOUND_INACTIVE))
+ goto skip_bind_ht;
+
+ for (i = s_i; i < hashinfo->bhash_size; i++) {
+ struct inet_bind_hashbucket *ibb;
+ struct inet_bind2_bucket *tb2;
+ struct sock *sk_arr[SKARR_SZ];
+ int num_arr[SKARR_SZ];
+ int idx, accum, res;
+
+resume_bind_walk:
+ num = 0;
+ accum = 0;
+ ibb = &hashinfo->bhash2[i];
+
+ if (hlist_empty(&ibb->chain)) {
+ s_num = 0;
+ continue;
+ }
+ spin_lock_bh(&ibb->lock);
+ inet_bind_bucket_for_each(tb2, &ibb->chain) {
+ if (!net_eq(ib2_net(tb2), net))
+ continue;
+
+ sk_for_each_bound(sk, &tb2->owners) {
+ struct inet_sock *inet = inet_sk(sk);
+
+ if (num < s_num)
+ goto next_bind;
+
+ if (sk->sk_state != TCP_CLOSE ||
+ !inet->inet_num)
+ goto next_bind;
+
+ if (r->sdiag_family != AF_UNSPEC &&
+ r->sdiag_family != sk->sk_family)
+ goto next_bind;
+
+ if (!inet_diag_bc_sk(cb_data, sk))
+ goto next_bind;
+
+ sock_hold(sk);
+ num_arr[accum] = num;
+ sk_arr[accum] = sk;
+ if (++accum == SKARR_SZ)
+ goto pause_bind_walk;
+next_bind:
+ num++;
+ }
+ }
+pause_bind_walk:
+ spin_unlock_bh(&ibb->lock);
+
+ res = 0;
+ for (idx = 0; idx < accum; idx++) {
+ if (res >= 0) {
+ res = inet_sk_diag_fill(sk_arr[idx],
+ NULL, skb, cb,
+ r, NLM_F_MULTI,
+ net_admin);
+ if (res < 0)
+ num = num_arr[idx];
+ }
+ sock_put(sk_arr[idx]);
+ }
+ if (res < 0)
+ goto done;
+
+ cond_resched();
+
+ if (accum == SKARR_SZ) {
+ s_num = num + 1;
+ goto resume_bind_walk;
+ }
+
+ s_num = 0;
+ }
+skip_bind_ht:
+ cb->args[0] = 2;
+ s_i = num = s_num = 0;
+ }
- inet_diag_dump_icsk(hinfo, skb, cb, r);
+ if (!(idiag_states & ~TCPF_LISTEN))
+ goto out;
+
+ for (i = s_i; i <= hashinfo->ehash_mask; i++) {
+ struct inet_ehash_bucket *head = &hashinfo->ehash[i];
+ spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
+ struct hlist_nulls_node *node;
+ struct sock *sk_arr[SKARR_SZ];
+ int num_arr[SKARR_SZ];
+ int idx, accum, res;
+
+ if (hlist_nulls_empty(&head->chain))
+ continue;
+
+ if (i > s_i)
+ s_num = 0;
+
+next_chunk:
+ num = 0;
+ accum = 0;
+ spin_lock_bh(lock);
+ sk_nulls_for_each(sk, node, &head->chain) {
+ int state;
+
+ if (!net_eq(sock_net(sk), net))
+ continue;
+ if (num < s_num)
+ goto next_normal;
+ state = (sk->sk_state == TCP_TIME_WAIT) ?
+ READ_ONCE(inet_twsk(sk)->tw_substate) : sk->sk_state;
+ if (!(idiag_states & (1 << state)))
+ goto next_normal;
+ if (r->sdiag_family != AF_UNSPEC &&
+ sk->sk_family != r->sdiag_family)
+ goto next_normal;
+ if (r->id.idiag_sport != htons(sk->sk_num) &&
+ r->id.idiag_sport)
+ goto next_normal;
+ if (r->id.idiag_dport != sk->sk_dport &&
+ r->id.idiag_dport)
+ goto next_normal;
+ twsk_build_assert();
+
+ if (!inet_diag_bc_sk(cb_data, sk))
+ goto next_normal;
+
+ if (!refcount_inc_not_zero(&sk->sk_refcnt))
+ goto next_normal;
+
+ num_arr[accum] = num;
+ sk_arr[accum] = sk;
+ if (++accum == SKARR_SZ)
+ break;
+next_normal:
+ ++num;
+ }
+ spin_unlock_bh(lock);
+
+ res = 0;
+ for (idx = 0; idx < accum; idx++) {
+ if (res >= 0) {
+ res = sk_diag_fill(sk_arr[idx], skb, cb, r,
+ NLM_F_MULTI, net_admin);
+ if (res < 0)
+ num = num_arr[idx];
+ }
+ sock_gen_put(sk_arr[idx]);
+ }
+ if (res < 0)
+ break;
+
+ cond_resched();
+
+ if (accum == SKARR_SZ) {
+ s_num = num + 1;
+ goto next_chunk;
+ }
+ }
+
+done:
+ cb->args[1] = i;
+ cb->args[2] = num;
+out:
+ ;
+}
+
+static struct sock *tcp_diag_find_one_icsk(struct net *net,
+ const struct inet_diag_req_v2 *req)
+{
+ struct sock *sk;
+
+ rcu_read_lock();
+ if (req->sdiag_family == AF_INET) {
+ sk = inet_lookup(net, NULL, 0, req->id.idiag_dst[0],
+ req->id.idiag_dport, req->id.idiag_src[0],
+ req->id.idiag_sport, req->id.idiag_if);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (req->sdiag_family == AF_INET6) {
+ if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
+ ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
+ sk = inet_lookup(net, NULL, 0, req->id.idiag_dst[3],
+ req->id.idiag_dport, req->id.idiag_src[3],
+ req->id.idiag_sport, req->id.idiag_if);
+ else
+ sk = inet6_lookup(net, NULL, 0,
+ (struct in6_addr *)req->id.idiag_dst,
+ req->id.idiag_dport,
+ (struct in6_addr *)req->id.idiag_src,
+ req->id.idiag_sport,
+ req->id.idiag_if);
+#endif
+ } else {
+ rcu_read_unlock();
+ return ERR_PTR(-EINVAL);
+ }
+ rcu_read_unlock();
+ if (!sk)
+ return ERR_PTR(-ENOENT);
+
+ if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
+ sock_gen_put(sk);
+ return ERR_PTR(-ENOENT);
+ }
+
+ return sk;
}
static int tcp_diag_dump_one(struct netlink_callback *cb,
const struct inet_diag_req_v2 *req)
{
- struct inet_hashinfo *hinfo;
+ struct sk_buff *in_skb = cb->skb;
+ struct sk_buff *rep;
+ struct sock *sk;
+ struct net *net;
+ bool net_admin;
+ int err;
- hinfo = sock_net(cb->skb->sk)->ipv4.tcp_death_row.hashinfo;
+ net = sock_net(in_skb->sk);
+ sk = tcp_diag_find_one_icsk(net, req);
+ if (IS_ERR(sk))
+ return PTR_ERR(sk);
- return inet_diag_dump_one_icsk(hinfo, cb, req);
+ net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN);
+ rep = nlmsg_new(tcp_diag_get_aux_size(sk, net_admin), GFP_KERNEL);
+ if (!rep) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = sk_diag_fill(sk, rep, cb, req, 0, net_admin);
+ if (err < 0) {
+ WARN_ON(err == -EMSGSIZE);
+ nlmsg_free(rep);
+ goto out;
+ }
+ err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
+
+out:
+ if (sk)
+ sock_gen_put(sk);
+
+ return err;
}
#ifdef CONFIG_INET_DIAG_DESTROY
@@ -202,13 +643,10 @@ static int tcp_diag_destroy(struct sk_buff *in_skb,
const struct inet_diag_req_v2 *req)
{
struct net *net = sock_net(in_skb->sk);
- struct inet_hashinfo *hinfo;
struct sock *sk;
int err;
- hinfo = net->ipv4.tcp_death_row.hashinfo;
- sk = inet_diag_find_one_icsk(net, hinfo, req);
-
+ sk = tcp_diag_find_one_icsk(net, req);
if (IS_ERR(sk))
return PTR_ERR(sk);
@@ -226,7 +664,6 @@ static const struct inet_diag_handler tcp_diag_handler = {
.dump_one = tcp_diag_dump_one,
.idiag_get_info = tcp_diag_get_info,
.idiag_get_aux = tcp_diag_get_aux,
- .idiag_get_aux_size = tcp_diag_get_aux_size,
.idiag_type = IPPROTO_TCP,
.idiag_info_size = sizeof(struct tcp_info),
#ifdef CONFIG_INET_DIAG_DESTROY
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 9b83d639b5ac..7d945a527daf 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -3,6 +3,7 @@
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <net/tcp.h>
+#include <net/busy_poll.h>
void tcp_fastopen_init_key_once(struct net *net)
{
@@ -279,6 +280,8 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
refcount_set(&req->rsk_refcnt, 2);
+ sk_mark_napi_id_set(child, skb);
+
/* Now finish processing the fastopen child socket. */
tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);
@@ -556,6 +559,7 @@ bool tcp_fastopen_active_should_disable(struct sock *sk)
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
+ struct net_device *dev;
struct dst_entry *dst;
struct sk_buff *skb;
@@ -572,10 +576,12 @@ void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
}
} else if (tp->syn_fastopen_ch &&
atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
- dst = sk_dst_get(sk);
- if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
+ rcu_read_lock();
+ dst = __sk_dst_get(sk);
+ dev = dst ? dst_dev_rcu(dst) : NULL;
+ if (!(dev && (dev->flags & IFF_LOOPBACK)))
atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
- dst_release(dst);
+ rcu_read_unlock();
}
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8ec92dec321a..31ea5af49f2d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -70,8 +70,10 @@
#include <linux/sysctl.h>
#include <linux/kernel.h>
#include <linux/prefetch.h>
+#include <linux/bitops.h>
#include <net/dst.h>
#include <net/tcp.h>
+#include <net/tcp_ecn.h>
#include <net/proto_memory.h>
#include <net/inet_common.h>
#include <linux/ipsec.h>
@@ -339,31 +341,6 @@ static bool tcp_in_quickack_mode(struct sock *sk)
(icsk->icsk_ack.quick && !inet_csk_in_pingpong_mode(sk));
}
-static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
-{
- if (tcp_ecn_mode_rfc3168(tp))
- tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
-}
-
-static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
-{
- if (tcp_hdr(skb)->cwr) {
- tcp_sk(sk)->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
-
- /* If the sender is telling us it has entered CWR, then its
- * cwnd may be very low (even just 1 packet), so we should ACK
- * immediately.
- */
- if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)
- inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
- }
-}
-
-static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
-{
- tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
-}
-
static void tcp_data_ecn_check(struct sock *sk, const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -384,38 +361,117 @@ static void tcp_data_ecn_check(struct sock *sk, const struct sk_buff *skb)
if (tcp_ca_needs_ecn(sk))
tcp_ca_event(sk, CA_EVENT_ECN_IS_CE);
- if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
+ if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR) &&
+ tcp_ecn_mode_rfc3168(tp)) {
/* Better not delay acks, sender can have a very low cwnd */
tcp_enter_quickack_mode(sk, 2);
tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
}
+ /* As for RFC3168 ECN, the TCP_ECN_SEEN flag is set by
+ * tcp_data_ecn_check() when the ECN codepoint of
+ * received TCP data contains ECT(0), ECT(1), or CE.
+ */
+ if (!tcp_ecn_mode_rfc3168(tp))
+ break;
tp->ecn_flags |= TCP_ECN_SEEN;
break;
default:
if (tcp_ca_needs_ecn(sk))
tcp_ca_event(sk, CA_EVENT_ECN_NO_CE);
+ if (!tcp_ecn_mode_rfc3168(tp))
+ break;
tp->ecn_flags |= TCP_ECN_SEEN;
break;
}
}
-static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
+/* Returns true if the byte counters can be used */
+static bool tcp_accecn_process_option(struct tcp_sock *tp,
+ const struct sk_buff *skb,
+ u32 delivered_bytes, int flag)
{
- if (tcp_ecn_mode_rfc3168(tp) && (!th->ece || th->cwr))
- tcp_ecn_mode_set(tp, TCP_ECN_DISABLED);
-}
+ u8 estimate_ecnfield = tp->est_ecnfield;
+ bool ambiguous_ecn_bytes_incr = false;
+ bool first_changed = false;
+ unsigned int optlen;
+ bool order1, res;
+ unsigned int i;
+ u8 *ptr;
-static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
-{
- if (tcp_ecn_mode_rfc3168(tp) && (!th->ece || !th->cwr))
- tcp_ecn_mode_set(tp, TCP_ECN_DISABLED);
-}
+ if (tcp_accecn_opt_fail_recv(tp))
+ return false;
-static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
-{
- if (th->ece && !th->syn && tcp_ecn_mode_rfc3168(tp))
- return true;
- return false;
+ if (!(flag & FLAG_SLOWPATH) || !tp->rx_opt.accecn) {
+ if (!tp->saw_accecn_opt) {
+ /* Too late to enable after this point due to
+ * potential counter wraps
+ */
+ if (tp->bytes_sent >= (1 << 23) - 1) {
+ u8 saw_opt = TCP_ACCECN_OPT_FAIL_SEEN;
+
+ tcp_accecn_saw_opt_fail_recv(tp, saw_opt);
+ }
+ return false;
+ }
+
+ if (estimate_ecnfield) {
+ u8 ecnfield = estimate_ecnfield - 1;
+
+ tp->delivered_ecn_bytes[ecnfield] += delivered_bytes;
+ return true;
+ }
+ return false;
+ }
+
+ ptr = skb_transport_header(skb) + tp->rx_opt.accecn;
+ optlen = ptr[1] - 2;
+ if (WARN_ON_ONCE(ptr[0] != TCPOPT_ACCECN0 && ptr[0] != TCPOPT_ACCECN1))
+ return false;
+ order1 = (ptr[0] == TCPOPT_ACCECN1);
+ ptr += 2;
+
+ if (tp->saw_accecn_opt < TCP_ACCECN_OPT_COUNTER_SEEN) {
+ tp->saw_accecn_opt = tcp_accecn_option_init(skb,
+ tp->rx_opt.accecn);
+ if (tp->saw_accecn_opt == TCP_ACCECN_OPT_FAIL_SEEN)
+ tcp_accecn_fail_mode_set(tp, TCP_ACCECN_OPT_FAIL_RECV);
+ }
+
+ res = !!estimate_ecnfield;
+ for (i = 0; i < 3; i++) {
+ u32 init_offset;
+ u8 ecnfield;
+ s32 delta;
+ u32 *cnt;
+
+ if (optlen < TCPOLEN_ACCECN_PERFIELD)
+ break;
+
+ ecnfield = tcp_accecn_optfield_to_ecnfield(i, order1);
+ init_offset = tcp_accecn_field_init_offset(ecnfield);
+ cnt = &tp->delivered_ecn_bytes[ecnfield - 1];
+ delta = tcp_update_ecn_bytes(cnt, ptr, init_offset);
+ if (delta && delta < 0) {
+ res = false;
+ ambiguous_ecn_bytes_incr = true;
+ }
+ if (delta && ecnfield != estimate_ecnfield) {
+ if (!first_changed) {
+ tp->est_ecnfield = ecnfield;
+ first_changed = true;
+ } else {
+ res = false;
+ ambiguous_ecn_bytes_incr = true;
+ }
+ }
+
+ optlen -= TCPOLEN_ACCECN_PERFIELD;
+ ptr += TCPOLEN_ACCECN_PERFIELD;
+ }
+ if (ambiguous_ecn_bytes_incr)
+ tp->est_ecnfield = 0;
+
+ return res;
}
static void tcp_count_delivered_ce(struct tcp_sock *tp, u32 ecn_count)
@@ -428,10 +484,101 @@ static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered,
bool ece_ack)
{
tp->delivered += delivered;
- if (ece_ack)
+ if (tcp_ecn_mode_rfc3168(tp) && ece_ack)
tcp_count_delivered_ce(tp, delivered);
}
+/* Returns the ECN CE delta */
+static u32 __tcp_accecn_process(struct sock *sk, const struct sk_buff *skb,
+ u32 delivered_pkts, u32 delivered_bytes,
+ int flag)
+{
+ u32 old_ceb = tcp_sk(sk)->delivered_ecn_bytes[INET_ECN_CE - 1];
+ const struct tcphdr *th = tcp_hdr(skb);
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 delta, safe_delta, d_ceb;
+ bool opt_deltas_valid;
+ u32 corrected_ace;
+
+ /* Reordered ACK or uncertain due to lack of data to send and ts */
+ if (!(flag & (FLAG_FORWARD_PROGRESS | FLAG_TS_PROGRESS)))
+ return 0;
+
+ opt_deltas_valid = tcp_accecn_process_option(tp, skb,
+ delivered_bytes, flag);
+
+ if (!(flag & FLAG_SLOWPATH)) {
+ /* AccECN counter might overflow on large ACKs */
+ if (delivered_pkts <= TCP_ACCECN_CEP_ACE_MASK)
+ return 0;
+ }
+
+ /* ACE field is not available during handshake */
+ if (flag & FLAG_SYN_ACKED)
+ return 0;
+
+ if (tp->received_ce_pending >= TCP_ACCECN_ACE_MAX_DELTA)
+ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
+
+ corrected_ace = tcp_accecn_ace(th) - TCP_ACCECN_CEP_INIT_OFFSET;
+ delta = (corrected_ace - tp->delivered_ce) & TCP_ACCECN_CEP_ACE_MASK;
+ if (delivered_pkts <= TCP_ACCECN_CEP_ACE_MASK)
+ return delta;
+
+ safe_delta = delivered_pkts -
+ ((delivered_pkts - delta) & TCP_ACCECN_CEP_ACE_MASK);
+
+ if (opt_deltas_valid) {
+ d_ceb = tp->delivered_ecn_bytes[INET_ECN_CE - 1] - old_ceb;
+ if (!d_ceb)
+ return delta;
+
+ if ((delivered_pkts >= (TCP_ACCECN_CEP_ACE_MASK + 1) * 2) &&
+ (tcp_is_sack(tp) ||
+ ((1 << inet_csk(sk)->icsk_ca_state) &
+ (TCPF_CA_Open | TCPF_CA_CWR)))) {
+ u32 est_d_cep;
+
+ if (delivered_bytes <= d_ceb)
+ return safe_delta;
+
+ est_d_cep = DIV_ROUND_UP_ULL((u64)d_ceb *
+ delivered_pkts,
+ delivered_bytes);
+ return min(safe_delta,
+ delta +
+ (est_d_cep & ~TCP_ACCECN_CEP_ACE_MASK));
+ }
+
+ if (d_ceb > delta * tp->mss_cache)
+ return safe_delta;
+ if (d_ceb <
+ safe_delta * tp->mss_cache >> TCP_ACCECN_SAFETY_SHIFT)
+ return delta;
+ }
+
+ return safe_delta;
+}
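Since the ACE field is only a 3-bit counter, the delta computation above has to undo modulo-8 wraparound. A standalone sketch of that arithmetic, simplified (the handshake init offset is ignored and the mask value 7 is an assumption here):

/* Illustration only, not part of the patch: recovering a CE-packet delta
 * from a wrapped 3-bit ACE counter, as __tcp_accecn_process() does with
 * TCP_ACCECN_CEP_ACE_MASK.
 */
#include <assert.h>

#define ACE_MASK 7	/* assumed: 3-bit ACE field wraps modulo 8 */

static unsigned int ace_delta(unsigned int ace_now, unsigned int ce_seen)
{
	/* Modulo-8 difference; unambiguous only while fewer than 8 packets
	 * were newly delivered, which is why the caller falls back to a
	 * "safe" delta for larger ACKs.
	 */
	return (ace_now - ce_seen) & ACE_MASK;
}

int main(void)
{
	/* Local CE count 6, sender's field wrapped around to 1: 3 new marks. */
	assert(ace_delta(1, 6) == 3);
	return 0;
}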
+
+static u32 tcp_accecn_process(struct sock *sk, const struct sk_buff *skb,
+ u32 delivered_pkts, u32 delivered_bytes,
+ int *flag)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 delta;
+
+ delta = __tcp_accecn_process(sk, skb, delivered_pkts,
+ delivered_bytes, *flag);
+ if (delta > 0) {
+ tcp_count_delivered_ce(tp, delta);
+ *flag |= FLAG_ECE;
+ /* Recalculate header predictor */
+ if (tp->pred_flags)
+ tcp_fast_path_on(tp);
+ }
+ return delta;
+}
+
/* Buffer size and advertised window tuning.
*
* 1. Tuning sk->sk_sndbuf, when connection enters established state.
@@ -744,7 +891,7 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
}
}
-static void tcp_rcvbuf_grow(struct sock *sk)
+void tcp_rcvbuf_grow(struct sock *sk)
{
const struct net *net = sock_net(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -1030,6 +1177,7 @@ struct tcp_sacktag_state {
u64 last_sackt;
u32 reord;
u32 sack_delivered;
+ u32 delivered_bytes;
int flag;
unsigned int mss_now;
struct rate_sample *rate;
@@ -1391,7 +1539,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
static u8 tcp_sacktag_one(struct sock *sk,
struct tcp_sacktag_state *state, u8 sacked,
u32 start_seq, u32 end_seq,
- int dup_sack, int pcount,
+ int dup_sack, int pcount, u32 plen,
u64 xmit_time)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -1451,11 +1599,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
tp->sacked_out += pcount;
/* Out-of-order packets delivered */
state->sack_delivered += pcount;
-
- /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
- if (tp->lost_skb_hint &&
- before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
- tp->lost_cnt_hint += pcount;
+ state->delivered_bytes += plen;
}
/* D-SACK. We can detect redundant retransmission in S|R and plain R
@@ -1492,13 +1636,10 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
* tcp_highest_sack_seq() when skb is highest_sack.
*/
tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
- start_seq, end_seq, dup_sack, pcount,
+ start_seq, end_seq, dup_sack, pcount, skb->len,
tcp_skb_timestamp_us(skb));
tcp_rate_skb_delivered(sk, skb, state->rate);
- if (skb == tp->lost_skb_hint)
- tp->lost_cnt_hint += pcount;
-
TCP_SKB_CB(prev)->end_seq += shifted;
TCP_SKB_CB(skb)->seq += shifted;
@@ -1531,10 +1672,6 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
if (skb == tp->retransmit_skb_hint)
tp->retransmit_skb_hint = prev;
- if (skb == tp->lost_skb_hint) {
- tp->lost_skb_hint = prev;
- tp->lost_cnt_hint -= tcp_skb_pcount(prev);
- }
TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
TCP_SKB_CB(prev)->eor = TCP_SKB_CB(skb)->eor;
@@ -1784,6 +1921,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
TCP_SKB_CB(skb)->end_seq,
dup_sack,
tcp_skb_pcount(skb),
+ skb->len,
tcp_skb_timestamp_us(skb));
tcp_rate_skb_delivered(sk, skb, state->rate);
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
@@ -2151,12 +2289,6 @@ static inline void tcp_init_undo(struct tcp_sock *tp)
tp->undo_retrans = -1;
}
-static bool tcp_is_rack(const struct sock *sk)
-{
- return READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
- TCP_RACK_LOSS_DETECTION;
-}
-
/* If we detect SACK reneging, forget all SACK information
* and reset tags completely, otherwise preserve SACKs. If receiver
* dropped its ofo queue, we will know this due to reneging detection.
@@ -2182,8 +2314,7 @@ static void tcp_timeout_mark_lost(struct sock *sk)
skb_rbtree_walk_from(skb) {
if (is_reneg)
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
- else if (tcp_is_rack(sk) && skb != head &&
- tcp_rack_skb_timeout(tp, skb, 0) > 0)
+ else if (skb != head && tcp_rack_skb_timeout(tp, skb, 0) > 0)
continue; /* Don't mark recently sent ones lost yet */
tcp_mark_skb_lost(sk, skb);
}
@@ -2264,22 +2395,6 @@ static bool tcp_check_sack_reneging(struct sock *sk, int *ack_flag)
return false;
}
-/* Heurestics to calculate number of duplicate ACKs. There's no dupACKs
- * counter when SACK is enabled (without SACK, sacked_out is used for
- * that purpose).
- *
- * With reordering, holes may still be in flight, so RFC3517 recovery
- * uses pure sacked_out (total number of SACKed segments) even though
- * it violates the RFC that uses duplicate ACKs, often these are equal
- * but when e.g. out-of-window ACKs or packet duplication occurs,
- * they differ. Since neither occurs due to loss, TCP should really
- * ignore them.
- */
-static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
-{
- return tp->sacked_out + 1;
-}
-
/* Linux NewReno/SACK/ECN state machine.
* --------------------------------------
*
@@ -2332,13 +2447,7 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
*
* If the receiver supports SACK:
*
- * RFC6675/3517: It is the conventional algorithm. A packet is
- * considered lost if the number of higher sequence packets
- * SACKed is greater than or equal the DUPACK thoreshold
- * (reordering). This is implemented in tcp_mark_head_lost and
- * tcp_update_scoreboard.
- *
- * RACK (draft-ietf-tcpm-rack-01): it is a newer algorithm
+ * RACK (RFC8985): RACK is a newer loss detection algorithm
* (2017-) that checks timing instead of counting DUPACKs.
* Essentially a packet is considered lost if it's not S/ACKed
* after RTT + reordering_window, where both metrics are
@@ -2353,8 +2462,8 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
* is lost (NewReno). This heuristics are the same in NewReno
* and SACK.
*
- * Really tricky (and requiring careful tuning) part of algorithm
- * is hidden in functions tcp_time_to_recover() and tcp_xmit_retransmit_queue().
+ * The really tricky (and requiring careful tuning) part of the algorithm
+ * is hidden in the RACK code in tcp_recovery.c and tcp_xmit_retransmit_queue().
* The first determines the moment _when_ we should reduce CWND and,
* hence, slow down forward transmission. In fact, it determines the moment
* when we decide that hole is caused by loss, rather than by a reorder.
@@ -2377,83 +2486,10 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
* Main question: may we further continue forward transmission
* with the same cwnd?
*/
-static bool tcp_time_to_recover(struct sock *sk, int flag)
+static bool tcp_time_to_recover(const struct tcp_sock *tp)
{
- struct tcp_sock *tp = tcp_sk(sk);
-
- /* Trick#1: The loss is proven. */
- if (tp->lost_out)
- return true;
-
- /* Not-A-Trick#2 : Classic rule... */
- if (!tcp_is_rack(sk) && tcp_dupack_heuristics(tp) > tp->reordering)
- return true;
-
- return false;
-}
-
-/* Detect loss in event "A" above by marking head of queue up as lost.
- * For RFC3517 SACK, a segment is considered lost if it
- * has at least tp->reordering SACKed seqments above it; "packets" refers to
- * the maximum SACKed segments to pass before reaching this limit.
- */
-static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
-{
- struct tcp_sock *tp = tcp_sk(sk);
- struct sk_buff *skb;
- int cnt;
- /* Use SACK to deduce losses of new sequences sent during recovery */
- const u32 loss_high = tp->snd_nxt;
-
- WARN_ON(packets > tp->packets_out);
- skb = tp->lost_skb_hint;
- if (skb) {
- /* Head already handled? */
- if (mark_head && after(TCP_SKB_CB(skb)->seq, tp->snd_una))
- return;
- cnt = tp->lost_cnt_hint;
- } else {
- skb = tcp_rtx_queue_head(sk);
- cnt = 0;
- }
-
- skb_rbtree_walk_from(skb) {
- /* TODO: do this better */
- /* this is not the most efficient way to do this... */
- tp->lost_skb_hint = skb;
- tp->lost_cnt_hint = cnt;
-
- if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
- break;
-
- if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
- cnt += tcp_skb_pcount(skb);
-
- if (cnt > packets)
- break;
-
- if (!(TCP_SKB_CB(skb)->sacked & TCPCB_LOST))
- tcp_mark_skb_lost(sk, skb);
-
- if (mark_head)
- break;
- }
- tcp_verify_left_out(tp);
-}
-
-/* Account newly detected lost packet(s) */
-
-static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
-{
- struct tcp_sock *tp = tcp_sk(sk);
-
- if (tcp_is_sack(tp)) {
- int sacked_upto = tp->sacked_out - tp->reordering;
- if (sacked_upto >= 0)
- tcp_mark_head_lost(sk, sacked_upto, 0);
- else if (fast_rexmit)
- tcp_mark_head_lost(sk, 1, 1);
- }
+ /* Has loss detection marked at least one packet lost? */
+ return tp->lost_out != 0;
}
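With the RFC 6675 dupack-counting path gone, tcp_time_to_recover() reduces to a lost_out check: recovery starts as soon as the loss-detection code (RACK, or the NewReno emulation) has marked at least one packet lost. Below is a minimal, self-contained sketch of the timing rule RFC 8985 describes, for orientation only; it is not the kernel's implementation in tcp_recovery.c, and the parameters are simplified assumptions.

#include <stdbool.h>
#include <stdint.h>

struct seg {
	uint64_t xmit_time_us;	/* when this segment was last (re)transmitted */
	bool	 sacked;	/* already selectively acknowledged? */
};

/*
 * Minimal RACK-style rule: an outstanding segment is deemed lost once it has
 * gone unacknowledged for longer than the smoothed RTT plus a reordering
 * window. The real code keys off the send time of the most recently
 * delivered segment and a dynamically adapted reo_wnd; "now" is a
 * simplification for illustration.
 */
static bool rack_seg_lost(const struct seg *s, uint64_t now_us,
			  uint64_t srtt_us, uint64_t reo_wnd_us)
{
	if (s->sacked)
		return false;
	return now_us > s->xmit_time_us + srtt_us + reo_wnd_us;
}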
static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when)
@@ -2479,20 +2515,33 @@ static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
{
const struct sock *sk = (const struct sock *)tp;
- if (tp->retrans_stamp &&
- tcp_tsopt_ecr_before(tp, tp->retrans_stamp))
- return true; /* got echoed TS before first retransmission */
+ /* Received an echoed timestamp before the first retransmission? */
+ if (tp->retrans_stamp)
+ return tcp_tsopt_ecr_before(tp, tp->retrans_stamp);
+
+ /* We set tp->retrans_stamp upon the first retransmission of a loss
+ * recovery episode, so normally if tp->retrans_stamp is 0 then no
+ * retransmission has happened yet (likely due to TSQ, which can cause
+ * fast retransmits to be delayed). So if snd_una advanced while
+ * tp->retrans_stamp is 0 then apparently a packet was merely delayed,
+ * not lost. But there are exceptions where we retransmit but then
+ * clear tp->retrans_stamp, so we check for those exceptions.
+ */
- /* Check if nothing was retransmitted (retrans_stamp==0), which may
- * happen in fast recovery due to TSQ. But we ignore zero retrans_stamp
- * in TCP_SYN_SENT, since when we set FLAG_SYN_ACKED we also clear
- * retrans_stamp even if we had retransmitted the SYN.
+ /* (1) For non-SACK connections, tcp_is_non_sack_preventing_reopen()
+ * clears tp->retrans_stamp when snd_una == high_seq.
*/
- if (!tp->retrans_stamp && /* no record of a retransmit/SYN? */
- sk->sk_state != TCP_SYN_SENT) /* not the FLAG_SYN_ACKED case? */
- return true; /* nothing was retransmitted */
+ if (!tcp_is_sack(tp) && !before(tp->snd_una, tp->high_seq))
+ return false;
- return false;
+ /* (2) In TCP_SYN_SENT tcp_clean_rtx_queue() clears tp->retrans_stamp
+ * when setting FLAG_SYN_ACKED, even if the SYN was
+ * retransmitted.
+ */
+ if (sk->sk_state == TCP_SYN_SENT)
+ return false;
+
+ return true; /* tp->retrans_stamp is zero; no retransmit yet */
}
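The rewritten tcp_packet_delayed() leans on the RFC 7323 timestamp echo: if the peer echoes a timestamp older than the one carried by our first retransmission, the ACK was generated by the original transmission, so the segment was merely delayed. A hedged, self-contained sketch of that comparison (the kernel helper additionally handles the exceptions enumerated above):

#include <stdbool.h>
#include <stdint.h>

/* Serial-number comparison as used for TCP timestamps: a < b modulo 2^32. */
static bool ts_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

/*
 * If the echoed timestamp (tsecr) predates the timestamp our first
 * retransmission carried, the ACK acknowledges the original copy: the
 * segment was delayed, not lost, and the recovery was spurious.
 */
static bool retransmit_was_spurious(uint32_t echoed_tsecr,
				    uint32_t first_retrans_tsval)
{
	return ts_before(echoed_tsecr, first_retrans_tsval);
}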
/* Undo procedures. */
@@ -2670,7 +2719,7 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
if (frto_undo)
NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPSPURIOUSRTOS);
- inet_csk(sk)->icsk_retransmits = 0;
+ WRITE_ONCE(inet_csk(sk)->icsk_retransmits, 0);
if (tcp_is_non_sack_preventing_reopen(sk))
return true;
if (frto_undo || tcp_is_sack(tp)) {
@@ -2881,8 +2930,6 @@ void tcp_simple_retransmit(struct sock *sk)
tcp_mark_skb_lost(sk, skb);
}
- tcp_clear_retrans_hints_partial(tp);
-
if (!tp->lost_out)
return;
@@ -2990,17 +3037,8 @@ static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
*rexmit = REXMIT_LOST;
}
-static bool tcp_force_fast_retransmit(struct sock *sk)
-{
- struct tcp_sock *tp = tcp_sk(sk);
-
- return after(tcp_highest_sack_seq(tp),
- tp->snd_una + tp->reordering * tp->mss_cache);
-}
-
/* Undo during fast recovery after partial ACK. */
-static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una,
- bool *do_lost)
+static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -3025,9 +3063,6 @@ static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una,
tcp_undo_cwnd_reduction(sk, true);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
tcp_try_keep_open(sk);
- } else {
- /* Partial ACK arrived. Force fast retransmit. */
- *do_lost = tcp_force_fast_retransmit(sk);
}
return false;
}
@@ -3041,7 +3076,7 @@ static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag)
if (unlikely(tcp_is_reno(tp))) {
tcp_newreno_mark_lost(sk, *ack_flag & FLAG_SND_UNA_ADVANCED);
- } else if (tcp_is_rack(sk)) {
+ } else {
u32 prior_retrans = tp->retrans_out;
if (tcp_rack_mark_lost(sk))
@@ -3068,10 +3103,8 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- int fast_rexmit = 0, flag = *ack_flag;
+ int flag = *ack_flag;
bool ece_ack = flag & FLAG_ECE;
- bool do_lost = num_dupack || ((flag & FLAG_DATA_SACKED) &&
- tcp_force_fast_retransmit(sk));
if (!tp->packets_out && tp->sacked_out)
tp->sacked_out = 0;
@@ -3120,7 +3153,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
if (!(flag & FLAG_SND_UNA_ADVANCED)) {
if (tcp_is_reno(tp))
tcp_add_reno_sack(sk, num_dupack, ece_ack);
- } else if (tcp_try_undo_partial(sk, prior_snd_una, &do_lost))
+ } else if (tcp_try_undo_partial(sk, prior_snd_una))
return;
if (tcp_try_undo_dsack(sk))
@@ -3128,7 +3161,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
tcp_identify_packet_loss(sk, ack_flag);
if (icsk->icsk_ca_state != TCP_CA_Recovery) {
- if (!tcp_time_to_recover(sk, flag))
+ if (!tcp_time_to_recover(tp))
return;
/* Undo reverts the recovery state. If loss is evident,
* starts a new recovery (e.g. reordering then loss);
@@ -3157,7 +3190,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
tcp_try_undo_dsack(sk);
tcp_identify_packet_loss(sk, ack_flag);
- if (!tcp_time_to_recover(sk, flag)) {
+ if (!tcp_time_to_recover(tp)) {
tcp_try_to_open(sk, flag);
return;
}
@@ -3175,11 +3208,8 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
/* Otherwise enter Recovery state */
tcp_enter_recovery(sk, ece_ack);
- fast_rexmit = 1;
}
- if (!tcp_is_rack(sk) && do_lost)
- tcp_update_scoreboard(sk, fast_rexmit);
*rexmit = REXMIT_LOST;
}
@@ -3400,6 +3430,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
if (sacked & TCPCB_SACKED_ACKED) {
tp->sacked_out -= acked_pcount;
+ /* snd_una delta covers these skbs */
+ sack->delivered_bytes -= skb->len;
} else if (tcp_is_sack(tp)) {
tcp_count_delivered(tp, acked_pcount, ece_ack);
if (!tcp_skb_spurious_retrans(tp, skb))
@@ -3435,8 +3467,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
next = skb_rb_next(skb);
if (unlikely(skb == tp->retransmit_skb_hint))
tp->retransmit_skb_hint = NULL;
- if (unlikely(skb == tp->lost_skb_hint))
- tp->lost_skb_hint = NULL;
tcp_highest_sack_replace(sk, skb, next);
tcp_rtx_queue_unlink_and_free(skb, sk);
}
@@ -3494,15 +3524,14 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
if (flag & FLAG_RETRANS_DATA_ACKED)
flag &= ~FLAG_ORIG_SACK_ACKED;
} else {
- int delta;
-
/* Non-retransmitted hole got filled? That's reordering */
if (before(reord, prior_fack))
tcp_check_sack_reordering(sk, reord, 0);
-
- delta = prior_sacked - tp->sacked_out;
- tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
}
+
+ sack->delivered_bytes = (skb ?
+ TCP_SKB_CB(skb)->seq : tp->snd_una) -
+ prior_snd_una;
} else if (skb && rtt_update && sack_rtt_us >= 0 &&
sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp,
tcp_skb_timestamp_us(skb))) {
@@ -3772,8 +3801,18 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time);
}
+static void tcp_send_ack_reflect_ect(struct sock *sk, bool accecn_reflector)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ u16 flags = 0;
+
+ if (accecn_reflector)
+ flags = tcp_accecn_reflector_flags(tp->syn_ect_rcv);
+ __tcp_send_ack(sk, tp->rcv_nxt, flags);
+}
+
/* RFC 5961 7 [ACK Throttling] */
-static void tcp_send_challenge_ack(struct sock *sk)
+static void tcp_send_challenge_ack(struct sock *sk, bool accecn_reflector)
{
struct tcp_sock *tp = tcp_sk(sk);
struct net *net = sock_net(sk);
@@ -3803,7 +3842,7 @@ static void tcp_send_challenge_ack(struct sock *sk)
WRITE_ONCE(net->ipv4.tcp_challenge_count, count - 1);
send_ack:
NET_INC_STATS(net, LINUX_MIB_TCPCHALLENGEACK);
- tcp_send_ack(sk);
+ tcp_send_ack_reflect_ect(sk, accecn_reflector);
}
}
@@ -3841,7 +3880,7 @@ static int tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
}
/* This routine deals with acks during a TLP episode and ends an episode by
- * resetting tlp_high_seq. Ref: TLP algorithm in draft-ietf-tcpm-rack
+ * resetting tlp_high_seq. Ref: TLP algorithm in RFC8985
*/
static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
{
@@ -3914,7 +3953,8 @@ static void tcp_xmit_recovery(struct sock *sk, int rexmit)
}
/* Returns the number of packets newly acked or sacked by the current ACK */
-static u32 tcp_newly_delivered(struct sock *sk, u32 prior_delivered, int flag)
+static u32 tcp_newly_delivered(struct sock *sk, u32 prior_delivered,
+ u32 ecn_count, int flag)
{
const struct net *net = sock_net(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -3922,8 +3962,12 @@ static u32 tcp_newly_delivered(struct sock *sk, u32 prior_delivered, int flag)
delivered = tp->delivered - prior_delivered;
NET_ADD_STATS(net, LINUX_MIB_TCPDELIVERED, delivered);
- if (flag & FLAG_ECE)
- NET_ADD_STATS(net, LINUX_MIB_TCPDELIVEREDCE, delivered);
+
+ if (flag & FLAG_ECE) {
+ if (tcp_ecn_mode_rfc3168(tp))
+ ecn_count = delivered;
+ NET_ADD_STATS(net, LINUX_MIB_TCPDELIVEREDCE, ecn_count);
+ }
return delivered;
}
@@ -3944,11 +3988,13 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
u32 delivered = tp->delivered;
u32 lost = tp->lost;
int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
+ u32 ecn_count = 0; /* Did we receive ECE/an AccECN ACE update? */
u32 prior_fack;
sack_state.first_sackt = 0;
sack_state.rate = &rs;
sack_state.sack_delivered = 0;
+ sack_state.delivered_bytes = 0;
/* We very likely will need to access rtx queue. */
prefetch(sk->tcp_rtx_queue.rb_node);
@@ -3964,7 +4010,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
/* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
if (before(ack, prior_snd_una - max_window)) {
if (!(flag & FLAG_NO_CHALLENGE_ACK))
- tcp_send_challenge_ack(sk);
+ tcp_send_challenge_ack(sk, false);
return -SKB_DROP_REASON_TCP_TOO_OLD_ACK;
}
goto old_ack;
@@ -3978,7 +4024,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (after(ack, prior_snd_una)) {
flag |= FLAG_SND_UNA_ADVANCED;
- icsk->icsk_retransmits = 0;
+ WRITE_ONCE(icsk->icsk_retransmits, 0);
#if IS_ENABLED(CONFIG_TLS_DEVICE)
if (static_branch_unlikely(&clean_acked_data_enabled.key))
@@ -4039,8 +4085,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
/* We passed data and got it acked, remove any soft error
* log. Something worked...
*/
- WRITE_ONCE(sk->sk_err_soft, 0);
- icsk->icsk_probes_out = 0;
+ if (READ_ONCE(sk->sk_err_soft))
+ WRITE_ONCE(sk->sk_err_soft, 0);
+ WRITE_ONCE(icsk->icsk_probes_out, 0);
tp->rcv_tstamp = tcp_jiffies32;
if (!prior_packets)
goto no_queue;
@@ -4051,6 +4098,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
tcp_rack_update_reo_wnd(sk, &rs);
+ if (tcp_ecn_mode_accecn(tp))
+ ecn_count = tcp_accecn_process(sk, skb,
+ tp->delivered - delivered,
+ sack_state.delivered_bytes,
+ &flag);
+
tcp_in_ack_event(sk, flag);
if (tp->tlp_high_seq)
@@ -4075,7 +4128,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
sk_dst_confirm(sk);
- delivered = tcp_newly_delivered(sk, delivered, flag);
+ delivered = tcp_newly_delivered(sk, delivered, ecn_count, flag);
+
lost = tp->lost - lost; /* freshly marked lost */
rs.is_ack_delayed = !!(flag & FLAG_ACK_MAYBE_DELAYED);
tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
@@ -4084,12 +4138,17 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
return 1;
no_queue:
+ if (tcp_ecn_mode_accecn(tp))
+ ecn_count = tcp_accecn_process(sk, skb,
+ tp->delivered - delivered,
+ sack_state.delivered_bytes,
+ &flag);
tcp_in_ack_event(sk, flag);
/* If data was DSACKed, see if we can undo a cwnd reduction. */
if (flag & FLAG_DSACKING_ACK) {
tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
&rexmit);
- tcp_newly_delivered(sk, delivered, flag);
+ tcp_newly_delivered(sk, delivered, ecn_count, flag);
}
/* If this ack opens up a zero window, clear backoff. It was
* being used to time the probes, and is probably far higher than
@@ -4110,7 +4169,7 @@ old_ack:
&sack_state);
tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
&rexmit);
- tcp_newly_delivered(sk, delivered, flag);
+ tcp_newly_delivered(sk, delivered, ecn_count, flag);
tcp_xmit_recovery(sk, rexmit);
}
@@ -4210,6 +4269,7 @@ void tcp_parse_options(const struct net *net,
ptr = (const unsigned char *)(th + 1);
opt_rx->saw_tstamp = 0;
+ opt_rx->accecn = 0;
opt_rx->saw_unknown = 0;
while (length > 0) {
@@ -4301,6 +4361,12 @@ void tcp_parse_options(const struct net *net,
ptr, th->syn, foc, false);
break;
+ case TCPOPT_ACCECN0:
+ case TCPOPT_ACCECN1:
+ /* Save offset of AccECN option in TCP header */
+ opt_rx->accecn = (ptr - 2) - (__u8 *)th;
+ break;
+
case TCPOPT_EXP:
/* Fast Open option shares code 254 using a
* 16 bits magic number.
@@ -4361,11 +4427,14 @@ static bool tcp_fast_parse_options(const struct net *net,
*/
if (th->doff == (sizeof(*th) / 4)) {
tp->rx_opt.saw_tstamp = 0;
+ tp->rx_opt.accecn = 0;
return false;
} else if (tp->rx_opt.tstamp_ok &&
th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) {
- if (tcp_parse_aligned_timestamp(tp, th))
+ if (tcp_parse_aligned_timestamp(tp, th)) {
+ tp->rx_opt.accecn = 0;
return true;
+ }
}
tcp_parse_options(net, skb, &tp->rx_opt, 1, NULL);
@@ -4518,14 +4587,22 @@ static enum skb_drop_reason tcp_disordered_ack_check(const struct sock *sk,
* (borrowed from freebsd)
*/
-static enum skb_drop_reason tcp_sequence(const struct tcp_sock *tp,
+static enum skb_drop_reason tcp_sequence(const struct sock *sk,
u32 seq, u32 end_seq)
{
+ const struct tcp_sock *tp = tcp_sk(sk);
+
if (before(end_seq, tp->rcv_wup))
return SKB_DROP_REASON_TCP_OLD_SEQUENCE;
- if (after(seq, tp->rcv_nxt + tcp_receive_window(tp)))
- return SKB_DROP_REASON_TCP_INVALID_SEQUENCE;
+ if (after(end_seq, tp->rcv_nxt + tcp_receive_window(tp))) {
+ if (after(seq, tp->rcv_nxt + tcp_receive_window(tp)))
+ return SKB_DROP_REASON_TCP_INVALID_SEQUENCE;
+
+ /* Only accept this packet if receive queue is empty. */
+ if (skb_queue_len(&sk->sk_receive_queue))
+ return SKB_DROP_REASON_TCP_INVALID_END_SEQUENCE;
+ }
return SKB_NOT_DROPPED_YET;
}
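The new end_seq handling distinguishes a segment that starts beyond the advertised window (always invalid) from one that merely extends past it, which is now tolerated when the receive queue is empty. A small, self-contained sketch of the sequence-space checks using the usual modulo-2^32 comparisons; it is an illustration, not the in-kernel helper:

#include <stdbool.h>
#include <stdint.h>

static bool seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static bool seq_after(uint32_t a, uint32_t b)  { return seq_before(b, a); }

enum seq_verdict { SEQ_OK, SEQ_OLD, SEQ_INVALID, SEQ_INVALID_END };

static enum seq_verdict check_seq(uint32_t seq, uint32_t end_seq,
				  uint32_t rcv_wup, uint32_t rcv_nxt,
				  uint32_t rcv_wnd, bool rcvq_empty)
{
	if (seq_before(end_seq, rcv_wup))
		return SEQ_OLD;				/* entirely old data */
	if (seq_after(end_seq, rcv_nxt + rcv_wnd)) {
		if (seq_after(seq, rcv_nxt + rcv_wnd))
			return SEQ_INVALID;		/* starts past the window */
		if (!rcvq_empty)
			return SEQ_INVALID_END;		/* only ends past it */
	}
	return SEQ_OK;
}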
@@ -4949,7 +5026,7 @@ static bool tcp_ooo_try_coalesce(struct sock *sk,
noinline_for_tracing static void
tcp_drop_reason(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason)
{
- sk_drops_add(sk, skb);
+ sk_drops_skbadd(sk, skb);
sk_skb_reason_drop(sk, skb, reason);
}
@@ -4972,8 +5049,9 @@ static void tcp_ofo_queue(struct sock *sk)
if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
__u32 dsack = dsack_high;
+
if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
- dsack_high = TCP_SKB_CB(skb)->end_seq;
+ dsack = TCP_SKB_CB(skb)->end_seq;
tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
}
p = rb_next(p);
@@ -5006,10 +5084,31 @@ static void tcp_ofo_queue(struct sock *sk)
static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb);
static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb);
-static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
+/* Check if this incoming skb can be added to the socket receive queues
+ * while satisfying the sk->sk_rcvbuf limit.
+ *
+ * In theory we should use skb->truesize, but this can cause problems
+ * when applications use too small SO_RCVBUF values.
+ * When LRO / HW GRO is used, the socket might have a high tp->scaling_ratio,
+ * allowing RWIN to be close to the available space.
+ * Whenever the receive queue gets full, we can receive a small packet
+ * filling RWIN, but with a high skb->truesize, because most NICs use a 4K
+ * page plus sk_buff metadata even when receiving less than 1500 bytes of payload.
+ *
+ * Note that we use skb->len to decide to accept or drop this packet,
+ * but sk->sk_rmem_alloc is the sum of all skb->truesize.
+ */
+static bool tcp_can_ingest(const struct sock *sk, const struct sk_buff *skb)
+{
+ unsigned int rmem = atomic_read(&sk->sk_rmem_alloc);
+
+ return rmem + skb->len <= sk->sk_rcvbuf;
+}
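tcp_can_ingest() admits packets by payload length rather than skb->truesize, since page-backed receive buffers can make even a small segment carry several kilobytes of truesize and spuriously exhaust a modest SO_RCVBUF. A toy, user-space comparison of the two admission rules (the numbers are illustrative assumptions):

#include <stdbool.h>
#include <stdio.h>

struct fake_skb { unsigned int len, truesize; };

static bool ingest_by_len(unsigned int rmem, unsigned int rcvbuf,
			  const struct fake_skb *skb)
{
	return rmem + skb->len <= rcvbuf;
}

static bool ingest_by_truesize(unsigned int rmem, unsigned int rcvbuf,
			       const struct fake_skb *skb)
{
	return rmem + skb->truesize <= rcvbuf;
}

int main(void)
{
	/* 1448 payload bytes riding on a 4K page plus sk_buff metadata. */
	struct fake_skb skb = { .len = 1448, .truesize = 4352 };
	unsigned int rcvbuf = 16384, rmem = 13000;

	printf("admit by len:      %d\n", ingest_by_len(rmem, rcvbuf, &skb));
	printf("admit by truesize: %d\n", ingest_by_truesize(rmem, rcvbuf, &skb));
	return 0;
}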
+
+static int tcp_try_rmem_schedule(struct sock *sk, const struct sk_buff *skb,
unsigned int size)
{
- if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+ if (!tcp_can_ingest(sk, skb) ||
!sk_rmem_schedule(sk, skb, size)) {
if (tcp_prune_queue(sk, skb) < 0)
@@ -5041,6 +5140,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
return;
}
+ tcp_measure_rcv_mss(sk, skb);
/* Disable header prediction. */
tp->pred_flags = 0;
inet_csk_schedule_ack(sk);
@@ -5168,7 +5268,9 @@ end:
skb_condense(skb);
skb_set_owner_r(skb, sk);
}
- tcp_rcvbuf_grow(sk);
+ /* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */
+ if (sk->sk_socket)
+ tcp_rcvbuf_grow(sk);
}
static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
@@ -5622,7 +5724,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb)
tcp_drop_reason(sk, skb, SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE);
tp->ooo_last_skb = rb_to_skb(prev);
if (!prev || goal <= 0) {
- if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+ if (tcp_can_ingest(sk, in_skb) &&
!tcp_under_memory_pressure(sk))
break;
goal = sk->sk_rcvbuf >> 3;
@@ -5654,14 +5756,18 @@ static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb)
{
struct tcp_sock *tp = tcp_sk(sk);
+ /* Do nothing if our queues are empty. */
+ if (!atomic_read(&sk->sk_rmem_alloc))
+ return -1;
+
NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED);
- if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
+ if (!tcp_can_ingest(sk, in_skb))
tcp_clamp_window(sk);
else if (tcp_under_memory_pressure(sk))
tcp_adjust_rcv_ssthresh(sk);
- if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+ if (tcp_can_ingest(sk, in_skb))
return 0;
tcp_collapse_ofo_queue(sk);
@@ -5671,7 +5777,7 @@ static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb)
NULL,
tp->copied_seq, tp->rcv_nxt);
- if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+ if (tcp_can_ingest(sk, in_skb))
return 0;
/* Collapsing did not help, destructive actions follow.
@@ -5679,7 +5785,7 @@ static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb)
tcp_prune_ofo_queue(sk, in_skb);
- if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+ if (tcp_can_ingest(sk, in_skb))
return 0;
/* If we are really being abused, tell the caller to silently
@@ -5972,6 +6078,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th, int syn_inerr)
{
struct tcp_sock *tp = tcp_sk(sk);
+ bool accecn_reflector = false;
SKB_DR(reason);
/* RFC1323: H1. Apply PAWS check first. */
@@ -6005,7 +6112,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
step1:
/* Step 1: check sequence number */
- reason = tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
+ reason = tcp_sequence(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
if (reason) {
/* RFC793, page 37: "In all states except SYN-SENT, all reset
* (RST) segments are validated by checking their SEQ-fields."
@@ -6016,6 +6123,11 @@ step1:
if (!th->rst) {
if (th->syn)
goto syn_challenge;
+
+ if (reason == SKB_DROP_REASON_TCP_INVALID_SEQUENCE ||
+ reason == SKB_DROP_REASON_TCP_INVALID_END_SEQUENCE)
+ NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_BEYOND_WINDOW);
if (!tcp_oow_rate_limited(sock_net(sk), skb,
LINUX_MIB_TCPACKSKIPPEDSEQ,
&tp->last_oow_ack_time))
@@ -6064,7 +6176,7 @@ step1:
if (tp->syn_fastopen && !tp->data_segs_in &&
sk->sk_state == TCP_ESTABLISHED)
tcp_fastopen_active_disable(sk);
- tcp_send_challenge_ack(sk);
+ tcp_send_challenge_ack(sk, false);
SKB_DR_SET(reason, TCP_RESET);
goto discard;
}
@@ -6075,6 +6187,16 @@ step1:
* RFC 5961 4.2 : Send a challenge ack
*/
if (th->syn) {
+ if (tcp_ecn_mode_accecn(tp)) {
+ accecn_reflector = true;
+ if (tp->rx_opt.accecn &&
+ tp->saw_accecn_opt < TCP_ACCECN_OPT_COUNTER_SEEN) {
+ u8 saw_opt = tcp_accecn_option_init(skb, tp->rx_opt.accecn);
+
+ tcp_accecn_saw_opt_fail_recv(tp, saw_opt);
+ tcp_accecn_opt_demand_min(sk, 1);
+ }
+ }
if (sk->sk_state == TCP_SYN_RECV && sk->sk_socket && th->ack &&
TCP_SKB_CB(skb)->seq + 1 == TCP_SKB_CB(skb)->end_seq &&
TCP_SKB_CB(skb)->seq + 1 == tp->rcv_nxt &&
@@ -6084,7 +6206,7 @@ syn_challenge:
if (syn_inerr)
TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
- tcp_send_challenge_ack(sk);
+ tcp_send_challenge_ack(sk, accecn_reflector);
SKB_DR_SET(reason, TCP_INVALID_SYN);
goto discard;
}
@@ -6156,6 +6278,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
*/
tp->rx_opt.saw_tstamp = 0;
+ tp->rx_opt.accecn = 0;
/* pred_flags is 0xS?10 << 16 + snd_wnd
* if header_prediction is to be made
@@ -6210,6 +6333,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
flag |= __tcp_replace_ts_recent(tp,
delta);
+ tcp_ecn_received_counters(sk, skb, 0);
+
/* We know that such packets are checksummed
* on entry.
*/
@@ -6234,6 +6359,10 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
if (tcp_checksum_complete(skb))
goto csum_error;
+ if (after(TCP_SKB_CB(skb)->end_seq,
+ tp->rcv_nxt + tcp_receive_window(tp)))
+ goto validate;
+
if ((int)skb->truesize > sk->sk_forward_alloc)
goto step5;
@@ -6254,6 +6383,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
/* Bulk data transfer: receiver */
tcp_cleanup_skb(skb);
__skb_pull(skb, tcp_header_len);
+ tcp_ecn_received_counters(sk, skb,
+ len - tcp_header_len);
eaten = tcp_queue_rcv(sk, skb, &fragstolen);
tcp_event_data_recv(sk, skb);
@@ -6289,11 +6420,13 @@ slow_path:
/*
* Standard slow path.
*/
-
+validate:
if (!tcp_validate_incoming(sk, skb, th, 1))
return;
step5:
+ tcp_ecn_received_counters_payload(sk, skb);
+
reason = tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT);
if ((int)reason < 0) {
reason = -reason;
@@ -6389,7 +6522,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
u16 mss = tp->rx_opt.mss_clamp, try_exp = 0;
bool syn_drop = false;
- if (mss == tp->rx_opt.user_mss) {
+ if (mss == READ_ONCE(tp->rx_opt.user_mss)) {
struct tcp_options_received opt;
/* Get original SYNACK MSS value if user MSS sets mss_clamp */
@@ -6544,7 +6677,9 @@ consume:
* state to ESTABLISHED..."
*/
- tcp_ecn_rcv_synack(tp, th);
+ if (tcp_ecn_mode_any(tp))
+ tcp_ecn_rcv_synack(sk, skb, th,
+ TCP_SKB_CB(skb)->ip_dsfield);
tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
tcp_try_undo_spurious_syn(sk);
@@ -6616,7 +6751,7 @@ consume:
TCP_DELACK_MAX, false);
goto consume;
}
- tcp_send_ack(sk);
+ tcp_send_ack_reflect_ect(sk, tcp_ecn_mode_accecn(tp));
return -1;
}
@@ -6675,7 +6810,7 @@ consume:
tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
tp->max_window = tp->snd_wnd;
- tcp_ecn_rcv_syn(tp, th);
+ tcp_ecn_rcv_syn(tp, th, skb);
tcp_mtup_init(sk);
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
@@ -6728,7 +6863,7 @@ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
tcp_try_undo_recovery(sk);
tcp_update_rto_time(tp);
- inet_csk(sk)->icsk_retransmits = 0;
+ WRITE_ONCE(inet_csk(sk)->icsk_retransmits, 0);
/* In tcp_fastopen_synack_timer() on the first SYNACK RTO we set
* retrans_stamp but don't enter CA_Loss, so in case that happened we
* need to zero retrans_stamp here to prevent spurious
@@ -6857,7 +6992,7 @@ tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
}
/* accept old ack during closing */
if ((int)reason < 0) {
- tcp_send_challenge_ack(sk);
+ tcp_send_challenge_ack(sk, false);
reason = -reason;
goto discard;
}
@@ -6904,9 +7039,12 @@ tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
tp->lsndtime = tcp_jiffies32;
tcp_initialize_rcv_mss(sk);
+ if (tcp_ecn_mode_accecn(tp))
+ tcp_accecn_third_ack(sk, skb, tp->syn_ect_snt);
tcp_fast_path_on(tp);
if (sk->sk_shutdown & SEND_SHUTDOWN)
tcp_shutdown(sk, SEND_SHUTDOWN);
+
break;
case TCP_FIN_WAIT1: {
@@ -7076,6 +7214,15 @@ static void tcp_ecn_create_request(struct request_sock *req,
bool ect, ecn_ok;
u32 ecn_ok_dst;
+ if (tcp_accecn_syn_requested(th) &&
+ READ_ONCE(net->ipv4.sysctl_tcp_ecn) >= 3) {
+ inet_rsk(req)->ecn_ok = 1;
+ tcp_rsk(req)->accecn_ok = 1;
+ tcp_rsk(req)->syn_ect_rcv = TCP_SKB_CB(skb)->ip_dsfield &
+ INET_ECN_MASK;
+ return;
+ }
+
if (!th_ecn)
return;
@@ -7083,7 +7230,8 @@ static void tcp_ecn_create_request(struct request_sock *req,
ecn_ok_dst = dst_feature(dst, DST_FEATURE_ECN_MASK);
ecn_ok = READ_ONCE(net->ipv4.sysctl_tcp_ecn) || ecn_ok_dst;
- if (((!ect || th->res1) && ecn_ok) || tcp_ca_needs_ecn(listen_sk) ||
+ if (((!ect || th->res1 || th->ae) && ecn_ok) ||
+ tcp_ca_needs_ecn(listen_sk) ||
(ecn_ok_dst & DST_FEATURE_ECN_CA) ||
tcp_bpf_ca_needs_ecn((struct sock *)req))
inet_rsk(req)->ecn_ok = 1;
@@ -7101,6 +7249,11 @@ static void tcp_openreq_init(struct request_sock *req,
tcp_rsk(req)->snt_synack = 0;
tcp_rsk(req)->snt_tsval_first = 0;
tcp_rsk(req)->last_oow_ack_time = 0;
+ tcp_rsk(req)->accecn_ok = 0;
+ tcp_rsk(req)->saw_accecn_opt = TCP_ACCECN_OPT_NOT_SEEN;
+ tcp_rsk(req)->accecn_fail_mode = 0;
+ tcp_rsk(req)->syn_ect_rcv = 0;
+ tcp_rsk(req)->syn_ect_snt = 0;
req->mss = rx_opt->mss_clamp;
req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
ireq->tstamp_ok = rx_opt->tstamp_ok;
@@ -7140,8 +7293,8 @@ static bool tcp_syn_flood_action(struct sock *sk, const char *proto)
#endif
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
- if (!READ_ONCE(queue->synflood_warned) && syncookies != 2 &&
- xchg(&queue->synflood_warned, 1) == 0) {
+ if (syncookies != 2 && !READ_ONCE(queue->synflood_warned)) {
+ WRITE_ONCE(queue->synflood_warned, 1);
if (IS_ENABLED(CONFIG_IPV6) && sk->sk_family == AF_INET6) {
net_info_ratelimited("%s: Possible SYN flooding on port [%pI6c]:%u. %s.\n",
proto, inet6_rcv_saddr(sk),
@@ -7209,7 +7362,7 @@ u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
return 0;
}
- mss = tcp_parse_mss_option(th, tp->rx_opt.user_mss);
+ mss = tcp_parse_mss_option(th, READ_ONCE(tp->rx_opt.user_mss));
if (!mss)
mss = af_ops->mss_clamp;
@@ -7223,7 +7376,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
{
struct tcp_fastopen_cookie foc = { .len = -1 };
struct tcp_options_received tmp_opt;
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
struct net *net = sock_net(sk);
struct sock *fastopen_sk = NULL;
struct request_sock *req;
@@ -7274,7 +7427,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = af_ops->mss_clamp;
- tmp_opt.user_mss = tp->rx_opt.user_mss;
+ tmp_opt.user_mss = READ_ONCE(tp->rx_opt.user_mss);
tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0,
want_cookie ? NULL : &foc);
@@ -7356,7 +7509,6 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
&foc, TCP_SYNACK_FASTOPEN, skb);
/* Add the child socket directly into the accept queue */
if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
- reqsk_fastopen_remove(fastopen_sk, req, false);
bh_unlock_sock(fastopen_sk);
sock_put(fastopen_sk);
goto drop_and_free;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6a14f9e6fef6..b1fcf3e4e1ce 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -58,11 +58,14 @@
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/sock_diag.h>
+#include <net/aligned_data.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
+#include <net/tcp_ecn.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
@@ -72,6 +75,7 @@
#include <net/secure_seq.h>
#include <net/busy_poll.h>
#include <net/rstreason.h>
+#include <net/psp.h>
#include <linux/inet.h>
#include <linux/ipv6.h>
@@ -290,9 +294,9 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
inet->inet_dport = usin->sin_port;
sk_daddr_set(sk, daddr);
- inet_csk(sk)->icsk_ext_hdr_len = 0;
+ inet_csk(sk)->icsk_ext_hdr_len = psp_sk_overhead(sk);
if (inet_opt)
- inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
+ inet_csk(sk)->icsk_ext_hdr_len += inet_opt->opt.optlen;
tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
@@ -504,8 +508,7 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
struct sock *sk;
int err;
- sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- iph->daddr, th->dest, iph->saddr,
+ sk = __inet_lookup_established(net, iph->daddr, th->dest, iph->saddr,
ntohs(th->source), inet_iif(skb), 0);
if (!sk) {
__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
@@ -787,7 +790,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb,
arg.iov[0].iov_base = (unsigned char *)&rep;
arg.iov[0].iov_len = sizeof(rep.th);
- net = sk ? sock_net(sk) : dev_net_rcu(skb_dst(skb)->dev);
+ net = sk ? sock_net(sk) : skb_dst_dev_net_rcu(skb);
/* Invalid TCP option size or twice included auth */
if (tcp_parse_auth_options(tcp_hdr(skb), &md5_hash_location, &aoh))
@@ -821,8 +824,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb,
* Incoming packet is checked with md5 hash with finding key,
* no RST generated if md5 hash doesn't match.
*/
- sk1 = __inet_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
- NULL, 0, ip_hdr(skb)->saddr,
+ sk1 = __inet_lookup_listener(net, NULL, 0, ip_hdr(skb)->saddr,
th->source, ip_hdr(skb)->daddr,
ntohs(th->source), dif, sdif);
/* don't send rst if it can't find key */
@@ -1189,7 +1191,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
enum tcp_synack_type synack_type,
struct sk_buff *syn_skb)
{
- const struct inet_request_sock *ireq = inet_rsk(req);
+ struct inet_request_sock *ireq = inet_rsk(req);
struct flowi4 fl4;
int err = -1;
struct sk_buff *skb;
@@ -1202,6 +1204,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
if (skb) {
+ tcp_rsk(req)->syn_ect_snt = inet_sk(sk)->tos & INET_ECN_MASK;
__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
tos = READ_ONCE(inet_sk(sk)->tos);
@@ -1503,9 +1506,9 @@ void tcp_clear_md5_list(struct sock *sk)
md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
- hlist_del_rcu(&key->node);
+ hlist_del(&key->node);
atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
- kfree_rcu(key, rcu);
+ kfree(key);
}
}
@@ -1703,7 +1706,6 @@ static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
.family = PF_INET,
.obj_size = sizeof(struct tcp_request_sock),
- .rtx_syn_ack = tcp_rtx_synack,
.send_ack = tcp_v4_reqsk_send_ack,
.destructor = tcp_v4_reqsk_destructor,
.send_reset = tcp_v4_send_reset,
@@ -1906,6 +1908,10 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
enum skb_drop_reason reason;
struct sock *rsk;
+ reason = psp_sk_rx_policy_check(sk, skb);
+ if (reason)
+ goto err_discard;
+
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
struct dst_entry *dst;
@@ -1967,6 +1973,7 @@ csum_err:
reason = SKB_DROP_REASON_TCP_CSUM;
trace_tcp_bad_csum(skb);
TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+err_discard:
TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
goto discard;
}
@@ -1991,8 +1998,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
if (th->doff < sizeof(struct tcphdr) / 4)
return 0;
- sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- iph->saddr, th->source,
+ sk = __inet_lookup_established(net, iph->saddr, th->source,
iph->daddr, ntohs(th->dest),
skb->skb_iif, inet_sdif(skb));
if (sk) {
@@ -2025,6 +2031,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
u32 gso_size;
u64 limit;
int delta;
+ int err;
/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
* we can fix skb->truesize to its real value to avoid future drops.
@@ -2068,7 +2075,9 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
(TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)) ||
!tcp_skb_can_collapse_rx(tail, skb) ||
thtail->doff != th->doff ||
- memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
+ memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)) ||
+ /* prior to PSP Rx policy check, retain exact PSP metadata */
+ psp_skb_coalesce_diff(tail, skb))
goto no_coalesce;
__skb_pull(skb, hdrlen);
@@ -2135,21 +2144,27 @@ no_coalesce:
limit = min_t(u64, limit, UINT_MAX);
- if (unlikely(sk_add_backlog(sk, skb, limit))) {
+ err = sk_add_backlog(sk, skb, limit);
+ if (unlikely(err)) {
bh_unlock_sock(sk);
- *reason = SKB_DROP_REASON_SOCKET_BACKLOG;
- __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
+ if (err == -ENOMEM) {
+ *reason = SKB_DROP_REASON_PFMEMALLOC;
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
+ } else {
+ *reason = SKB_DROP_REASON_SOCKET_BACKLOG;
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
+ }
return true;
}
return false;
}
EXPORT_IPV6_MOD(tcp_add_backlog);
-int tcp_filter(struct sock *sk, struct sk_buff *skb)
+int tcp_filter(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason *reason)
{
struct tcphdr *th = (struct tcphdr *)skb->data;
- return sk_filter_trim_cap(sk, skb, th->doff * 4);
+ return sk_filter_trim_cap(sk, skb, th->doff * 4, reason);
}
EXPORT_IPV6_MOD(tcp_filter);
@@ -2228,8 +2243,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
th = (const struct tcphdr *)skb->data;
iph = ip_hdr(skb);
lookup:
- sk = __inet_lookup_skb(net->ipv4.tcp_death_row.hashinfo,
- skb, __tcp_hdrlen(th), th->source,
+ sk = __inet_lookup_skb(skb, __tcp_hdrlen(th), th->source,
th->dest, sdif, &refcounted);
if (!sk)
goto no_tcp_socket;
@@ -2250,7 +2264,7 @@ lookup:
&iph->saddr, &iph->daddr,
AF_INET, dif, sdif);
if (unlikely(drop_reason)) {
- sk_drops_add(sk, skb);
+ sk_drops_skbadd(sk, skb);
reqsk_put(req);
goto discard_it;
}
@@ -2276,14 +2290,12 @@ lookup:
}
refcounted = true;
nsk = NULL;
- if (!tcp_filter(sk, skb)) {
+ if (!tcp_filter(sk, skb, &drop_reason)) {
th = (const struct tcphdr *)skb->data;
iph = ip_hdr(skb);
tcp_v4_fill_cb(skb, iph, th);
nsk = tcp_check_req(sk, skb, req, false, &req_stolen,
&drop_reason);
- } else {
- drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
}
if (!nsk) {
reqsk_put(req);
@@ -2339,10 +2351,9 @@ process:
nf_reset_ct(skb);
- if (tcp_filter(sk, skb)) {
- drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
+ if (tcp_filter(sk, skb, &drop_reason))
goto discard_and_relse;
- }
+
th = (const struct tcphdr *)skb->data;
iph = ip_hdr(skb);
tcp_v4_fill_cb(skb, iph, th);
@@ -2398,7 +2409,7 @@ discard_it:
return 0;
discard_and_relse:
- sk_drops_add(sk, skb);
+ sk_drops_skbadd(sk, skb);
if (refcounted)
sock_put(sk);
goto discard_it;
@@ -2421,9 +2432,7 @@ do_time_wait:
&drop_reason);
switch (tw_status) {
case TCP_TW_SYN: {
- struct sock *sk2 = inet_lookup_listener(net,
- net->ipv4.tcp_death_row.hashinfo,
- skb, __tcp_hdrlen(th),
+ struct sock *sk2 = inet_lookup_listener(net, skb, __tcp_hdrlen(th),
iph->saddr, th->source,
iph->daddr, th->dest,
inet_iif(skb),
@@ -2436,6 +2445,10 @@ do_time_wait:
__this_cpu_write(tcp_tw_isn, isn);
goto process;
}
+
+ drop_reason = psp_twsk_rx_policy_check(inet_twsk(sk), skb);
+ if (drop_reason)
+ break;
}
/* to ACK */
fallthrough;
@@ -2454,7 +2467,6 @@ do_time_wait:
static struct timewait_sock_ops tcp_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct tcp_timewait_sock),
- .twsk_destructor= tcp_twsk_destructor,
};
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
@@ -2496,6 +2508,13 @@ static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
.ao_calc_key_sk = tcp_v4_ao_calc_key_sk,
#endif
};
+
+static void tcp4_destruct_sock(struct sock *sk)
+{
+ tcp_md5_destruct_sock(sk);
+ tcp_ao_destroy_sock(sk, false);
+ inet_sock_destruct(sk);
+}
#endif
/* NOTE: A lot of things set to zero explicitly by call to
@@ -2511,23 +2530,12 @@ static int tcp_v4_init_sock(struct sock *sk)
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
+ sk->sk_destruct = tcp4_destruct_sock;
#endif
return 0;
}
-#ifdef CONFIG_TCP_MD5SIG
-static void tcp_md5sig_info_free_rcu(struct rcu_head *head)
-{
- struct tcp_md5sig_info *md5sig;
-
- md5sig = container_of(head, struct tcp_md5sig_info, rcu);
- kfree(md5sig);
- static_branch_slow_dec_deferred(&tcp_md5_needed);
- tcp_md5_release_sigpool();
-}
-#endif
-
static void tcp_release_user_frags(struct sock *sk)
{
#ifdef CONFIG_PAGE_POOL
@@ -2564,19 +2572,6 @@ void tcp_v4_destroy_sock(struct sock *sk)
/* Cleans up our, hopefully empty, out_of_order_queue. */
skb_rbtree_purge(&tp->out_of_order_queue);
-#ifdef CONFIG_TCP_MD5SIG
- /* Clean up the MD5 key list, if any */
- if (tp->md5sig_info) {
- struct tcp_md5sig_info *md5sig;
-
- md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
- tcp_clear_md5_list(sk);
- call_rcu(&md5sig->rcu, tcp_md5sig_info_free_rcu);
- rcu_assign_pointer(tp->md5sig_info, NULL);
- }
-#endif
- tcp_ao_destroy_sock(sk, false);
-
/* Clean up a referenced TCP bind bucket. */
if (inet_csk(sk)->icsk_bind_hash)
inet_put_port(sk);
@@ -2896,7 +2891,7 @@ static void get_openreq4(const struct request_sock *req,
jiffies_delta_to_clock_t(delta),
req->num_timeout,
from_kuid_munged(seq_user_ns(f),
- sock_i_uid(req->rsk_listener)),
+ sk_uid(req->rsk_listener)),
0, /* non standard timer */
0, /* open_requests have no inode */
0,
@@ -2953,9 +2948,9 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
rx_queue,
timer_active,
jiffies_delta_to_clock_t(timer_expires - jiffies),
- icsk->icsk_retransmits,
- from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
- icsk->icsk_probes_out,
+ READ_ONCE(icsk->icsk_retransmits),
+ from_kuid_munged(seq_user_ns(f), sk_uid(sk)),
+ READ_ONCE(icsk->icsk_probes_out),
sock_i_ino(sk),
refcount_read(&sk->sk_refcnt), sk,
jiffies_to_clock_t(icsk->icsk_rto),
@@ -3014,13 +3009,17 @@ out:
}
#ifdef CONFIG_BPF_SYSCALL
+union bpf_tcp_iter_batch_item {
+ struct sock *sk;
+ __u64 cookie;
+};
+
struct bpf_tcp_iter_state {
struct tcp_iter_state state;
unsigned int cur_sk;
unsigned int end_sk;
unsigned int max_sk;
- struct sock **batch;
- bool st_bucket_done;
+ union bpf_tcp_iter_batch_item *batch;
};
struct bpf_iter__tcp {
@@ -3043,21 +3042,32 @@ static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
{
- while (iter->cur_sk < iter->end_sk)
- sock_gen_put(iter->batch[iter->cur_sk++]);
+ union bpf_tcp_iter_batch_item *item;
+ unsigned int cur_sk = iter->cur_sk;
+ __u64 cookie;
+
+ /* Remember the cookies of the sockets we haven't seen yet, so we can
+ * pick up where we left off next time around.
+ */
+ while (cur_sk < iter->end_sk) {
+ item = &iter->batch[cur_sk++];
+ cookie = sock_gen_cookie(item->sk);
+ sock_gen_put(item->sk);
+ item->cookie = cookie;
+ }
}
static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
- unsigned int new_batch_sz)
+ unsigned int new_batch_sz, gfp_t flags)
{
- struct sock **new_batch;
+ union bpf_tcp_iter_batch_item *new_batch;
new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
- GFP_USER | __GFP_NOWARN);
+ flags | __GFP_NOWARN);
if (!new_batch)
return -ENOMEM;
- bpf_iter_tcp_put_batch(iter);
+ memcpy(new_batch, iter->batch, sizeof(*iter->batch) * iter->end_sk);
kvfree(iter->batch);
iter->batch = new_batch;
iter->max_sk = new_batch_sz;
@@ -3065,112 +3075,234 @@ static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
return 0;
}
-static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq,
- struct sock *start_sk)
+static struct sock *bpf_iter_tcp_resume_bucket(struct sock *first_sk,
+ union bpf_tcp_iter_batch_item *cookies,
+ int n_cookies)
+{
+ struct hlist_nulls_node *node;
+ struct sock *sk;
+ int i;
+
+ for (i = 0; i < n_cookies; i++) {
+ sk = first_sk;
+ sk_nulls_for_each_from(sk, node)
+ if (cookies[i].cookie == atomic64_read(&sk->sk_cookie))
+ return sk;
+ }
+
+ return NULL;
+}
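bpf_iter_tcp_resume_bucket() is the heart of the new cookie-based resume: when a batch is released, the unread sockets' cookies are saved, and on the next iteration the same bucket is rescanned for the first cookie that is still present. A compact user-space sketch of that strategy over an array stand-in for the hash chain (names are illustrative, not kernel APIs):

#include <stddef.h>
#include <stdint.h>

struct fake_sock { uint64_t cookie; };

/*
 * Return the first socket in the bucket whose cookie matches one of the
 * remembered cookies, trying cookies in the order they were saved so we
 * resume at the earliest still-present socket. NULL means every remembered
 * socket has gone away and the caller should move on to the next bucket.
 */
static struct fake_sock *resume_bucket(struct fake_sock *bucket, size_t n_sk,
				       const uint64_t *cookies, size_t n_cookies)
{
	for (size_t i = 0; i < n_cookies; i++)
		for (size_t j = 0; j < n_sk; j++)
			if (bucket[j].cookie == cookies[i])
				return &bucket[j];
	return NULL;
}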
+
+static struct sock *bpf_iter_tcp_resume_listening(struct seq_file *seq)
{
struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
struct bpf_tcp_iter_state *iter = seq->private;
struct tcp_iter_state *st = &iter->state;
+ unsigned int find_cookie = iter->cur_sk;
+ unsigned int end_cookie = iter->end_sk;
+ int resume_bucket = st->bucket;
+ struct sock *sk;
+
+ if (end_cookie && find_cookie == end_cookie)
+ ++st->bucket;
+
+ sk = listening_get_first(seq);
+ iter->cur_sk = 0;
+ iter->end_sk = 0;
+
+ if (sk && st->bucket == resume_bucket && end_cookie) {
+ sk = bpf_iter_tcp_resume_bucket(sk, &iter->batch[find_cookie],
+ end_cookie - find_cookie);
+ if (!sk) {
+ spin_unlock(&hinfo->lhash2[st->bucket].lock);
+ ++st->bucket;
+ sk = listening_get_first(seq);
+ }
+ }
+
+ return sk;
+}
+
+static struct sock *bpf_iter_tcp_resume_established(struct seq_file *seq)
+{
+ struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
+ struct bpf_tcp_iter_state *iter = seq->private;
+ struct tcp_iter_state *st = &iter->state;
+ unsigned int find_cookie = iter->cur_sk;
+ unsigned int end_cookie = iter->end_sk;
+ int resume_bucket = st->bucket;
+ struct sock *sk;
+
+ if (end_cookie && find_cookie == end_cookie)
+ ++st->bucket;
+
+ sk = established_get_first(seq);
+ iter->cur_sk = 0;
+ iter->end_sk = 0;
+
+ if (sk && st->bucket == resume_bucket && end_cookie) {
+ sk = bpf_iter_tcp_resume_bucket(sk, &iter->batch[find_cookie],
+ end_cookie - find_cookie);
+ if (!sk) {
+ spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
+ ++st->bucket;
+ sk = established_get_first(seq);
+ }
+ }
+
+ return sk;
+}
+
+static struct sock *bpf_iter_tcp_resume(struct seq_file *seq)
+{
+ struct bpf_tcp_iter_state *iter = seq->private;
+ struct tcp_iter_state *st = &iter->state;
+ struct sock *sk = NULL;
+
+ switch (st->state) {
+ case TCP_SEQ_STATE_LISTENING:
+ sk = bpf_iter_tcp_resume_listening(seq);
+ if (sk)
+ break;
+ st->bucket = 0;
+ st->state = TCP_SEQ_STATE_ESTABLISHED;
+ fallthrough;
+ case TCP_SEQ_STATE_ESTABLISHED:
+ sk = bpf_iter_tcp_resume_established(seq);
+ break;
+ }
+
+ return sk;
+}
+
+static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq,
+ struct sock **start_sk)
+{
+ struct bpf_tcp_iter_state *iter = seq->private;
struct hlist_nulls_node *node;
unsigned int expected = 1;
struct sock *sk;
- sock_hold(start_sk);
- iter->batch[iter->end_sk++] = start_sk;
+ sock_hold(*start_sk);
+ iter->batch[iter->end_sk++].sk = *start_sk;
- sk = sk_nulls_next(start_sk);
+ sk = sk_nulls_next(*start_sk);
+ *start_sk = NULL;
sk_nulls_for_each_from(sk, node) {
if (seq_sk_match(seq, sk)) {
if (iter->end_sk < iter->max_sk) {
sock_hold(sk);
- iter->batch[iter->end_sk++] = sk;
+ iter->batch[iter->end_sk++].sk = sk;
+ } else if (!*start_sk) {
+ /* Remember where we left off. */
+ *start_sk = sk;
}
expected++;
}
}
- spin_unlock(&hinfo->lhash2[st->bucket].lock);
return expected;
}
static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq,
- struct sock *start_sk)
+ struct sock **start_sk)
{
- struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
struct bpf_tcp_iter_state *iter = seq->private;
- struct tcp_iter_state *st = &iter->state;
struct hlist_nulls_node *node;
unsigned int expected = 1;
struct sock *sk;
- sock_hold(start_sk);
- iter->batch[iter->end_sk++] = start_sk;
+ sock_hold(*start_sk);
+ iter->batch[iter->end_sk++].sk = *start_sk;
- sk = sk_nulls_next(start_sk);
+ sk = sk_nulls_next(*start_sk);
+ *start_sk = NULL;
sk_nulls_for_each_from(sk, node) {
if (seq_sk_match(seq, sk)) {
if (iter->end_sk < iter->max_sk) {
sock_hold(sk);
- iter->batch[iter->end_sk++] = sk;
+ iter->batch[iter->end_sk++].sk = sk;
+ } else if (!*start_sk) {
+ /* Remember where we left off. */
+ *start_sk = sk;
}
expected++;
}
}
- spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
return expected;
}
-static struct sock *bpf_iter_tcp_batch(struct seq_file *seq)
+static unsigned int bpf_iter_fill_batch(struct seq_file *seq,
+ struct sock **start_sk)
+{
+ struct bpf_tcp_iter_state *iter = seq->private;
+ struct tcp_iter_state *st = &iter->state;
+
+ if (st->state == TCP_SEQ_STATE_LISTENING)
+ return bpf_iter_tcp_listening_batch(seq, start_sk);
+ else
+ return bpf_iter_tcp_established_batch(seq, start_sk);
+}
+
+static void bpf_iter_tcp_unlock_bucket(struct seq_file *seq)
{
struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
struct bpf_tcp_iter_state *iter = seq->private;
struct tcp_iter_state *st = &iter->state;
+
+ if (st->state == TCP_SEQ_STATE_LISTENING)
+ spin_unlock(&hinfo->lhash2[st->bucket].lock);
+ else
+ spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
+}
+
+static struct sock *bpf_iter_tcp_batch(struct seq_file *seq)
+{
+ struct bpf_tcp_iter_state *iter = seq->private;
unsigned int expected;
- bool resized = false;
struct sock *sk;
+ int err;
- /* The st->bucket is done. Directly advance to the next
- * bucket instead of having the tcp_seek_last_pos() to skip
- * one by one in the current bucket and eventually find out
- * it has to advance to the next bucket.
- */
- if (iter->st_bucket_done) {
- st->offset = 0;
- st->bucket++;
- if (st->state == TCP_SEQ_STATE_LISTENING &&
- st->bucket > hinfo->lhash2_mask) {
- st->state = TCP_SEQ_STATE_ESTABLISHED;
- st->bucket = 0;
- }
- }
+ sk = bpf_iter_tcp_resume(seq);
+ if (!sk)
+ return NULL; /* Done */
-again:
- /* Get a new batch */
- iter->cur_sk = 0;
- iter->end_sk = 0;
- iter->st_bucket_done = false;
+ expected = bpf_iter_fill_batch(seq, &sk);
+ if (likely(iter->end_sk == expected))
+ goto done;
- sk = tcp_seek_last_pos(seq);
+ /* Batch size was too small. */
+ bpf_iter_tcp_unlock_bucket(seq);
+ bpf_iter_tcp_put_batch(iter);
+ err = bpf_iter_tcp_realloc_batch(iter, expected * 3 / 2,
+ GFP_USER);
+ if (err)
+ return ERR_PTR(err);
+
+ sk = bpf_iter_tcp_resume(seq);
if (!sk)
return NULL; /* Done */
- if (st->state == TCP_SEQ_STATE_LISTENING)
- expected = bpf_iter_tcp_listening_batch(seq, sk);
- else
- expected = bpf_iter_tcp_established_batch(seq, sk);
+ expected = bpf_iter_fill_batch(seq, &sk);
+ if (likely(iter->end_sk == expected))
+ goto done;
- if (iter->end_sk == expected) {
- iter->st_bucket_done = true;
- return sk;
- }
-
- if (!resized && !bpf_iter_tcp_realloc_batch(iter, expected * 3 / 2)) {
- resized = true;
- goto again;
+ /* Batch size was still too small. Hold onto the lock while we try
+ * again with a larger batch to make sure the current bucket's size
+ * does not change in the meantime.
+ */
+ err = bpf_iter_tcp_realloc_batch(iter, expected, GFP_NOWAIT);
+ if (err) {
+ bpf_iter_tcp_unlock_bucket(seq);
+ return ERR_PTR(err);
}
- return sk;
+ expected = bpf_iter_fill_batch(seq, &sk);
+ WARN_ON_ONCE(iter->end_sk != expected);
+done:
+ bpf_iter_tcp_unlock_bucket(seq);
+ return iter->batch[0].sk;
}
static void *bpf_iter_tcp_seq_start(struct seq_file *seq, loff_t *pos)
@@ -3200,16 +3332,11 @@ static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
* meta.seq_num is used instead.
*/
st->num++;
- /* Move st->offset to the next sk in the bucket such that
- * the future start() will resume at st->offset in
- * st->bucket. See tcp_seek_last_pos().
- */
- st->offset++;
- sock_gen_put(iter->batch[iter->cur_sk++]);
+ sock_gen_put(iter->batch[iter->cur_sk++].sk);
}
if (iter->cur_sk < iter->end_sk)
- sk = iter->batch[iter->cur_sk];
+ sk = iter->batch[iter->cur_sk].sk;
else
sk = bpf_iter_tcp_batch(seq);
@@ -3246,9 +3373,9 @@ static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
const struct request_sock *req = v;
uid = from_kuid_munged(seq_user_ns(seq),
- sock_i_uid(req->rsk_listener));
+ sk_uid(req->rsk_listener));
} else {
- uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
+ uid = from_kuid_munged(seq_user_ns(seq), sk_uid(sk));
}
meta.seq = seq;
@@ -3275,10 +3402,8 @@ static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v)
(void)tcp_prog_seq_show(prog, &meta, v, 0);
}
- if (iter->cur_sk < iter->end_sk) {
+ if (iter->cur_sk < iter->end_sk)
bpf_iter_tcp_put_batch(iter);
- iter->st_bucket_done = false;
- }
}
static const struct seq_operations bpf_iter_tcp_seq_ops = {
@@ -3389,9 +3514,8 @@ struct proto tcp_prot = {
.leave_memory_pressure = tcp_leave_memory_pressure,
.stream_memory_free = tcp_stream_memory_free,
.sockets_allocated = &tcp_sockets_allocated,
- .orphan_count = &tcp_orphan_count,
- .memory_allocated = &tcp_memory_allocated,
+ .memory_allocated = &net_aligned_data.tcp_memory_allocated,
.per_cpu_fw_alloc = &tcp_memory_per_cpu_fw_alloc,
.memory_pressure = &tcp_memory_pressure,
@@ -3448,7 +3572,9 @@ fallback:
static int __net_init tcp_sk_init(struct net *net)
{
- net->ipv4.sysctl_tcp_ecn = 2;
+ net->ipv4.sysctl_tcp_ecn = TCP_ECN_IN_ECN_OUT_NOECN;
+ net->ipv4.sysctl_tcp_ecn_option = TCP_ACCECN_OPTION_FULL;
+ net->ipv4.sysctl_tcp_ecn_option_beacon = TCP_ACCECN_OPTION_BEACON;
net->ipv4.sysctl_tcp_ecn_fallback = 1;
net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
@@ -3596,7 +3722,7 @@ static int bpf_iter_init_tcp(void *priv_data, struct bpf_iter_aux_info *aux)
if (err)
return err;
- err = bpf_iter_tcp_realloc_batch(iter, INIT_BATCH_SZ);
+ err = bpf_iter_tcp_realloc_batch(iter, INIT_BATCH_SZ, GFP_USER);
if (err) {
bpf_iter_fini_seq_net(priv_data);
return err;
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 4251670e328c..45b6ecd16412 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -166,11 +166,11 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
unsigned int hash)
{
struct tcp_metrics_block *tm;
- struct net *net;
bool reclaim = false;
+ struct net *net;
spin_lock_bh(&tcp_metrics_lock);
- net = dev_net_rcu(dst->dev);
+ net = dst_dev_net_rcu(dst);
/* While waiting for the spin-lock the cache might have been populated
* with this entry and so we have to check again.
@@ -273,7 +273,7 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
return NULL;
}
- net = dev_net_rcu(dst->dev);
+ net = dst_dev_net_rcu(dst);
hash ^= net_hash_mix(net);
hash = hash_32(hash, tcp_metrics_hash_log);
@@ -318,7 +318,7 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
else
return NULL;
- net = dev_net_rcu(dst->dev);
+ net = dst_dev_net_rcu(dst);
hash ^= net_hash_mix(net);
hash = hash_32(hash, tcp_metrics_hash_log);
@@ -912,7 +912,7 @@ static void tcp_metrics_flush_all(struct net *net)
spin_lock_bh(&tcp_metrics_lock);
for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
match = net ? net_eq(tm_net(tm), net) :
- !refcount_read(&tm_net(tm)->ns.count);
+ !check_net(tm_net(tm));
if (match) {
rcu_assign_pointer(*pp, tm->tcpm_next);
kfree_rcu(tm, rcu_head);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 43d7852ce07e..2ec8c6f1cdcc 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -20,9 +20,11 @@
*/
#include <net/tcp.h>
+#include <net/tcp_ecn.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>
#include <net/rstreason.h>
+#include <net/psp.h>
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
@@ -103,9 +105,16 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
u32 rcv_nxt = READ_ONCE(tcptw->tw_rcv_nxt);
struct tcp_options_received tmp_opt;
+ enum skb_drop_reason psp_drop;
bool paws_reject = false;
int ts_recent_stamp;
+ /* Instead of dropping immediately, wait to see what value is
+ * returned. We will accept a non-PSP-encapsulated SYN in the
+ * case where TCP_TW_SYN is returned.
+ */
+ psp_drop = psp_twsk_rx_policy_check(tw, skb);
+
tmp_opt.saw_tstamp = 0;
ts_recent_stamp = READ_ONCE(tcptw->tw_ts_recent_stamp);
if (th->doff > (sizeof(*th) >> 2) && ts_recent_stamp) {
@@ -123,6 +132,9 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
if (READ_ONCE(tw->tw_substate) == TCP_FIN_WAIT2) {
/* Just repeat all the checks of tcp_rcv_state_process() */
+ if (psp_drop)
+ goto out_put;
+
/* Out of window, send ACK */
if (paws_reject ||
!tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
@@ -193,6 +205,9 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
(TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
/* In window segment, it may be only reset or bare ack. */
+ if (psp_drop)
+ goto out_put;
+
if (th->rst) {
/* This is TIME_WAIT assassination, in two flavors.
* Oh well... nobody has a sufficient solution to this
@@ -246,6 +261,9 @@ kill:
return TCP_TW_SYN;
}
+ if (psp_drop)
+ goto out_put;
+
if (paws_reject) {
*drop_reason = SKB_DROP_REASON_TCP_RFC7323_TW_PAWS;
__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWS_TW_REJECTED);
@@ -264,6 +282,8 @@ kill:
return tcp_timewait_check_oow_rate_limit(
tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
}
+
+out_put:
inet_twsk_put(tw);
return TCP_TW_SUCCESS;
}
@@ -377,31 +397,22 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
}
EXPORT_SYMBOL(tcp_time_wait);
-#ifdef CONFIG_TCP_MD5SIG
-static void tcp_md5_twsk_free_rcu(struct rcu_head *head)
-{
- struct tcp_md5sig_key *key;
-
- key = container_of(head, struct tcp_md5sig_key, rcu);
- kfree(key);
- static_branch_slow_dec_deferred(&tcp_md5_needed);
- tcp_md5_release_sigpool();
-}
-#endif
-
void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
if (static_branch_unlikely(&tcp_md5_needed.key)) {
struct tcp_timewait_sock *twsk = tcp_twsk(sk);
- if (twsk->tw_md5_key)
- call_rcu(&twsk->tw_md5_key->rcu, tcp_md5_twsk_free_rcu);
+ if (twsk->tw_md5_key) {
+ kfree(twsk->tw_md5_key);
+ static_branch_slow_dec_deferred(&tcp_md5_needed);
+ tcp_md5_release_sigpool();
+ }
}
#endif
tcp_ao_destroy_sock(sk, true);
+ psp_twsk_assoc_free(inet_twsk(sk));
}
-EXPORT_IPV6_MOD_GPL(tcp_twsk_destructor);
void tcp_twsk_purge(struct list_head *net_exit_list)
{
@@ -461,12 +472,26 @@ void tcp_openreq_init_rwin(struct request_sock *req,
ireq->rcv_wscale = rcv_wscale;
}
-static void tcp_ecn_openreq_child(struct tcp_sock *tp,
- const struct request_sock *req)
+static void tcp_ecn_openreq_child(struct sock *sk,
+ const struct request_sock *req,
+ const struct sk_buff *skb)
{
- tcp_ecn_mode_set(tp, inet_rsk(req)->ecn_ok ?
- TCP_ECN_MODE_RFC3168 :
- TCP_ECN_DISABLED);
+ const struct tcp_request_sock *treq = tcp_rsk(req);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (treq->accecn_ok) {
+ tcp_ecn_mode_set(tp, TCP_ECN_MODE_ACCECN);
+ tp->syn_ect_snt = treq->syn_ect_snt;
+ tcp_accecn_third_ack(sk, skb, treq->syn_ect_snt);
+ tp->saw_accecn_opt = treq->saw_accecn_opt;
+ tp->prev_ecnfield = treq->syn_ect_rcv;
+ tp->accecn_opt_demand = 1;
+ tcp_ecn_received_counters_payload(sk, skb);
+ } else {
+ tcp_ecn_mode_set(tp, inet_rsk(req)->ecn_ok ?
+ TCP_ECN_MODE_RFC3168 :
+ TCP_ECN_DISABLED);
+ }
}
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
@@ -631,7 +656,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
newtp->rx_opt.mss_clamp = req->mss;
- tcp_ecn_openreq_child(newtp, req);
+ tcp_ecn_openreq_child(newsk, req, skb);
newtp->fastopen_req = NULL;
RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);
@@ -674,6 +699,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
bool own_req;
tmp_opt.saw_tstamp = 0;
+ tmp_opt.accecn = 0;
if (th->doff > (sizeof(struct tcphdr)>>2)) {
tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);
@@ -726,7 +752,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
LINUX_MIB_TCPACKSKIPPEDSYNRECV,
&tcp_rsk(req)->last_oow_ack_time) &&
- !inet_rtx_syn_ack(sk, req)) {
+ !tcp_rtx_synack(sk, req)) {
unsigned long expires = jiffies;
expires += reqsk_timeout(req, TCP_RTO_MAX);
@@ -851,6 +877,18 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
if (!(flg & TCP_FLAG_ACK))
return NULL;
+ if (tcp_rsk(req)->accecn_ok && tmp_opt.accecn &&
+ tcp_rsk(req)->saw_accecn_opt < TCP_ACCECN_OPT_COUNTER_SEEN) {
+ u8 saw_opt = tcp_accecn_option_init(skb, tmp_opt.accecn);
+
+ tcp_rsk(req)->saw_accecn_opt = saw_opt;
+ if (tcp_rsk(req)->saw_accecn_opt == TCP_ACCECN_OPT_FAIL_SEEN) {
+ u8 fail_mode = TCP_ACCECN_OPT_FAIL_RECV;
+
+ tcp_rsk(req)->accecn_fail_mode |= fail_mode;
+ }
+ }
+
/* For Fast Open no more processing is needed (sk is the
* child socket).
*/
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index d293087b426d..2cb93da93abc 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -359,6 +359,7 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
flush |= skb->ip_summed != p->ip_summed;
flush |= skb->csum_level != p->csum_level;
flush |= NAPI_GRO_CB(p)->count >= 64;
+ skb_set_network_header(skb, skb_gro_receive_network_offset(skb));
if (flush || skb_gro_receive_list(p, skb))
mss = 1;
@@ -433,8 +434,7 @@ static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
inet_get_iif_sdif(skb, &iif, &sdif);
iph = skb_gro_network_header(skb);
net = dev_net_rcu(skb->dev);
- sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- iph->saddr, th->source,
+ sk = __inet_lookup_established(net, iph->saddr, th->source,
iph->daddr, ntohs(th->dest),
iif, sdif);
NAPI_GRO_CB(skb)->is_flist = !sk;
@@ -484,6 +484,7 @@ INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
iph->daddr, 0);
+ BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID << 1 != SKB_GSO_TCP_FIXEDID_INNER);
skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4 |
(NAPI_GRO_CB(skb)->ip_fixedid * SKB_GSO_TCP_FIXEDID);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3ac8d2d17e1f..b94efb3050d2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -38,8 +38,10 @@
#define pr_fmt(fmt) "TCP: " fmt
#include <net/tcp.h>
+#include <net/tcp_ecn.h>
#include <net/mptcp.h>
#include <net/proto_memory.h>
+#include <net/psp.h>
#include <linux/compiler.h>
#include <linux/gfp.h>
@@ -319,60 +321,6 @@ static u16 tcp_select_window(struct sock *sk)
return new_win;
}
-/* Packet ECN state for a SYN-ACK */
-static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
-
- TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
- if (tcp_ecn_disabled(tp))
- TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
- else if (tcp_ca_needs_ecn(sk) ||
- tcp_bpf_ca_needs_ecn(sk))
- INET_ECN_xmit(sk);
-}
-
-/* Packet ECN state for a SYN. */
-static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
-{
- struct tcp_sock *tp = tcp_sk(sk);
- bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
- bool use_ecn = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn) == 1 ||
- tcp_ca_needs_ecn(sk) || bpf_needs_ecn;
-
- if (!use_ecn) {
- const struct dst_entry *dst = __sk_dst_get(sk);
-
- if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
- use_ecn = true;
- }
-
- tp->ecn_flags = 0;
-
- if (use_ecn) {
- TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
- tcp_ecn_mode_set(tp, TCP_ECN_MODE_RFC3168);
- if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
- INET_ECN_xmit(sk);
- }
-}
-
-static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
-{
- if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback))
- /* tp->ecn_flags are cleared at a later point in time when
- * SYN ACK is ultimatively being received.
- */
- TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
-}
-
-static void
-tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
-{
- if (inet_rsk(req)->ecn_ok)
- th->ece = 1;
-}
-
/* Set up ECN state for a packet on a ESTABLISHED socket that is about to
* be sent.
*/
@@ -381,7 +329,15 @@ static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
{
struct tcp_sock *tp = tcp_sk(sk);
- if (tcp_ecn_mode_rfc3168(tp)) {
+ if (!tcp_ecn_mode_any(tp))
+ return;
+
+ if (tcp_ecn_mode_accecn(tp)) {
+ if (!tcp_accecn_ace_fail_recv(tp))
+ INET_ECN_xmit(sk);
+ tcp_accecn_set_ace(tp, skb, th);
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ACCECN;
+ } else {
/* Not-retransmitted data segment: set ECT and inject CWR. */
if (skb->len != tcp_header_len &&
!before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
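
The AccECN branch added above relies on tcp_accecn_set_ace() to fill the AE/CWR/ECE header bits. A minimal userspace sketch of that encoding, assuming the ACE field carries the count of CE-marked packets modulo 8 with the draft's initial offset of 5 (constants here are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

#define ACCECN_CEP_INIT_OFFSET	5	/* initial ACE value per the AccECN draft */

/* Pack the low 3 bits of the CE packet counter into AE, CWR and ECE. */
static void set_ace(uint32_t ce_packets, int *ae, int *cwr, int *ece)
{
	uint32_t ace = (ce_packets + ACCECN_CEP_INIT_OFFSET) & 0x7;

	*ae  = (ace >> 2) & 1;
	*cwr = (ace >> 1) & 1;
	*ece = ace & 1;
}

int main(void)
{
	int ae, cwr, ece;

	set_ace(10, &ae, &cwr, &ece);	/* (10 + 5) % 8 == 7: all three bits set */
	printf("AE=%d CWR=%d ECE=%d\n", ae, cwr, ece);
	return 0;
}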
@@ -403,13 +359,15 @@ static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
/* Constructs common control bits of non-data skb. If SYN/FIN is present,
* auto increment end seqno.
*/
-static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u16 flags)
+static void tcp_init_nondata_skb(struct sk_buff *skb, struct sock *sk,
+ u32 seq, u16 flags)
{
skb->ip_summed = CHECKSUM_PARTIAL;
TCP_SKB_CB(skb)->tcp_flags = flags;
tcp_skb_pcount_set(skb, 1);
+ psp_enqueue_set_decrypted(sk, skb);
TCP_SKB_CB(skb)->seq = seq;
if (flags & (TCPHDR_SYN | TCPHDR_FIN))
@@ -430,6 +388,7 @@ static inline bool tcp_urg_mode(const struct tcp_sock *tp)
#define OPTION_SMC BIT(9)
#define OPTION_MPTCP BIT(10)
#define OPTION_AO BIT(11)
+#define OPTION_ACCECN BIT(12)
static void smc_options_write(__be32 *ptr, u16 *options)
{
@@ -451,6 +410,8 @@ struct tcp_out_options {
u16 mss; /* 0 to disable */
u8 ws; /* window scale, 0 to disable */
u8 num_sack_blocks; /* number of SACK blocks to include */
+ u8 num_accecn_fields:7, /* number of AccECN fields needed */
+ use_synack_ecn_bytes:1; /* Use synack_ecn_bytes or not */
u8 hash_size; /* bytes in hash_location */
u8 bpf_opt_len; /* length of BPF hdr option */
__u8 *hash_location; /* temporary pointer, overloaded */
@@ -648,6 +609,11 @@ static __be32 *process_tcp_ao_options(struct tcp_sock *tp,
return ptr;
}
+/* Initial values for the AccECN option; order is based on ECN field bits
+ * similar to received_ecn_bytes. Used for SYN/ACK AccECN option.
+ */
+static const u32 synack_ecn_bytes[3] = { 0, 0, 0 };
+
/* Write previously computed TCP options to the packet.
*
* Beware: Something in the Internet is very sensitive to the ordering of
@@ -666,6 +632,8 @@ static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp,
struct tcp_out_options *opts,
struct tcp_key *key)
{
+ u8 leftover_highbyte = TCPOPT_NOP; /* replace 1st NOP if avail */
+ u8 leftover_lowbyte = TCPOPT_NOP; /* replace 2nd NOP in succession */
__be32 *ptr = (__be32 *)(th + 1);
u16 options = opts->options; /* mungable copy */
@@ -701,15 +669,75 @@ static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp,
*ptr++ = htonl(opts->tsecr);
}
+ if (OPTION_ACCECN & options) {
+ const u32 *ecn_bytes = opts->use_synack_ecn_bytes ?
+ synack_ecn_bytes :
+ tp->received_ecn_bytes;
+ const u8 ect0_idx = INET_ECN_ECT_0 - 1;
+ const u8 ect1_idx = INET_ECN_ECT_1 - 1;
+ const u8 ce_idx = INET_ECN_CE - 1;
+ u32 e0b;
+ u32 e1b;
+ u32 ceb;
+ u8 len;
+
+ e0b = ecn_bytes[ect0_idx] + TCP_ACCECN_E0B_INIT_OFFSET;
+ e1b = ecn_bytes[ect1_idx] + TCP_ACCECN_E1B_INIT_OFFSET;
+ ceb = ecn_bytes[ce_idx] + TCP_ACCECN_CEB_INIT_OFFSET;
+ len = TCPOLEN_ACCECN_BASE +
+ opts->num_accecn_fields * TCPOLEN_ACCECN_PERFIELD;
+
+ if (opts->num_accecn_fields == 2) {
+ *ptr++ = htonl((TCPOPT_ACCECN1 << 24) | (len << 16) |
+ ((e1b >> 8) & 0xffff));
+ *ptr++ = htonl(((e1b & 0xff) << 24) |
+ (ceb & 0xffffff));
+ } else if (opts->num_accecn_fields == 1) {
+ *ptr++ = htonl((TCPOPT_ACCECN1 << 24) | (len << 16) |
+ ((e1b >> 8) & 0xffff));
+ leftover_highbyte = e1b & 0xff;
+ leftover_lowbyte = TCPOPT_NOP;
+ } else if (opts->num_accecn_fields == 0) {
+ leftover_highbyte = TCPOPT_ACCECN1;
+ leftover_lowbyte = len;
+ } else if (opts->num_accecn_fields == 3) {
+ *ptr++ = htonl((TCPOPT_ACCECN1 << 24) | (len << 16) |
+ ((e1b >> 8) & 0xffff));
+ *ptr++ = htonl(((e1b & 0xff) << 24) |
+ (ceb & 0xffffff));
+ *ptr++ = htonl(((e0b & 0xffffff) << 8) |
+ TCPOPT_NOP);
+ }
+ if (tp) {
+ tp->accecn_minlen = 0;
+ tp->accecn_opt_tstamp = tp->tcp_mstamp;
+ if (tp->accecn_opt_demand)
+ tp->accecn_opt_demand--;
+ }
+ }
+
if (unlikely(OPTION_SACK_ADVERTISE & options)) {
- *ptr++ = htonl((TCPOPT_NOP << 24) |
- (TCPOPT_NOP << 16) |
+ *ptr++ = htonl((leftover_highbyte << 24) |
+ (leftover_lowbyte << 16) |
(TCPOPT_SACK_PERM << 8) |
TCPOLEN_SACK_PERM);
+ leftover_highbyte = TCPOPT_NOP;
+ leftover_lowbyte = TCPOPT_NOP;
}
if (unlikely(OPTION_WSCALE & options)) {
- *ptr++ = htonl((TCPOPT_NOP << 24) |
+ u8 highbyte = TCPOPT_NOP;
+
+ /* Do not split a 2-byte leftover to fit into a single NOP,
+ * i.e., replace this NOP only when a single leftover byte remains
+ * in leftover_highbyte.
+ */
+ if (unlikely(leftover_highbyte != TCPOPT_NOP &&
+ leftover_lowbyte == TCPOPT_NOP)) {
+ highbyte = leftover_highbyte;
+ leftover_highbyte = TCPOPT_NOP;
+ }
+ *ptr++ = htonl((highbyte << 24) |
(TCPOPT_WINDOW << 16) |
(TCPOLEN_WINDOW << 8) |
opts->ws);
@@ -720,11 +748,13 @@ static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp,
tp->duplicate_sack : tp->selective_acks;
int this_sack;
- *ptr++ = htonl((TCPOPT_NOP << 24) |
- (TCPOPT_NOP << 16) |
+ *ptr++ = htonl((leftover_highbyte << 24) |
+ (leftover_lowbyte << 16) |
(TCPOPT_SACK << 8) |
(TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
TCPOLEN_SACK_PERBLOCK)));
+ leftover_highbyte = TCPOPT_NOP;
+ leftover_lowbyte = TCPOPT_NOP;
for (this_sack = 0; this_sack < opts->num_sack_blocks;
++this_sack) {
@@ -733,6 +763,14 @@ static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp,
}
tp->rx_opt.dsack = 0;
+ } else if (unlikely(leftover_highbyte != TCPOPT_NOP ||
+ leftover_lowbyte != TCPOPT_NOP)) {
+ *ptr++ = htonl((leftover_highbyte << 24) |
+ (leftover_lowbyte << 16) |
+ (TCPOPT_NOP << 8) |
+ TCPOPT_NOP);
+ leftover_highbyte = TCPOPT_NOP;
+ leftover_lowbyte = TCPOPT_NOP;
}
if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
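
To see how the leftover bytes above end up on the wire, here is a small sketch (not part of the patch) packing a one-field AccECN option followed by SACK-permitted: the option kind, length and the top 16 bits of the 24-bit ECT(1) byte counter fill one 32-bit word, and the counter's last byte replaces the first padding NOP of the next option. The option kind value is a placeholder, not the real TCPOPT_ACCECN1.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define OPT_NOP		1
#define OPT_SACK_PERM	4
#define LEN_SACK_PERM	2
#define OPT_ACCECN1	0xAC	/* placeholder kind value */
#define ACCECN_BASE	2	/* kind + len */
#define ACCECN_PERFIELD	3	/* one 24-bit byte counter */

int main(void)
{
	uint32_t e1b = 0x0102A3;	/* 24-bit ECT(1) byte counter */
	uint8_t len = ACCECN_BASE + 1 * ACCECN_PERFIELD;	/* 5 bytes on the wire */
	uint8_t leftover = e1b & 0xff;	/* counter byte spilling past the word */
	uint32_t words[2];

	/* kind | len | top 16 bits of the counter */
	words[0] = htonl(((uint32_t)OPT_ACCECN1 << 24) | ((uint32_t)len << 16) |
			 ((e1b >> 8) & 0xffff));
	/* spilled byte takes the place of the first NOP before SACK-permitted */
	words[1] = htonl(((uint32_t)leftover << 24) | (OPT_NOP << 16) |
			 (OPT_SACK_PERM << 8) | LEN_SACK_PERM);

	printf("%08x %08x\n", ntohl(words[0]), ntohl(words[1]));
	return 0;
}

With num_accecn_fields == 0 only kind and length are emitted and both land in the leftover bytes, so a minimal AccECN option can ride entirely on padding that would otherwise be NOPs.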
@@ -813,6 +851,80 @@ static void mptcp_set_option_cond(const struct request_sock *req,
}
}
+static u32 tcp_synack_options_combine_saving(struct tcp_out_options *opts)
+{
+ /* How much there's room for combining with the alignment padding? */
+ if ((opts->options & (OPTION_SACK_ADVERTISE | OPTION_TS)) ==
+ OPTION_SACK_ADVERTISE)
+ return 2;
+ else if (opts->options & OPTION_WSCALE)
+ return 1;
+ return 0;
+}
+
+/* Calculates how large an AccECN option fits into @remaining option space.
+ *
+ * AccECN option can sometimes replace NOPs used for alignment of other
+ * TCP options (up to @max_combine_saving available).
+ *
+ * Only solutions with at least @required AccECN fields are accepted.
+ *
+ * Returns: The size of the AccECN option excluding space repurposed from
+ * the alignment of the other options.
+ */
+static int tcp_options_fit_accecn(struct tcp_out_options *opts, int required,
+ int remaining)
+{
+ int size = TCP_ACCECN_MAXSIZE;
+ int sack_blocks_reduce = 0;
+ int max_combine_saving;
+ int rem = remaining;
+ int align_size;
+
+ if (opts->use_synack_ecn_bytes)
+ max_combine_saving = tcp_synack_options_combine_saving(opts);
+ else
+ max_combine_saving = opts->num_sack_blocks > 0 ? 2 : 0;
+ opts->num_accecn_fields = TCP_ACCECN_NUMFIELDS;
+ while (opts->num_accecn_fields >= required) {
+ /* Pad to dword if cannot combine */
+ if ((size & 0x3) > max_combine_saving)
+ align_size = ALIGN(size, 4);
+ else
+ align_size = ALIGN_DOWN(size, 4);
+
+ if (rem >= align_size) {
+ size = align_size;
+ break;
+ } else if (opts->num_accecn_fields == required &&
+ opts->num_sack_blocks > 2 &&
+ required > 0) {
+ /* Try to fit the option by removing one SACK block */
+ opts->num_sack_blocks--;
+ sack_blocks_reduce++;
+ rem = rem + TCPOLEN_SACK_PERBLOCK;
+
+ opts->num_accecn_fields = TCP_ACCECN_NUMFIELDS;
+ size = TCP_ACCECN_MAXSIZE;
+ continue;
+ }
+
+ opts->num_accecn_fields--;
+ size -= TCPOLEN_ACCECN_PERFIELD;
+ }
+ if (sack_blocks_reduce > 0) {
+ if (opts->num_accecn_fields >= required)
+ size -= sack_blocks_reduce * TCPOLEN_SACK_PERBLOCK;
+ else
+ opts->num_sack_blocks += sack_blocks_reduce;
+ }
+ if (opts->num_accecn_fields < required)
+ return 0;
+
+ opts->options |= OPTION_ACCECN;
+ return size;
+}
+
/* Compute TCP options for SYN packets. This is not the final
* network wire format yet.
*/
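
A compact userspace sketch (not part of the patch) of the sizing loop in tcp_options_fit_accecn() above, without the SACK-block-stealing fallback: starting from all three fields, shrink until the option, rounded up to a dword unless its tail can hide in the next option's alignment padding, fits the remaining space.

#include <stdio.h>

#define ACCECN_BASE		2
#define ACCECN_PERFIELD		3
#define ACCECN_NUMFIELDS	3
#define ALIGN_UP(x)		(((x) + 3) & ~3)
#define ALIGN_DOWN(x)		((x) & ~3)

/* Returns bytes charged against @remaining, or 0 if @required fields don't fit. */
static int fit_accecn(int required, int remaining, int saving, int *fields)
{
	int n = ACCECN_NUMFIELDS;
	int size = ACCECN_BASE + n * ACCECN_PERFIELD;

	while (n >= required) {
		/* pad to a dword unless the tail fits into the next option's padding */
		int aligned = (size & 0x3) > saving ? ALIGN_UP(size) : ALIGN_DOWN(size);

		if (remaining >= aligned) {
			*fields = n;
			return aligned;
		}
		n--;
		size -= ACCECN_PERFIELD;
	}
	return 0;
}

int main(void)
{
	int fields = 0;
	int used = fit_accecn(0, 10, 2, &fields);

	/* 3 fields (11 bytes) round up to 12 and miss; 2 fields (8 bytes) fit */
	printf("fields=%d, bytes charged=%d\n", fields, used);
	return 0;
}

Note that the charged size can be smaller than the wire size: a 5-byte one-field option over a 2-byte saving is charged only 4 bytes, the last byte reusing padding of the following option.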
@@ -895,6 +1007,20 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
}
}
+ /* Simultaneous open SYN/ACK needs AccECN option but not SYN.
+ * Negotiating AccECN is also attempted on the first retransmitted
+ * SYN, as described in "3.1.4.1. Retransmitted SYNs" of the AccECN
+ * draft.
+ */
+ if (unlikely((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK) &&
+ tcp_ecn_mode_accecn(tp) &&
+ inet_csk(sk)->icsk_retransmits < 2 &&
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_option) &&
+ remaining >= TCPOLEN_ACCECN_BASE)) {
+ opts->use_synack_ecn_bytes = 1;
+ remaining -= tcp_options_fit_accecn(opts, 0, remaining);
+ }
+
bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);
return MAX_TCP_OPTION_SPACE - remaining;
@@ -912,6 +1038,7 @@ static unsigned int tcp_synack_options(const struct sock *sk,
{
struct inet_request_sock *ireq = inet_rsk(req);
unsigned int remaining = MAX_TCP_OPTION_SPACE;
+ struct tcp_request_sock *treq = tcp_rsk(req);
if (tcp_key_is_md5(key)) {
opts->options |= OPTION_MD5;
@@ -974,6 +1101,13 @@ static unsigned int tcp_synack_options(const struct sock *sk,
smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);
+ if (treq->accecn_ok &&
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_option) &&
+ req->num_timeout < 1 && remaining >= TCPOLEN_ACCECN_BASE) {
+ opts->use_synack_ecn_bytes = 1;
+ remaining -= tcp_options_fit_accecn(opts, 0, remaining);
+ }
+
bpf_skops_hdr_opt_len((struct sock *)sk, skb, req, syn_skb,
synack_type, opts, &remaining);
@@ -1030,17 +1164,32 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
if (unlikely(eff_sacks)) {
const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
- if (unlikely(remaining < TCPOLEN_SACK_BASE_ALIGNED +
- TCPOLEN_SACK_PERBLOCK))
- return size;
+ if (likely(remaining >= TCPOLEN_SACK_BASE_ALIGNED +
+ TCPOLEN_SACK_PERBLOCK)) {
+ opts->num_sack_blocks =
+ min_t(unsigned int, eff_sacks,
+ (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
+ TCPOLEN_SACK_PERBLOCK);
+
+ size += TCPOLEN_SACK_BASE_ALIGNED +
+ opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
+ } else {
+ opts->num_sack_blocks = 0;
+ }
+ } else {
+ opts->num_sack_blocks = 0;
+ }
- opts->num_sack_blocks =
- min_t(unsigned int, eff_sacks,
- (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
- TCPOLEN_SACK_PERBLOCK);
+ if (tcp_ecn_mode_accecn(tp)) {
+ int ecn_opt = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_option);
- size += TCPOLEN_SACK_BASE_ALIGNED +
- opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
+ if (ecn_opt && tp->saw_accecn_opt && !tcp_accecn_opt_fail_send(tp) &&
+ (ecn_opt >= TCP_ACCECN_OPTION_FULL || tp->accecn_opt_demand ||
+ tcp_accecn_option_beacon_check(sk))) {
+ opts->use_synack_ecn_bytes = 0;
+ size += tcp_options_fit_accecn(opts, tp->accecn_minlen,
+ MAX_TCP_OPTION_SPACE - size);
+ }
}
if (unlikely(BPF_SOCK_OPS_TEST_FLAG(tp,
@@ -1066,15 +1215,15 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
* needs to be reallocated in a driver.
* The invariant being skb->truesize subtracted from sk->sk_wmem_alloc
*
- * Since transmit from skb destructor is forbidden, we use a tasklet
+ * Since transmit from skb destructor is forbidden, we use a BH work item
* to process all sockets that eventually need to send more skbs.
- * We use one tasklet per cpu, with its own queue of sockets.
+ * We use one work item per cpu, with its own queue of sockets.
*/
-struct tsq_tasklet {
- struct tasklet_struct tasklet;
+struct tsq_work {
+ struct work_struct work;
struct list_head head; /* queue of tcp sockets */
};
-static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
+static DEFINE_PER_CPU(struct tsq_work, tsq_work);
static void tcp_tsq_write(struct sock *sk)
{
@@ -1104,14 +1253,14 @@ static void tcp_tsq_handler(struct sock *sk)
bh_unlock_sock(sk);
}
/*
- * One tasklet per cpu tries to send more skbs.
- * We run in tasklet context but need to disable irqs when
+ * One work item per cpu tries to send more skbs.
+ * We run in BH context but need to disable irqs when
* transferring tsq->head because tcp_wfree() might
* interrupt us (non NAPI drivers)
*/
-static void tcp_tasklet_func(struct tasklet_struct *t)
+static void tcp_tsq_workfn(struct work_struct *work)
{
- struct tsq_tasklet *tsq = from_tasklet(tsq, t, tasklet);
+ struct tsq_work *tsq = container_of(work, struct tsq_work, work);
LIST_HEAD(list);
unsigned long flags;
struct list_head *q, *n;
@@ -1181,15 +1330,15 @@ void tcp_release_cb(struct sock *sk)
}
EXPORT_IPV6_MOD(tcp_release_cb);
-void __init tcp_tasklet_init(void)
+void __init tcp_tsq_work_init(void)
{
int i;
for_each_possible_cpu(i) {
- struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);
+ struct tsq_work *tsq = &per_cpu(tsq_work, i);
INIT_LIST_HEAD(&tsq->head);
- tasklet_setup(&tsq->tasklet, tcp_tasklet_func);
+ INIT_WORK(&tsq->work, tcp_tsq_workfn);
}
}
@@ -1203,11 +1352,11 @@ void tcp_wfree(struct sk_buff *skb)
struct sock *sk = skb->sk;
struct tcp_sock *tp = tcp_sk(sk);
unsigned long flags, nval, oval;
- struct tsq_tasklet *tsq;
+ struct tsq_work *tsq;
bool empty;
/* Keep one reference on sk_wmem_alloc.
- * Will be released by sk_free() from here or tcp_tasklet_func()
+ * Will be released by sk_free() from here or tcp_tsq_workfn()
*/
WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));
@@ -1229,13 +1378,13 @@ void tcp_wfree(struct sk_buff *skb)
nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED;
} while (!try_cmpxchg(&sk->sk_tsq_flags, &oval, nval));
- /* queue this socket to tasklet queue */
+ /* queue this socket to BH workqueue */
local_irq_save(flags);
- tsq = this_cpu_ptr(&tsq_tasklet);
+ tsq = this_cpu_ptr(&tsq_work);
empty = list_empty(&tsq->head);
list_add(&tp->tsq_node, &tsq->head);
if (empty)
- tasklet_schedule(&tsq->tasklet);
+ queue_work(system_bh_wq, &tsq->work);
local_irq_restore(flags);
return;
out:
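
The THROTTLED/QUEUED hand-off above guarantees that, out of many concurrent TX completions, exactly one queues the socket on the per-CPU work list. A C11 sketch of that transition (flag values are illustrative, and the guard condition is assumed from the usual tsq_flags pattern; it is not shown in this hunk):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TSQF_THROTTLED	0x1
#define TSQF_QUEUED	0x2

static _Atomic unsigned int tsq_flags = TSQF_THROTTLED;

/* Returns true for the one caller that must queue the socket. */
static bool wfree_should_queue(void)
{
	unsigned int oval = atomic_load(&tsq_flags);
	unsigned int nval;

	do {
		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
			return false;	/* not throttled, or someone already queued it */
		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED;
	} while (!atomic_compare_exchange_weak(&tsq_flags, &oval, nval));
	return true;
}

int main(void)
{
	printf("first completion queues:  %d\n", wfree_should_queue());	/* 1 */
	printf("second completion queues: %d\n", wfree_should_queue());	/* 0 */
	return 0;
}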
@@ -1437,7 +1586,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
err = tcp_ao_transmit_skb(sk, skb, key.ao_key, th,
opts.hash_location);
if (err) {
- kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
+ sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_NOT_SPECIFIED);
return -ENOMEM;
}
}
@@ -1510,6 +1659,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
/* Advance write_seq and place onto the write_queue. */
WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
__skb_header_release(skb);
+ psp_enqueue_set_decrypted(sk, skb);
tcp_add_write_queue_tail(sk, skb);
sk_wmem_queued_add(sk, skb->truesize);
sk_mem_charge(sk, skb->truesize);
@@ -1554,11 +1704,6 @@ static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int de
if (tcp_is_reno(tp) && decr > 0)
tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
- if (tp->lost_skb_hint &&
- before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
- (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
- tp->lost_cnt_hint -= decr;
-
tcp_verify_left_out(tp);
}
@@ -2224,7 +2369,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
u32 max_segs)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
- u32 send_win, cong_win, limit, in_flight;
+ u32 send_win, cong_win, limit, in_flight, threshold;
+ u64 srtt_in_ns, expected_ack, how_far_is_the_ack;
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *head;
int win_divisor;
@@ -2286,9 +2432,19 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
head = tcp_rtx_queue_head(sk);
if (!head)
goto send_now;
- delta = tp->tcp_clock_cache - head->tstamp;
- /* If next ACK is likely to come too late (half srtt), do not defer */
- if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
+
+ srtt_in_ns = (u64)(NSEC_PER_USEC >> 3) * tp->srtt_us;
+ /* When is the ACK expected ? */
+ expected_ack = head->tstamp + srtt_in_ns;
+ /* How far from now is the ACK expected ? */
+ how_far_is_the_ack = expected_ack - tp->tcp_clock_cache;
+
+ /* If next ACK is likely to come too late,
+ * i.e. in more than min(1ms, half srtt), do not defer.
+ */
+ threshold = min(srtt_in_ns >> 1, NSEC_PER_MSEC);
+
+ if ((s64)(how_far_is_the_ack - threshold) > 0)
goto send_now;
/* Ok, it looks like it is advisable to defer.
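
The deferral arithmetic above in plain numbers: tp->srtt_us stores 8 * srtt in microseconds, hence the ">> 3" folded into the nanosecond conversion, and deferring is only worthwhile when the ACK for the oldest retransmit-queue skb is expected within min(1 ms, srtt/2). A standalone sketch with made-up timestamps:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC	1000ULL
#define NSEC_PER_MSEC	1000000ULL

int main(void)
{
	uint32_t srtt_us_scaled = 8 * 4000;	/* srtt = 4 ms, stored scaled by 8 */
	uint64_t head_tstamp_ns = 10000000;	/* oldest rtx-queue skb sent at t = 10 ms */
	uint64_t now_ns = 11000000;		/* tcp_clock_cache: t = 11 ms */

	uint64_t srtt_ns = (NSEC_PER_USEC >> 3) * srtt_us_scaled;	/* 4 ms */
	uint64_t expected_ack = head_tstamp_ns + srtt_ns;		/* t = 14 ms */
	int64_t ack_in_ns = expected_ack - now_ns;			/* 3 ms from now */
	uint64_t threshold = srtt_ns / 2 < NSEC_PER_MSEC ? srtt_ns / 2 : NSEC_PER_MSEC;

	/* ACK is 3 ms away but the threshold is min(2 ms, 1 ms) = 1 ms: send now */
	printf("%s\n", ack_in_ns - (int64_t)threshold > 0 ? "send now" : "defer");
	return 0;
}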
@@ -2639,7 +2795,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
if (refcount_read(&sk->sk_wmem_alloc) > limit) {
/* Always send skb if rtx queue is empty or has one skb.
* No need to wait for TX completion to call us back,
- * after softirq/tasklet schedule.
+ * after softirq schedule.
* This helps when TX completions are delayed too much.
*/
if (tcp_rtx_queue_empty_or_single_skb(sk))
@@ -2755,6 +2911,11 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
sent_pkts = 0;
tcp_mstamp_refresh(tp);
+
+ /* AccECN option beacon depends on mstamp; it may change mss */
+ if (tcp_ecn_mode_accecn(tp) && tcp_accecn_option_beacon_check(sk))
+ mss_now = tcp_current_mss(sk);
+
if (!push_one) {
/* Do MTU probing. */
result = tcp_mtu_probe(sk);
@@ -3252,7 +3413,6 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
/* changed transmit queue under us so clear hints */
- tcp_clear_retrans_hints_partial(tp);
if (next_skb == tp->retransmit_skb_hint)
tp->retransmit_skb_hint = skb;
@@ -3336,8 +3496,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
if (icsk->icsk_mtup.probe_size)
icsk->icsk_mtup.probe_size = 0;
- if (skb_still_in_host_queue(sk, skb))
- return -EBUSY;
+ if (skb_still_in_host_queue(sk, skb)) {
+ err = -EBUSY;
+ goto out;
+ }
start:
if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
@@ -3348,14 +3510,19 @@ start:
}
if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
WARN_ON_ONCE(1);
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
+ }
+ if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) {
+ err = -ENOMEM;
+ goto out;
}
- if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
- return -ENOMEM;
}
- if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
- return -EHOSTUNREACH; /* Routing failure or similar. */
+ if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) {
+ err = -EHOSTUNREACH; /* Routing failure or similar. */
+ goto out;
+ }
cur_mss = tcp_current_mss(sk);
avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
@@ -3366,8 +3533,10 @@ start:
* our retransmit of one segment serves as a zero window probe.
*/
if (avail_wnd <= 0) {
- if (TCP_SKB_CB(skb)->seq != tp->snd_una)
- return -EAGAIN;
+ if (TCP_SKB_CB(skb)->seq != tp->snd_una) {
+ err = -EAGAIN;
+ goto out;
+ }
avail_wnd = cur_mss;
}
@@ -3379,11 +3548,15 @@ start:
}
if (skb->len > len) {
if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
- cur_mss, GFP_ATOMIC))
- return -ENOMEM; /* We'll try again later. */
+ cur_mss, GFP_ATOMIC)) {
+ err = -ENOMEM; /* We'll try again later. */
+ goto out;
+ }
} else {
- if (skb_unclone_keeptruesize(skb, GFP_ATOMIC))
- return -ENOMEM;
+ if (skb_unclone_keeptruesize(skb, GFP_ATOMIC)) {
+ err = -ENOMEM;
+ goto out;
+ }
diff = tcp_skb_pcount(skb);
tcp_set_skb_tso_segs(skb, cur_mss);
@@ -3395,7 +3568,10 @@ start:
tcp_retrans_try_collapse(sk, skb, avail_wnd);
}
- /* RFC3168, section 6.1.1.1. ECN fallback */
+ /* RFC3168, section 6.1.1.1. ECN fallback
+ * As AccECN uses the same SYN flags (+ AE), this check covers both
+ * cases.
+ */
if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
tcp_ecn_clear_syn(sk, skb);
@@ -3437,17 +3613,16 @@ start:
tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
TCP_SKB_CB(skb)->seq, segs, err);
- if (likely(!err)) {
- trace_tcp_retransmit_skb(sk, skb);
- } else if (err != -EBUSY) {
+ if (unlikely(err) && err != -EBUSY)
NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
- }
/* To avoid taking spuriously low RTT samples based on a timestamp
* for a transmit that never happened, always mark EVER_RETRANS
*/
TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
+out:
+ trace_tcp_retransmit_skb(sk, skb, err);
return err;
}
@@ -3572,9 +3747,8 @@ void sk_forced_mem_schedule(struct sock *sk, int size)
sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
sk_memory_allocated_add(sk, amt);
- if (mem_cgroup_sockets_enabled && sk->sk_memcg)
- mem_cgroup_charge_skmem(sk->sk_memcg, amt,
- gfp_memcg_charge() | __GFP_NOFAIL);
+ if (mem_cgroup_sk_enabled(sk))
+ mem_cgroup_sk_charge(sk, amt, gfp_memcg_charge() | __GFP_NOFAIL);
}
/* Send a FIN. The caller locks the socket for us.
@@ -3619,7 +3793,7 @@ void tcp_send_fin(struct sock *sk)
skb_reserve(skb, MAX_TCP_HEADER);
sk_forced_mem_schedule(sk, skb->truesize);
/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
- tcp_init_nondata_skb(skb, tp->write_seq,
+ tcp_init_nondata_skb(skb, sk, tp->write_seq,
TCPHDR_ACK | TCPHDR_FIN);
tcp_queue_skb(sk, skb);
}
@@ -3647,7 +3821,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority,
/* Reserve space for headers and prepare control bits. */
skb_reserve(skb, MAX_TCP_HEADER);
- tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
+ tcp_init_nondata_skb(skb, sk, tcp_acceptable_seq(sk),
TCPHDR_ACK | TCPHDR_RST);
tcp_mstamp_refresh(tcp_sk(sk));
/* Send it off. */
@@ -3885,6 +4059,7 @@ static void tcp_connect_init(struct sock *sk)
const struct dst_entry *dst = __sk_dst_get(sk);
struct tcp_sock *tp = tcp_sk(sk);
__u8 rcv_wscale;
+ u16 user_mss;
u32 rcv_wnd;
/* We'll fix this up when we get a response from the other end.
@@ -3897,8 +4072,9 @@ static void tcp_connect_init(struct sock *sk)
tcp_ao_connect_init(sk);
/* If user gave his TCP_MAXSEG, record it to clamp */
- if (tp->rx_opt.user_mss)
- tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
+ user_mss = READ_ONCE(tp->rx_opt.user_mss);
+ if (user_mss)
+ tp->rx_opt.mss_clamp = user_mss;
tp->max_window = 0;
tcp_mtup_init(sk);
tcp_sync_mss(sk, dst_mtu(dst));
@@ -3949,7 +4125,7 @@ static void tcp_connect_init(struct sock *sk)
WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
- inet_csk(sk)->icsk_retransmits = 0;
+ WRITE_ONCE(inet_csk(sk)->icsk_retransmits, 0);
tcp_clear_retrans(tp);
}
@@ -4142,7 +4318,7 @@ int tcp_connect(struct sock *sk)
/* SYN eats a sequence byte, write_seq updated by
* tcp_connect_queue_skb().
*/
- tcp_init_nondata_skb(buff, tp->write_seq, TCPHDR_SYN);
+ tcp_init_nondata_skb(buff, sk, tp->write_seq, TCPHDR_SYN);
tcp_mstamp_refresh(tp);
tp->retrans_stamp = tcp_time_stamp_ts(tp);
tcp_connect_queue_skb(sk, buff);
@@ -4267,7 +4443,8 @@ void __tcp_send_ack(struct sock *sk, u32 rcv_nxt, u16 flags)
/* Reserve space for headers and prepare control bits. */
skb_reserve(buff, MAX_TCP_HEADER);
- tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK | flags);
+ tcp_init_nondata_skb(buff, sk,
+ tcp_acceptable_seq(sk), TCPHDR_ACK | flags);
/* We do not want pure acks influencing TCP Small Queues or fq/pacing
* too much.
@@ -4313,7 +4490,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
* end to send an ack. Don't queue or clone SKB, just
* send it.
*/
- tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
+ tcp_init_nondata_skb(skb, sk, tp->snd_una - !urgent, TCPHDR_ACK);
NET_INC_STATS(sock_net(sk), mib);
return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
}
@@ -4387,13 +4564,13 @@ void tcp_send_probe0(struct sock *sk)
if (tp->packets_out || tcp_write_queue_empty(sk)) {
/* Cancel probe timer, if it is not required. */
- icsk->icsk_probes_out = 0;
+ WRITE_ONCE(icsk->icsk_probes_out, 0);
icsk->icsk_backoff = 0;
icsk->icsk_probes_tstamp = 0;
return;
}
- icsk->icsk_probes_out++;
+ WRITE_ONCE(icsk->icsk_probes_out, icsk->icsk_probes_out + 1);
if (err <= 0) {
if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2))
icsk->icsk_backoff++;
@@ -4431,6 +4608,7 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
tcp_sk_rw(sk)->total_retrans++;
}
trace_tcp_retransmit_synack(sk, req);
+ WRITE_ONCE(req->num_retrans, req->num_retrans + 1);
}
return res;
}
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index bba10110fbbc..c52fd3254b6e 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -35,7 +35,7 @@ s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
}
-/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
+/* RACK loss detection (IETF RFC8985):
*
* Marks a packet lost, if some packet sent later has been (s)acked.
* The underlying idea is similar to the traditional dupthresh and FACK
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index bb37e24b97a7..2dd73a4e8e51 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -392,7 +392,7 @@ static void tcp_probe_timer(struct sock *sk)
int max_probes;
if (tp->packets_out || !skb) {
- icsk->icsk_probes_out = 0;
+ WRITE_ONCE(icsk->icsk_probes_out, 0);
icsk->icsk_probes_tstamp = 0;
return;
}
@@ -444,7 +444,7 @@ static void tcp_update_rto_stats(struct sock *sk)
tp->total_rto_recoveries++;
tp->rto_stamp = tcp_time_stamp_ms(tp);
}
- icsk->icsk_retransmits++;
+ WRITE_ONCE(icsk->icsk_retransmits, icsk->icsk_retransmits + 1);
tp->total_rto++;
}
@@ -478,7 +478,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
* regular retransmit because if the child socket has been accepted
* it's not good to give up too easily.
*/
- inet_rtx_syn_ack(sk, req);
+ tcp_rtx_synack(sk, req);
req->num_timeout++;
tcp_update_rto_stats(sk);
if (!tp->retrans_stamp)
@@ -839,7 +839,7 @@ static void tcp_keepalive_timer(struct timer_list *t)
goto out;
}
if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
- icsk->icsk_probes_out++;
+ WRITE_ONCE(icsk->icsk_probes_out, icsk->icsk_probes_out + 1);
elapsed = keepalive_intvl_when(tp);
} else {
/* If keepalive was lost due to local congestion,
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index dde52b8050b8..30dfbf73729d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -68,7 +68,7 @@
* YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
* Alexey Kuznetsov: allow both IPv4 and IPv6 sockets to bind
* a single port at the same time.
- * Derek Atkins <derek@ihtfp.com>: Add Encapulation Support
+ * Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
* James Chapman : Add L2TP encapsulation type.
*/
@@ -127,8 +127,6 @@ struct udp_table udp_table __read_mostly;
long sysctl_udp_mem[3] __read_mostly;
EXPORT_IPV6_MOD(sysctl_udp_mem);
-atomic_long_t udp_memory_allocated ____cacheline_aligned_in_smp;
-EXPORT_IPV6_MOD(udp_memory_allocated);
DEFINE_PER_CPU(int, udp_memory_per_cpu_fw_alloc);
EXPORT_PER_CPU_SYMBOL_GPL(udp_memory_per_cpu_fw_alloc);
@@ -145,8 +143,8 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
unsigned long *bitmap,
struct sock *sk, unsigned int log)
{
+ kuid_t uid = sk_uid(sk);
struct sock *sk2;
- kuid_t uid = sock_i_uid(sk);
sk_for_each(sk2, &hslot->head) {
if (net_eq(sock_net(sk2), net) &&
@@ -158,7 +156,7 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
inet_rcv_saddr_equal(sk, sk2, true)) {
if (sk2->sk_reuseport && sk->sk_reuseport &&
!rcu_access_pointer(sk->sk_reuseport_cb) &&
- uid_eq(uid, sock_i_uid(sk2))) {
+ uid_eq(uid, sk_uid(sk2))) {
if (!bitmap)
return 0;
} else {
@@ -180,8 +178,8 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
struct udp_hslot *hslot2,
struct sock *sk)
{
+ kuid_t uid = sk_uid(sk);
struct sock *sk2;
- kuid_t uid = sock_i_uid(sk);
int res = 0;
spin_lock(&hslot2->lock);
@@ -195,7 +193,7 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
inet_rcv_saddr_equal(sk, sk2, true)) {
if (sk2->sk_reuseport && sk->sk_reuseport &&
!rcu_access_pointer(sk->sk_reuseport_cb) &&
- uid_eq(uid, sock_i_uid(sk2))) {
+ uid_eq(uid, sk_uid(sk2))) {
res = 0;
} else {
res = 1;
@@ -210,7 +208,7 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
struct net *net = sock_net(sk);
- kuid_t uid = sock_i_uid(sk);
+ kuid_t uid = sk_uid(sk);
struct sock *sk2;
sk_for_each(sk2, &hslot->head) {
@@ -220,7 +218,7 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
(udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
(sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
- sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
+ sk2->sk_reuseport && uid_eq(uid, sk_uid(sk2)) &&
inet_rcv_saddr_equal(sk, sk2, false)) {
return reuseport_add_sock(sk, sk2,
inet_rcv_saddr_any(sk));
@@ -511,7 +509,7 @@ rescore:
/* compute_score is too long of a function to be
* inlined, and calling it again here yields
- * measureable overhead for some
+ * measurable overhead for some
* workloads. Work around it by jumping
* backwards to rescore 'result'.
*/
@@ -1445,7 +1443,8 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark,
ipc.tos & INET_DSCP_MASK, scope,
sk->sk_protocol, flow_flags, faddr, saddr,
- dport, inet->inet_sport, sk->sk_uid);
+ dport, inet->inet_sport,
+ sk_uid(sk));
security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
rt = ip_route_output_flow(net, fl4, sk);
@@ -1686,31 +1685,6 @@ static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
}
-/* Idea of busylocks is to let producers grab an extra spinlock
- * to relieve pressure on the receive_queue spinlock shared by consumer.
- * Under flood, this means that only one producer can be in line
- * trying to acquire the receive_queue spinlock.
- * These busylock can be allocated on a per cpu manner, instead of a
- * per socket one (that would consume a cache line per socket)
- */
-static int udp_busylocks_log __read_mostly;
-static spinlock_t *udp_busylocks __read_mostly;
-
-static spinlock_t *busylock_acquire(void *ptr)
-{
- spinlock_t *busy;
-
- busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
- spin_lock(busy);
- return busy;
-}
-
-static void busylock_release(spinlock_t *busy)
-{
- if (busy)
- spin_unlock(busy);
-}
-
static int udp_rmem_schedule(struct sock *sk, int size)
{
int delta;
@@ -1725,14 +1699,24 @@ static int udp_rmem_schedule(struct sock *sk, int size)
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
struct sk_buff_head *list = &sk->sk_receive_queue;
+ struct udp_prod_queue *udp_prod_queue;
+ struct sk_buff *next, *to_drop = NULL;
+ struct llist_node *ll_list;
unsigned int rmem, rcvbuf;
- spinlock_t *busy = NULL;
int size, err = -ENOMEM;
+ int total_size = 0;
+ int q_size = 0;
+ int dropcount;
+ int nb = 0;
rmem = atomic_read(&sk->sk_rmem_alloc);
rcvbuf = READ_ONCE(sk->sk_rcvbuf);
size = skb->truesize;
+ udp_prod_queue = &udp_sk(sk)->udp_prod_queue[numa_node_id()];
+
+ rmem += atomic_read(&udp_prod_queue->rmem_alloc);
+
/* Immediately drop when the receive queue is full.
* Cast to unsigned int performs the boundary check for INT_MAX.
*/
@@ -1740,8 +1724,8 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
if (rcvbuf > INT_MAX >> 1)
goto drop;
- /* Always allow at least one packet for small buffer. */
- if (rmem > rcvbuf)
+ /* Accept the packet if queue is empty. */
+ if (rmem)
goto drop;
}
@@ -1754,42 +1738,77 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
if (rmem > (rcvbuf >> 1)) {
skb_condense(skb);
size = skb->truesize;
- busy = busylock_acquire(sk);
}
udp_set_dev_scratch(skb);
- atomic_add(size, &sk->sk_rmem_alloc);
+ atomic_add(size, &udp_prod_queue->rmem_alloc);
+
+ if (!llist_add(&skb->ll_node, &udp_prod_queue->ll_root))
+ return 0;
+
+ dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ? sk_drops_read(sk) : 0;
spin_lock(&list->lock);
- err = udp_rmem_schedule(sk, size);
- if (err) {
- spin_unlock(&list->lock);
- goto uncharge_drop;
- }
- sk_forward_alloc_add(sk, -size);
+ ll_list = llist_del_all(&udp_prod_queue->ll_root);
- /* no need to setup a destructor, we will explicitly release the
- * forward allocated memory on dequeue
- */
- sock_skb_set_dropcount(sk, skb);
+ ll_list = llist_reverse_order(ll_list);
+
+ llist_for_each_entry_safe(skb, next, ll_list, ll_node) {
+ size = udp_skb_truesize(skb);
+ total_size += size;
+ err = udp_rmem_schedule(sk, size);
+ if (unlikely(err)) {
+ /* Free the skbs outside of locked section. */
+ skb->next = to_drop;
+ to_drop = skb;
+ continue;
+ }
+
+ q_size += size;
+ sk_forward_alloc_add(sk, -size);
+
+ /* no need to setup a destructor, we will explicitly release the
+ * forward allocated memory on dequeue
+ */
+ SOCK_SKB_CB(skb)->dropcount = dropcount;
+ nb++;
+ __skb_queue_tail(list, skb);
+ }
+
+ atomic_add(q_size, &sk->sk_rmem_alloc);
- __skb_queue_tail(list, skb);
spin_unlock(&list->lock);
- if (!sock_flag(sk, SOCK_DEAD))
- INDIRECT_CALL_1(sk->sk_data_ready, sock_def_readable, sk);
+ if (!sock_flag(sk, SOCK_DEAD)) {
+ /* Multiple threads might be blocked in recvmsg(),
+ * using prepare_to_wait_exclusive().
+ */
+ while (nb) {
+ INDIRECT_CALL_1(sk->sk_data_ready,
+ sock_def_readable, sk);
+ nb--;
+ }
+ }
- busylock_release(busy);
- return 0;
+ if (unlikely(to_drop)) {
+ for (nb = 0; to_drop != NULL; nb++) {
+ skb = to_drop;
+ to_drop = skb->next;
+ skb_mark_not_on_list(skb);
+ /* TODO: update SNMP values. */
+ sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_PROTO_MEM);
+ }
+ numa_drop_add(&udp_sk(sk)->drop_counters, nb);
+ }
+
+ atomic_sub(total_size, &udp_prod_queue->rmem_alloc);
-uncharge_drop:
- atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+ return 0;
drop:
- atomic_inc(&sk->sk_drops);
- busylock_release(busy);
+ udp_drops_inc(sk);
return err;
}
EXPORT_IPV6_MOD_GPL(__udp_enqueue_schedule_skb);
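
The enqueue path above replaces the per-CPU busylocks with a lock-free producer list per NUMA node: every producer pushes its skb, and only the push that turned the list non-empty takes the receive-queue lock and splices everything accumulated so far (reversed back into FIFO order), so under flood at most one producer contends on the lock while memory is accounted per batch. A userspace model of that llist pattern using C11 atomics (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *next;
	int payload;
};

static _Atomic(struct node *) prod_head;

/* Push one node; returns true if the list was empty, i.e. the caller drains. */
static bool prod_push(struct node *n)
{
	struct node *old = atomic_load(&prod_head);

	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak(&prod_head, &old, n));
	return old == NULL;
}

/* Detach everything queued so far and restore FIFO order. */
static struct node *prod_take_all(void)
{
	struct node *list = atomic_exchange(&prod_head, NULL);
	struct node *rev = NULL;

	while (list) {
		struct node *next = list->next;

		list->next = rev;
		rev = list;
		list = next;
	}
	return rev;
}

int main(void)
{
	struct node a = { .payload = 1 }, b = { .payload = 2 };

	if (prod_push(&a))
		printf("first push: take the queue lock and splice\n");
	if (!prod_push(&b))
		printf("second push: batched, no lock taken\n");

	for (struct node *n = prod_take_all(); n; n = n->next)
		printf("drained %d\n", n->payload);
	return 0;
}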
@@ -1807,6 +1826,7 @@ void udp_destruct_common(struct sock *sk)
kfree_skb(skb);
}
udp_rmem_release(sk, total, 0, true);
+ kfree(up->udp_prod_queue);
}
EXPORT_IPV6_MOD_GPL(udp_destruct_common);
@@ -1818,10 +1838,11 @@ static void udp_destruct_sock(struct sock *sk)
int udp_init_sock(struct sock *sk)
{
- udp_lib_init_sock(sk);
+ int res = udp_lib_init_sock(sk);
+
sk->sk_destruct = udp_destruct_sock;
set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
- return 0;
+ return res;
}
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
@@ -1829,6 +1850,11 @@ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
if (unlikely(READ_ONCE(udp_sk(sk)->peeking_with_offset)))
sk_peek_offset_bwd(sk, len);
+ if (!skb_shared(skb)) {
+ skb_attempt_defer_free(skb);
+ return;
+ }
+
if (!skb_unref(skb))
return;
@@ -1853,7 +1879,7 @@ static struct sk_buff *__first_packet_length(struct sock *sk,
IS_UDPLITE(sk));
__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
IS_UDPLITE(sk));
- atomic_inc(&sk->sk_drops);
+ udp_drops_inc(sk);
__skb_unlink(skb, rcvq);
*total += skb->truesize;
kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);
@@ -2009,7 +2035,7 @@ try_again:
__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, is_udplite);
__UDP_INC_STATS(net, UDP_MIB_INERRORS, is_udplite);
- atomic_inc(&sk->sk_drops);
+ udp_drops_inc(sk);
kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);
goto try_again;
}
@@ -2079,7 +2105,7 @@ try_again:
if (unlikely(err)) {
if (!peeking) {
- atomic_inc(&sk->sk_drops);
+ udp_drops_inc(sk);
UDP_INC_STATS(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
@@ -2348,7 +2374,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
*/
static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
- int drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
+ enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
struct udp_sock *up = udp_sk(sk);
int is_udplite = IS_UDPLITE(sk);
@@ -2437,10 +2463,8 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
udp_lib_checksum_complete(skb))
goto csum_error;
- if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
- drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
+ if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr), &drop_reason))
goto drop;
- }
udp_csum_pull_header(skb);
@@ -2452,7 +2476,7 @@ csum_error:
__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
- atomic_inc(&sk->sk_drops);
+ udp_drops_inc(sk);
sk_skb_reason_drop(sk, skb, drop_reason);
return -1;
}
@@ -2537,7 +2561,7 @@ start_lookup:
nskb = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!nskb)) {
- atomic_inc(&sk->sk_drops);
+ udp_drops_inc(sk);
__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
IS_UDPLITE(sk));
__UDP_INC_STATS(net, UDP_MIB_INERRORS,
@@ -2612,7 +2636,7 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
return 0;
}
-/* wrapper for udp_queue_rcv_skb tacking care of csum conversion and
+/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
* return code conversion for ip layer consumption
*/
static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
@@ -2810,7 +2834,7 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
return NULL;
}
-int udp_v4_early_demux(struct sk_buff *skb)
+enum skb_drop_reason udp_v4_early_demux(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
struct in_device *in_dev = NULL;
@@ -2824,7 +2848,7 @@ int udp_v4_early_demux(struct sk_buff *skb)
/* validate the packet */
if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
- return 0;
+ return SKB_NOT_DROPPED_YET;
iph = ip_hdr(skb);
uh = udp_hdr(skb);
@@ -2833,12 +2857,12 @@ int udp_v4_early_demux(struct sk_buff *skb)
in_dev = __in_dev_get_rcu(skb->dev);
if (!in_dev)
- return 0;
+ return SKB_NOT_DROPPED_YET;
ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
iph->protocol);
if (!ours)
- return 0;
+ return SKB_NOT_DROPPED_YET;
sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
uh->source, iph->saddr,
@@ -2849,7 +2873,7 @@ int udp_v4_early_demux(struct sk_buff *skb)
}
if (!sk)
- return 0;
+ return SKB_NOT_DROPPED_YET;
skb->sk = sk;
DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
@@ -2876,7 +2900,7 @@ int udp_v4_early_demux(struct sk_buff *skb)
ip4h_dscp(iph),
skb->dev, in_dev, &itag);
}
- return 0;
+ return SKB_NOT_DROPPED_YET;
}
int udp_rcv(struct sk_buff *skb)
@@ -3234,7 +3258,7 @@ struct proto udp_prot = {
#ifdef CONFIG_BPF_SYSCALL
.psock_update_sk_prot = udp_bpf_update_proto,
#endif
- .memory_allocated = &udp_memory_allocated,
+ .memory_allocated = &net_aligned_data.udp_memory_allocated,
.per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc,
.sysctl_mem = sysctl_udp_mem,
@@ -3386,10 +3410,10 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
sk_wmem_alloc_get(sp),
udp_rqueue_get(sp),
0, 0L, 0,
- from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
+ from_kuid_munged(seq_user_ns(f), sk_uid(sp)),
0, sock_i_ino(sp),
refcount_read(&sp->sk_refcnt), sp,
- atomic_read(&sp->sk_drops));
+ sk_drops_read(sp));
}
int udp4_seq_show(struct seq_file *seq, void *v)
@@ -3629,7 +3653,7 @@ static int bpf_iter_udp_seq_show(struct seq_file *seq, void *v)
goto unlock;
}
- uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
+ uid = from_kuid_munged(seq_user_ns(seq), sk_uid(sk));
meta.seq = seq;
prog = bpf_iter_get_info(&meta, false);
ret = udp_prog_seq_show(prog, &meta, v, uid, state->bucket);
@@ -3997,7 +4021,6 @@ static void __init bpf_iter_register(void)
void __init udp_init(void)
{
unsigned long limit;
- unsigned int i;
udp_table_init(&udp_table, "UDP");
limit = nr_free_buffer_pages() / 8;
@@ -4006,15 +4029,6 @@ void __init udp_init(void)
sysctl_udp_mem[1] = limit;
sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;
- /* 16 spinlocks per cpu */
- udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
- udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
- GFP_KERNEL);
- if (!udp_busylocks)
- panic("UDP: failed to alloc udp_busylocks\n");
- for (i = 0; i < (1U << udp_busylocks_log); i++)
- spin_lock_init(udp_busylocks + i);
-
if (register_pernet_subsys(&udp_sysctl_ops))
panic("UDP: failed to init sysctl parameters.\n");
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 38cb3a28e4ed..6e491c720c90 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -16,9 +16,9 @@
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *req,
- struct nlattr *bc, bool net_admin)
+ bool net_admin)
{
- if (!inet_diag_bc_sk(bc, sk))
+ if (!inet_diag_bc_sk(cb->data, sk))
return 0;
return inet_sk_diag_fill(sk, NULL, skb, cb, req, NLM_F_MULTI,
@@ -92,12 +92,8 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb,
{
bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
struct net *net = sock_net(skb->sk);
- struct inet_diag_dump_data *cb_data;
int num, s_num, slot, s_slot;
- struct nlattr *bc;
- cb_data = cb->data;
- bc = cb_data->inet_diag_nla_bc;
s_slot = cb->args[0];
num = s_num = cb->args[1];
@@ -130,7 +126,7 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb,
r->id.idiag_dport)
goto next;
- if (sk_diag_dump(sk, skb, cb, r, bc, net_admin) < 0) {
+ if (sk_diag_dump(sk, skb, cb, r, net_admin) < 0) {
spin_unlock_bh(&hslot->lock);
goto done;
}
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index e1ff3a375996..c7142213fc21 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _UDP4_IMPL_H
#define _UDP4_IMPL_H
+#include <net/aligned_data.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <net/protocol.h>
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 85b5aa82d7d7..19d0b5b09ffa 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -44,7 +44,7 @@ struct udp_tunnel_type_entry {
DEFINE_STATIC_CALL(udp_tunnel_gro_rcv, dummy_gro_rcv);
static DEFINE_STATIC_KEY_FALSE(udp_tunnel_static_call);
-static struct mutex udp_tunnel_gro_type_lock;
+static DEFINE_MUTEX(udp_tunnel_gro_type_lock);
static struct udp_tunnel_type_entry udp_tunnel_gro_types[UDP_MAX_TUNNEL_TYPES];
static unsigned int udp_tunnel_gro_type_nr;
static DEFINE_SPINLOCK(udp_tunnel_gro_lock);
@@ -144,11 +144,6 @@ out:
}
EXPORT_SYMBOL_GPL(udp_tunnel_update_gro_rcv);
-static void udp_tunnel_gro_init(void)
-{
- mutex_init(&udp_tunnel_gro_type_lock);
-}
-
static struct sk_buff *udp_tunnel_gro_rcv(struct sock *sk,
struct list_head *head,
struct sk_buff *skb)
@@ -165,8 +160,6 @@ static struct sk_buff *udp_tunnel_gro_rcv(struct sock *sk,
#else
-static void udp_tunnel_gro_init(void) {}
-
static struct sk_buff *udp_tunnel_gro_rcv(struct sock *sk,
struct list_head *head,
struct sk_buff *skb)
@@ -224,7 +217,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
skb->remcsum_offload = remcsum;
- need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
+ need_ipsec = (skb_dst(skb) && dst_xfrm(skb_dst(skb))) || skb_sec_path(skb);
/* Try to offload checksum if possible */
offload_csum = !!(need_csum &&
!need_ipsec &&
@@ -767,6 +760,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
NAPI_GRO_CB(skb)->flush = 1;
return NULL;
}
+ skb_set_network_header(skb, skb_gro_receive_network_offset(skb));
ret = skb_gro_receive_list(p, skb);
} else {
skb_gro_postpull_rcsum(skb, uh,
@@ -897,8 +891,6 @@ struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
skb_gro_checksum_try_convert(skb, IPPROTO_UDP,
inet_gro_compute_pseudo);
skip:
- NAPI_GRO_CB(skb)->is_ipv6 = 0;
-
if (static_branch_unlikely(&udp_encap_needed_key))
sk = udp4_gro_lookup_skb(skb, uh->source, uh->dest);
@@ -1000,6 +992,5 @@ int __init udpv4_offload_init(void)
},
};
- udp_tunnel_gro_init();
return inet_add_offload(&net_hotdata.udpv4_offload, IPPROTO_UDP);
}
diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
index 2326548997d3..54386e06a813 100644
--- a/net/ipv4/udp_tunnel_core.c
+++ b/net/ipv4/udp_tunnel_core.c
@@ -4,6 +4,7 @@
#include <linux/socket.h>
#include <linux/kernel.h>
#include <net/dst_metadata.h>
+#include <net/flow.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/inet_dscp.h>
@@ -134,15 +135,17 @@ void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type)
struct udp_tunnel_info ti;
struct net_device *dev;
+ ASSERT_RTNL();
+
ti.type = type;
ti.sa_family = sk->sk_family;
ti.port = inet_sk(sk)->inet_sport;
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
+ for_each_netdev(net, dev) {
+ udp_tunnel_nic_lock(dev);
udp_tunnel_nic_add_port(dev, &ti);
+ udp_tunnel_nic_unlock(dev);
}
- rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(udp_tunnel_notify_add_rx_port);
@@ -154,22 +157,24 @@ void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type)
struct udp_tunnel_info ti;
struct net_device *dev;
+ ASSERT_RTNL();
+
ti.type = type;
ti.sa_family = sk->sk_family;
ti.port = inet_sk(sk)->inet_sport;
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
+ for_each_netdev(net, dev) {
+ udp_tunnel_nic_lock(dev);
udp_tunnel_nic_del_port(dev, &ti);
+ udp_tunnel_nic_unlock(dev);
}
- rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(udp_tunnel_notify_del_rx_port);
void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl,
__be16 df, __be16 src_port, __be16 dst_port,
- bool xnet, bool nocheck)
+ bool xnet, bool nocheck, u16 ipcb_flags)
{
struct udphdr *uh;
@@ -185,7 +190,8 @@ void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb
udp_set_csum(nocheck, skb, src, dst, skb->len);
- iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet);
+ iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet,
+ ipcb_flags);
}
EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);
@@ -248,7 +254,7 @@ struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb,
fl4.saddr = key->u.ipv4.src;
fl4.fl4_dport = dport;
fl4.fl4_sport = sport;
- fl4.flowi4_tos = tos & INET_DSCP_MASK;
+ fl4.flowi4_dscp = inet_dsfield_to_dscp(tos);
fl4.flowi4_flags = key->flow_flags;
rt = ip_route_output_key(net, &fl4);
diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c
index b6d2d16189c0..944b3cf25468 100644
--- a/net/ipv4/udp_tunnel_nic.c
+++ b/net/ipv4/udp_tunnel_nic.c
@@ -29,6 +29,7 @@ struct udp_tunnel_nic_table_entry {
* struct udp_tunnel_nic - UDP tunnel port offload state
* @work: async work for talking to hardware from process context
* @dev: netdev pointer
+ * @lock: protects all fields
* @need_sync: at least one port start changed
* @need_replay: space was freed, we need a replay of all ports
* @work_pending: @work is currently scheduled
@@ -41,6 +42,8 @@ struct udp_tunnel_nic {
struct net_device *dev;
+ struct mutex lock;
+
u8 need_sync:1;
u8 need_replay:1;
u8 work_pending:1;
@@ -298,22 +301,11 @@ __udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
static void
udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
{
- const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
- bool may_sleep;
-
if (!utn->need_sync)
return;
- /* Drivers which sleep in the callback need to update from
- * the workqueue, if we come from the tunnel driver's notification.
- */
- may_sleep = info->flags & UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
- if (!may_sleep)
- __udp_tunnel_nic_device_sync(dev, utn);
- if (may_sleep || utn->need_replay) {
- queue_work(udp_tunnel_nic_workqueue, &utn->work);
- utn->work_pending = 1;
- }
+ queue_work(udp_tunnel_nic_workqueue, &utn->work);
+ utn->work_pending = 1;
}
static bool
@@ -554,12 +546,12 @@ static void __udp_tunnel_nic_reset_ntf(struct net_device *dev)
struct udp_tunnel_nic *utn;
unsigned int i, j;
- ASSERT_RTNL();
-
utn = dev->udp_tunnel_nic;
if (!utn)
return;
+ mutex_lock(&utn->lock);
+
utn->need_sync = false;
for (i = 0; i < utn->n_tables; i++)
for (j = 0; j < info->tables[i].n_entries; j++) {
@@ -569,7 +561,7 @@ static void __udp_tunnel_nic_reset_ntf(struct net_device *dev)
entry->flags &= ~(UDP_TUNNEL_NIC_ENTRY_DEL |
UDP_TUNNEL_NIC_ENTRY_OP_FAIL);
- /* We don't release rtnl across ops */
+ /* We don't release utn lock across ops */
WARN_ON(entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN);
if (!entry->use_cnt)
continue;
@@ -579,6 +571,8 @@ static void __udp_tunnel_nic_reset_ntf(struct net_device *dev)
}
__udp_tunnel_nic_device_sync(dev, utn);
+
+ mutex_unlock(&utn->lock);
}
static size_t
@@ -643,6 +637,33 @@ err_cancel:
return -EMSGSIZE;
}
+static void __udp_tunnel_nic_assert_locked(struct net_device *dev)
+{
+ struct udp_tunnel_nic *utn;
+
+ utn = dev->udp_tunnel_nic;
+ if (utn)
+ lockdep_assert_held(&utn->lock);
+}
+
+static void __udp_tunnel_nic_lock(struct net_device *dev)
+{
+ struct udp_tunnel_nic *utn;
+
+ utn = dev->udp_tunnel_nic;
+ if (utn)
+ mutex_lock(&utn->lock);
+}
+
+static void __udp_tunnel_nic_unlock(struct net_device *dev)
+{
+ struct udp_tunnel_nic *utn;
+
+ utn = dev->udp_tunnel_nic;
+ if (utn)
+ mutex_unlock(&utn->lock);
+}
+
static const struct udp_tunnel_nic_ops __udp_tunnel_nic_ops = {
.get_port = __udp_tunnel_nic_get_port,
.set_port_priv = __udp_tunnel_nic_set_port_priv,
@@ -651,6 +672,9 @@ static const struct udp_tunnel_nic_ops __udp_tunnel_nic_ops = {
.reset_ntf = __udp_tunnel_nic_reset_ntf,
.dump_size = __udp_tunnel_nic_dump_size,
.dump_write = __udp_tunnel_nic_dump_write,
+ .assert_locked = __udp_tunnel_nic_assert_locked,
+ .lock = __udp_tunnel_nic_lock,
+ .unlock = __udp_tunnel_nic_unlock,
};
static void
@@ -710,11 +734,15 @@ static void udp_tunnel_nic_device_sync_work(struct work_struct *work)
container_of(work, struct udp_tunnel_nic, work);
rtnl_lock();
+ mutex_lock(&utn->lock);
+
utn->work_pending = 0;
__udp_tunnel_nic_device_sync(utn->dev, utn);
if (utn->need_replay)
udp_tunnel_nic_replay(utn->dev, utn);
+
+ mutex_unlock(&utn->lock);
rtnl_unlock();
}
@@ -730,6 +758,7 @@ udp_tunnel_nic_alloc(const struct udp_tunnel_nic_info *info,
return NULL;
utn->n_tables = n_tables;
INIT_WORK(&utn->work, udp_tunnel_nic_device_sync_work);
+ mutex_init(&utn->lock);
for (i = 0; i < n_tables; i++) {
utn->entries[i] = kcalloc(info->tables[i].n_entries,
@@ -821,8 +850,11 @@ static int udp_tunnel_nic_register(struct net_device *dev)
dev_hold(dev);
dev->udp_tunnel_nic = utn;
- if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
+ if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)) {
+ udp_tunnel_nic_lock(dev);
udp_tunnel_get_rx_info(dev);
+ udp_tunnel_nic_unlock(dev);
+ }
return 0;
}
@@ -832,6 +864,8 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
{
const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
+ udp_tunnel_nic_lock(dev);
+
/* For a shared table remove this dev from the list of sharing devices
* and if there are other devices just detach.
*/
@@ -841,8 +875,10 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
list_for_each_entry(node, &info->shared->devices, list)
if (node->dev == dev)
break;
- if (list_entry_is_head(node, &info->shared->devices, list))
+ if (list_entry_is_head(node, &info->shared->devices, list)) {
+ udp_tunnel_nic_unlock(dev);
return;
+ }
list_del(&node->list);
kfree(node);
@@ -852,6 +888,7 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
if (first) {
udp_tunnel_drop_rx_info(dev);
utn->dev = first->dev;
+ udp_tunnel_nic_unlock(dev);
goto release_dev;
}
@@ -862,6 +899,7 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
* from the work which we will boot immediately.
*/
udp_tunnel_nic_flush(dev, utn);
+ udp_tunnel_nic_unlock(dev);
/* Wait for the work to be done using the state, netdev core will
* retry unregister until we give up our reference on this device.
@@ -892,7 +930,7 @@ udp_tunnel_nic_netdevice_event(struct notifier_block *unused,
err = udp_tunnel_nic_register(dev);
if (err)
- netdev_WARN(dev, "failed to register for UDP tunnel offloads: %d", err);
+ netdev_warn(dev, "failed to register for UDP tunnel offloads: %d", err);
return notifier_from_errno(err);
}
/* All other events will need the udp_tunnel_nic state */
@@ -910,12 +948,16 @@ udp_tunnel_nic_netdevice_event(struct notifier_block *unused,
return NOTIFY_DONE;
if (event == NETDEV_UP) {
+ udp_tunnel_nic_lock(dev);
WARN_ON(!udp_tunnel_nic_is_empty(dev, utn));
udp_tunnel_get_rx_info(dev);
+ udp_tunnel_nic_unlock(dev);
return NOTIFY_OK;
}
if (event == NETDEV_GOING_DOWN) {
+ udp_tunnel_nic_lock(dev);
udp_tunnel_nic_flush(dev, utn);
+ udp_tunnel_nic_unlock(dev);
return NOTIFY_OK;
}
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index af37af3ab727..d3e621a11a1a 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -60,7 +60,7 @@ struct proto udplite_prot = {
.rehash = udp_v4_rehash,
.get_port = udp_v4_get_port,
- .memory_allocated = &udp_memory_allocated,
+ .memory_allocated = &net_aligned_data.udp_memory_allocated,
.per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc,
.sysctl_mem = sysctl_udp_mem,
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index 0d31a8c108d4..f28cfd88eaf5 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -202,6 +202,9 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0)
goto out;
+ /* set the transport header to ESP */
+ skb_set_transport_header(skb, offset);
+
NAPI_GRO_CB(skb)->proto = IPPROTO_UDP;
pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 3cff51ba72bb..0ae67d537499 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -31,7 +31,7 @@ static int __xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
- net, sk, skb, skb->dev, skb_dst(skb)->dev,
+ net, sk, skb, skb->dev, skb_dst_dev(skb),
__xfrm4_output,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
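
skb_dst_dev() here, and the dst_dev()/dst_dev_rcu() variants used in later hunks, replace open-coded skb_dst(skb)->dev dereferences so the device pointer is always read through one annotated accessor. A rough sketch of what such helpers look like, assuming they are thin inline wrappers in the dst/skb headers (the in-tree versions may differ in detail):

/* Rough sketch only. */
static inline struct net_device *dst_dev(const struct dst_entry *dst)
{
	return READ_ONCE(dst->dev);
}

static inline struct net_device *skb_dst_dev(const struct sk_buff *skb)
{
	return dst_dev(skb_dst(skb));
}
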
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 7fb6205619e7..58faf1ddd2b1 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -14,7 +14,7 @@
#include <linux/inetdevice.h>
#include <net/dst.h>
#include <net/xfrm.h>
-#include <net/inet_dscp.h>
+#include <net/flow.h>
#include <net/ip.h>
#include <net/l3mdev.h>
@@ -25,7 +25,7 @@ static struct dst_entry *__xfrm4_dst_lookup(struct flowi4 *fl4,
memset(fl4, 0, sizeof(*fl4));
fl4->daddr = params->daddr->a4;
- fl4->flowi4_tos = inet_dscp_to_dsfield(params->dscp);
+ fl4->flowi4_dscp = params->dscp;
fl4->flowi4_l3mdev = l3mdev_master_ifindex_by_index(params->net,
params->oif);
fl4->flowi4_mark = params->mark;
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 1c9c686d9522..b8f9a8c0302e 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -304,10 +304,9 @@ config IPV6_SEG6_LWTUNNEL
config IPV6_SEG6_HMAC
bool "IPv6: Segment Routing HMAC support"
depends on IPV6
- select CRYPTO
- select CRYPTO_HMAC
- select CRYPTO_SHA1
- select CRYPTO_SHA256
+ select CRYPTO_LIB_SHA1
+ select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_UTILS
help
Support for HMAC signature generation and verification
of SR-enabled packets.
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ba2ec7c870cc..40e9c336f6c5 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -239,6 +239,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.ndisc_evict_nocarrier = 1,
.ra_honor_pio_life = 0,
.ra_honor_pio_pflag = 0,
+ .force_forwarding = 0,
};
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -303,6 +304,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.ndisc_evict_nocarrier = 1,
.ra_honor_pio_life = 0,
.ra_honor_pio_pflag = 0,
+ .force_forwarding = 0,
};
/* Check if link is ready: is it up and is a valid qdisc available */
@@ -857,6 +859,9 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
idev = __in6_dev_get_rtnl_net(dev);
if (idev) {
int changed = (!idev->cnf.forwarding) ^ (!newf);
+ /* Disabling all.forwarding also clears force_forwarding on all interfaces */
+ if (newf == 0)
+ WRITE_ONCE(idev->cnf.force_forwarding, 0);
WRITE_ONCE(idev->cnf.forwarding, newf);
if (changed)
@@ -2229,32 +2234,29 @@ errdad:
in6_ifa_put(ifp);
}
-/* Join to solicited addr multicast group.
- * caller must hold RTNL */
+/* Join to solicited addr multicast group. */
void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
{
struct in6_addr maddr;
- if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
+ if (READ_ONCE(dev->flags) & (IFF_LOOPBACK | IFF_NOARP))
return;
addrconf_addr_solict_mult(addr, &maddr);
ipv6_dev_mc_inc(dev, &maddr);
}
-/* caller must hold RTNL */
void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
{
struct in6_addr maddr;
- if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
+ if (READ_ONCE(idev->dev->flags) & (IFF_LOOPBACK | IFF_NOARP))
return;
addrconf_addr_solict_mult(addr, &maddr);
__ipv6_dev_mc_dec(idev, &maddr);
}
-/* caller must hold RTNL */
static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
@@ -2267,7 +2269,6 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
__ipv6_dev_ac_inc(ifp->idev, &addr);
}
-/* caller must hold RTNL */
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
@@ -3208,7 +3209,7 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
}
}
-#if IS_ENABLED(CONFIG_IPV6_SIT) || IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
+#if IS_ENABLED(CONFIG_IPV6_SIT) || IS_ENABLED(CONFIG_NET_IPGRE)
static void add_v4_addrs(struct inet6_dev *idev)
{
struct in6_addr addr;
@@ -3367,7 +3368,7 @@ static int ipv6_generate_stable_address(struct in6_addr *address,
retry:
spin_lock_bh(&lock);
- sha1_init(digest);
+ sha1_init_raw(digest);
memset(&data, 0, sizeof(data));
memset(workspace, 0, sizeof(workspace));
memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
@@ -3463,6 +3464,7 @@ static void addrconf_dev_config(struct net_device *dev)
(dev->type != ARPHRD_IEEE1394) &&
(dev->type != ARPHRD_TUNNEL6) &&
(dev->type != ARPHRD_6LOWPAN) &&
+ (dev->type != ARPHRD_IP6GRE) &&
(dev->type != ARPHRD_TUNNEL) &&
(dev->type != ARPHRD_NONE) &&
(dev->type != ARPHRD_RAWIP)) {
@@ -3518,34 +3520,29 @@ static void addrconf_sit_config(struct net_device *dev)
}
#endif
-#if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
+#if IS_ENABLED(CONFIG_NET_IPGRE)
static void addrconf_gre_config(struct net_device *dev)
{
struct inet6_dev *idev;
ASSERT_RTNL();
- idev = ipv6_find_idev(dev);
- if (IS_ERR(idev)) {
- pr_debug("%s: add_dev failed\n", __func__);
+ idev = addrconf_add_dev(dev);
+ if (IS_ERR(idev))
return;
- }
/* Generate the IPv6 link-local address using addrconf_addr_gen(),
* unless we have an IPv4 GRE device not bound to an IP address and
* which is in EUI64 mode (as __ipv6_isatap_ifid() would fail in this
* case). Such devices fall back to add_v4_addrs() instead.
*/
- if (!(dev->type == ARPHRD_IPGRE && *(__be32 *)dev->dev_addr == 0 &&
+ if (!(*(__be32 *)dev->dev_addr == 0 &&
idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)) {
addrconf_addr_gen(idev, true);
return;
}
add_v4_addrs(idev);
-
- if (dev->flags & IFF_POINTOPOINT)
- addrconf_add_mroute(dev);
}
#endif
@@ -3557,8 +3554,7 @@ static void addrconf_init_auto_addrs(struct net_device *dev)
addrconf_sit_config(dev);
break;
#endif
-#if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
- case ARPHRD_IP6GRE:
+#if IS_ENABLED(CONFIG_NET_IPGRE)
case ARPHRD_IPGRE:
addrconf_gre_config(dev);
break;
@@ -3865,7 +3861,7 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister)
* Do not dev_put!
*/
if (unregister) {
- idev->dead = 1;
+ WRITE_ONCE(idev->dead, 1);
/* protected by rtnl_lock */
RCU_INIT_POINTER(dev->ip6_ptr, NULL);
@@ -5719,6 +5715,7 @@ static void ipv6_store_devconf(const struct ipv6_devconf *cnf,
array[DEVCONF_ACCEPT_UNTRACKED_NA] =
READ_ONCE(cnf->accept_untracked_na);
array[DEVCONF_ACCEPT_RA_MIN_LFT] = READ_ONCE(cnf->accept_ra_min_lft);
+ array[DEVCONF_FORCE_FORWARDING] = READ_ONCE(cnf->force_forwarding);
}
static inline size_t inet6_ifla6_size(void)
@@ -6081,7 +6078,7 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
hdr->ifi_type = dev->type;
ifindex = READ_ONCE(dev->ifindex);
hdr->ifi_index = ifindex;
- hdr->ifi_flags = dev_get_flags(dev);
+ hdr->ifi_flags = netif_get_flags(dev);
hdr->ifi_change = 0;
iflink = dev_get_iflink(dev);
@@ -6747,6 +6744,75 @@ static int addrconf_sysctl_disable_policy(const struct ctl_table *ctl, int write
return ret;
}
+static void addrconf_force_forward_change(struct net *net, __s32 newf)
+{
+ struct net_device *dev;
+ struct inet6_dev *idev;
+
+ for_each_netdev(net, dev) {
+ idev = __in6_dev_get_rtnl_net(dev);
+ if (idev) {
+ int changed = (!idev->cnf.force_forwarding) ^ (!newf);
+
+ WRITE_ONCE(idev->cnf.force_forwarding, newf);
+ if (changed)
+ inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
+ NETCONFA_FORCE_FORWARDING,
+ dev->ifindex, &idev->cnf);
+ }
+ }
+}
+
+static int addrconf_sysctl_force_forwarding(const struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct inet6_dev *idev = ctl->extra1;
+ struct ctl_table tmp_ctl = *ctl;
+ struct net *net = ctl->extra2;
+ int *valp = ctl->data;
+ int new_val = *valp;
+ int old_val = *valp;
+ loff_t pos = *ppos;
+ int ret;
+
+ tmp_ctl.extra1 = SYSCTL_ZERO;
+ tmp_ctl.extra2 = SYSCTL_ONE;
+ tmp_ctl.data = &new_val;
+
+ ret = proc_douintvec_minmax(&tmp_ctl, write, buffer, lenp, ppos);
+
+ if (write && old_val != new_val) {
+ if (!rtnl_net_trylock(net))
+ return restart_syscall();
+
+ WRITE_ONCE(*valp, new_val);
+
+ if (valp == &net->ipv6.devconf_dflt->force_forwarding) {
+ inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
+ NETCONFA_FORCE_FORWARDING,
+ NETCONFA_IFINDEX_DEFAULT,
+ net->ipv6.devconf_dflt);
+ } else if (valp == &net->ipv6.devconf_all->force_forwarding) {
+ inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
+ NETCONFA_FORCE_FORWARDING,
+ NETCONFA_IFINDEX_ALL,
+ net->ipv6.devconf_all);
+
+ addrconf_force_forward_change(net, new_val);
+ } else {
+ inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
+ NETCONFA_FORCE_FORWARDING,
+ idev->dev->ifindex,
+ &idev->cnf);
+ }
+ rtnl_net_unlock(net);
+ }
+
+ if (ret)
+ *ppos = pos;
+ return ret;
+}
+
static int minus_one = -1;
static const int two_five_five = 255;
static u32 ioam6_if_id_max = U16_MAX;
@@ -7172,7 +7238,9 @@ static const struct ctl_table addrconf_sysctl[] = {
.data = &ipv6_devconf.rpl_seg_enabled,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
},
{
.procname = "ioam6_enabled",
@@ -7217,6 +7285,13 @@ static const struct ctl_table addrconf_sysctl[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_TWO,
},
+ {
+ .procname = "force_forwarding",
+ .data = &ipv6_devconf.force_forwarding,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = addrconf_sysctl_force_forwarding,
+ },
};
static int __addrconf_sysctl_register(struct net *net, char *dev_name,
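
Taken together, the addrconf.c hunks add a per-device force_forwarding knob: an interface may forward even when the global all.forwarding switch is off, and the override is cleared again whenever all.forwarding is switched off. A minimal sketch of the resulting decision, mirroring the ip6_forward() check added later in this series; the helper name is made up for illustration:

/* Sketch only: forwarding is permitted when either the global switch or the
 * per-device override is set. */
static bool ip6_forwarding_allowed(const struct net *net,
				   const struct inet6_dev *idev)
{
	if (READ_ONCE(net->ipv6.devconf_all->forwarding))
		return true;

	return idev && READ_ONCE(idev->cnf.force_forwarding);
}
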
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index fb63ffbcfc64..567efd626ab4 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -20,12 +20,6 @@
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
-#if 0
-#define ADDRLABEL(x...) printk(x)
-#else
-#define ADDRLABEL(x...) do { ; } while (0)
-#endif
-
/*
* Policy Table
*/
@@ -150,8 +144,8 @@ u32 ipv6_addr_label(struct net *net,
label = p ? p->label : IPV6_ADDR_LABEL_DEFAULT;
rcu_read_unlock();
- ADDRLABEL(KERN_DEBUG "%s(addr=%pI6, type=%d, ifindex=%d) => %08x\n",
- __func__, addr, type, ifindex, label);
+ net_dbg_ratelimited("%s(addr=%pI6, type=%d, ifindex=%d) => %08x\n", __func__, addr, type,
+ ifindex, label);
return label;
}
@@ -164,8 +158,8 @@ static struct ip6addrlbl_entry *ip6addrlbl_alloc(const struct in6_addr *prefix,
struct ip6addrlbl_entry *newp;
int addrtype;
- ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d, label=%u)\n",
- __func__, prefix, prefixlen, ifindex, (unsigned int)label);
+ net_dbg_ratelimited("%s(prefix=%pI6, prefixlen=%d, ifindex=%d, label=%u)\n", __func__,
+ prefix, prefixlen, ifindex, (unsigned int)label);
addrtype = ipv6_addr_type(prefix) & (IPV6_ADDR_MAPPED | IPV6_ADDR_COMPATv4 | IPV6_ADDR_LOOPBACK);
@@ -207,8 +201,7 @@ static int __ip6addrlbl_add(struct net *net, struct ip6addrlbl_entry *newp,
struct hlist_node *n;
int ret = 0;
- ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n", __func__, newp,
- replace);
+ net_dbg_ratelimited("%s(newp=%p, replace=%d)\n", __func__, newp, replace);
hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) {
if (p->prefixlen == newp->prefixlen &&
@@ -247,9 +240,8 @@ static int ip6addrlbl_add(struct net *net,
struct ip6addrlbl_entry *newp;
int ret = 0;
- ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d, label=%u, replace=%d)\n",
- __func__, prefix, prefixlen, ifindex, (unsigned int)label,
- replace);
+ net_dbg_ratelimited("%s(prefix=%pI6, prefixlen=%d, ifindex=%d, label=%u, replace=%d)\n",
+ __func__, prefix, prefixlen, ifindex, (unsigned int)label, replace);
newp = ip6addrlbl_alloc(prefix, prefixlen, ifindex, label);
if (IS_ERR(newp))
@@ -271,8 +263,8 @@ static int __ip6addrlbl_del(struct net *net,
struct hlist_node *n;
int ret = -ESRCH;
- ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d)\n",
- __func__, prefix, prefixlen, ifindex);
+ net_dbg_ratelimited("%s(prefix=%pI6, prefixlen=%d, ifindex=%d)\n", __func__, prefix,
+ prefixlen, ifindex);
hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) {
if (p->prefixlen == prefixlen &&
@@ -294,8 +286,8 @@ static int ip6addrlbl_del(struct net *net,
struct in6_addr prefix_buf;
int ret;
- ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d)\n",
- __func__, prefix, prefixlen, ifindex);
+ net_dbg_ratelimited("%s(prefix=%pI6, prefixlen=%d, ifindex=%d)\n", __func__, prefix,
+ prefixlen, ifindex);
ipv6_addr_prefix(&prefix_buf, prefix, prefixlen);
spin_lock(&net->ipv6.ip6addrlbl_table.lock);
@@ -312,8 +304,6 @@ static int __net_init ip6addrlbl_net_init(struct net *net)
int err;
int i;
- ADDRLABEL(KERN_DEBUG "%s\n", __func__);
-
spin_lock_init(&net->ipv6.ip6addrlbl_table.lock);
INIT_HLIST_HEAD(&net->ipv6.ip6addrlbl_table.head);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index acaff1296783..1b0314644e0c 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -842,7 +842,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = inet->inet_dport;
fl6.fl6_sport = inet->inet_sport;
- fl6.flowi6_uid = sk->sk_uid;
+ fl6.flowi6_uid = sk_uid(sk);
security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
rcu_read_lock();
@@ -857,7 +857,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
return PTR_ERR(dst);
}
- ip6_dst_store(sk, dst, NULL, NULL);
+ ip6_dst_store(sk, dst, false, false);
}
return 0;
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index eb474f0987ae..95372e0f1d21 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -46,6 +46,34 @@ struct ah_skb_cb {
#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
+/* Helper to save IPv6 addresses and extension headers to temporary storage */
+static inline void ah6_save_hdrs(struct tmp_ext *iph_ext,
+ struct ipv6hdr *top_iph, int extlen)
+{
+ if (!extlen)
+ return;
+
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
+ iph_ext->saddr = top_iph->saddr;
+#endif
+ iph_ext->daddr = top_iph->daddr;
+ memcpy(&iph_ext->hdrs, top_iph + 1, extlen - sizeof(*iph_ext));
+}
+
+/* Helper to restore IPv6 addresses and extension headers from temporary storage */
+static inline void ah6_restore_hdrs(struct ipv6hdr *top_iph,
+ struct tmp_ext *iph_ext, int extlen)
+{
+ if (!extlen)
+ return;
+
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
+ top_iph->saddr = iph_ext->saddr;
+#endif
+ top_iph->daddr = iph_ext->daddr;
+ memcpy(top_iph + 1, &iph_ext->hdrs, extlen - sizeof(*iph_ext));
+}
+
static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
unsigned int size)
{
@@ -301,13 +329,7 @@ static void ah6_output_done(void *data, int err)
memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
- if (extlen) {
-#if IS_ENABLED(CONFIG_IPV6_MIP6)
- memcpy(&top_iph->saddr, iph_ext, extlen);
-#else
- memcpy(&top_iph->daddr, iph_ext, extlen);
-#endif
- }
+ ah6_restore_hdrs(top_iph, iph_ext, extlen);
kfree(AH_SKB_CB(skb)->tmp);
xfrm_output_resume(skb->sk, skb, err);
@@ -378,12 +400,8 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
*/
memcpy(iph_base, top_iph, IPV6HDR_BASELEN);
+ ah6_save_hdrs(iph_ext, top_iph, extlen);
if (extlen) {
-#if IS_ENABLED(CONFIG_IPV6_MIP6)
- memcpy(iph_ext, &top_iph->saddr, extlen);
-#else
- memcpy(iph_ext, &top_iph->daddr, extlen);
-#endif
err = ipv6_clear_mutable_options(top_iph,
extlen - sizeof(*iph_ext) +
sizeof(*top_iph),
@@ -434,13 +452,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
- if (extlen) {
-#if IS_ENABLED(CONFIG_IPV6_MIP6)
- memcpy(&top_iph->saddr, iph_ext, extlen);
-#else
- memcpy(&top_iph->daddr, iph_ext, extlen);
-#endif
- }
+ ah6_restore_hdrs(top_iph, iph_ext, extlen);
out_free:
kfree(iph_base);
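
The ah6_save_hdrs()/ah6_restore_hdrs() helpers above replace three copies of config-dependent memcpy()s of the mutable fields saved around ICV computation, and make the asymmetry explicit: the temporary storage only carries the source address when Mobile IPv6 is enabled. Roughly, the layout they operate on looks like this (sketch; the exact definition in ah6.c may differ slightly):

/* Rough sketch of the temporary-storage layout. */
struct tmp_ext {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	struct in6_addr saddr;	/* mutable via a Home Address option */
#endif
	struct in6_addr daddr;	/* mutable via a routing header */
	char hdrs[];		/* copy of the extension headers */
};
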
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 21e01695b48c..52599584422b 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -47,6 +47,9 @@
static struct hlist_head inet6_acaddr_lst[IN6_ADDR_HSIZE];
static DEFINE_SPINLOCK(acaddr_hash_lock);
+#define ac_dereference(a, idev) \
+ rcu_dereference_protected(a, lockdep_is_held(&(idev)->lock))
+
static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr);
static u32 inet6_acaddr_hash(const struct net *net,
@@ -64,14 +67,12 @@ static u32 inet6_acaddr_hash(const struct net *net,
int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
struct ipv6_pinfo *np = inet6_sk(sk);
+ struct ipv6_ac_socklist *pac = NULL;
+ struct net *net = sock_net(sk);
+ netdevice_tracker dev_tracker;
struct net_device *dev = NULL;
struct inet6_dev *idev;
- struct ipv6_ac_socklist *pac;
- struct net *net = sock_net(sk);
- int ishost = !net->ipv6.devconf_all->forwarding;
- int err = 0;
-
- ASSERT_RTNL();
+ int err = 0, ishost;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
@@ -79,32 +80,43 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
return -EINVAL;
if (ifindex)
- dev = __dev_get_by_index(net, ifindex);
+ dev = netdev_get_by_index(net, ifindex, &dev_tracker, GFP_KERNEL);
- if (ipv6_chk_addr_and_flags(net, addr, dev, true, 0, IFA_F_TENTATIVE))
- return -EINVAL;
+ if (ipv6_chk_addr_and_flags(net, addr, dev, true, 0, IFA_F_TENTATIVE)) {
+ err = -EINVAL;
+ goto error;
+ }
pac = sock_kmalloc(sk, sizeof(struct ipv6_ac_socklist), GFP_KERNEL);
- if (!pac)
- return -ENOMEM;
+ if (!pac) {
+ err = -ENOMEM;
+ goto error;
+ }
+
pac->acl_next = NULL;
pac->acl_addr = *addr;
+ ishost = !READ_ONCE(net->ipv6.devconf_all->forwarding);
+
if (ifindex == 0) {
struct rt6_info *rt;
+ rcu_read_lock();
rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
if (rt) {
- dev = rt->dst.dev;
+ dev = dst_dev_rcu(&rt->dst);
+ netdev_hold(dev, &dev_tracker, GFP_ATOMIC);
ip6_rt_put(rt);
} else if (ishost) {
+ rcu_read_unlock();
err = -EADDRNOTAVAIL;
goto error;
} else {
/* router, no matching interface: just pick one */
- dev = __dev_get_by_flags(net, IFF_UP,
- IFF_UP | IFF_LOOPBACK);
+ dev = netdev_get_by_flags_rcu(net, &dev_tracker, IFF_UP,
+ IFF_UP | IFF_LOOPBACK);
}
+ rcu_read_unlock();
}
if (!dev) {
@@ -112,7 +124,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
goto error;
}
- idev = __in6_dev_get(dev);
+ idev = in6_dev_get(dev);
if (!idev) {
if (ifindex)
err = -ENODEV;
@@ -120,8 +132,9 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
err = -EADDRNOTAVAIL;
goto error;
}
+
/* reset ishost, now that we have a specific device */
- ishost = !idev->cnf.forwarding;
+ ishost = !READ_ONCE(idev->cnf.forwarding);
pac->acl_ifindex = dev->ifindex;
@@ -134,7 +147,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
if (ishost)
err = -EADDRNOTAVAIL;
if (err)
- goto error;
+ goto error_idev;
}
err = __ipv6_dev_ac_inc(idev, addr);
@@ -144,7 +157,11 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
pac = NULL;
}
+error_idev:
+ in6_dev_put(idev);
error:
+ netdev_put(dev, &dev_tracker);
+
if (pac)
sock_kfree_s(sk, pac, sizeof(*pac));
return err;
@@ -155,12 +172,10 @@ error:
*/
int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
- struct ipv6_pinfo *np = inet6_sk(sk);
- struct net_device *dev;
struct ipv6_ac_socklist *pac, *prev_pac;
+ struct ipv6_pinfo *np = inet6_sk(sk);
struct net *net = sock_net(sk);
-
- ASSERT_RTNL();
+ struct net_device *dev;
prev_pac = NULL;
for (pac = np->ipv6_ac_list; pac; pac = pac->acl_next) {
@@ -176,9 +191,11 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
else
np->ipv6_ac_list = pac->acl_next;
- dev = __dev_get_by_index(net, pac->acl_ifindex);
- if (dev)
+ dev = dev_get_by_index(net, pac->acl_ifindex);
+ if (dev) {
ipv6_dev_ac_dec(dev, &pac->acl_addr);
+ dev_put(dev);
+ }
sock_kfree_s(sk, pac, sizeof(*pac));
return 0;
@@ -187,21 +204,20 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
void __ipv6_sock_ac_close(struct sock *sk)
{
struct ipv6_pinfo *np = inet6_sk(sk);
+ struct net *net = sock_net(sk);
struct net_device *dev = NULL;
struct ipv6_ac_socklist *pac;
- struct net *net = sock_net(sk);
- int prev_index;
+ int prev_index = 0;
- ASSERT_RTNL();
pac = np->ipv6_ac_list;
np->ipv6_ac_list = NULL;
- prev_index = 0;
while (pac) {
struct ipv6_ac_socklist *next = pac->acl_next;
if (pac->acl_ifindex != prev_index) {
- dev = __dev_get_by_index(net, pac->acl_ifindex);
+ dev_put(dev);
+ dev = dev_get_by_index(net, pac->acl_ifindex);
prev_index = pac->acl_ifindex;
}
if (dev)
@@ -209,6 +225,8 @@ void __ipv6_sock_ac_close(struct sock *sk)
sock_kfree_s(sk, pac, sizeof(*pac));
pac = next;
}
+
+ dev_put(dev);
}
void ipv6_sock_ac_close(struct sock *sk)
@@ -217,9 +235,8 @@ void ipv6_sock_ac_close(struct sock *sk)
if (!np->ipv6_ac_list)
return;
- rtnl_lock();
+
__ipv6_sock_ac_close(sk);
- rtnl_unlock();
}
static void ipv6_add_acaddr_hash(struct net *net, struct ifacaddr6 *aca)
@@ -319,16 +336,14 @@ int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr)
struct net *net;
int err;
- ASSERT_RTNL();
-
write_lock_bh(&idev->lock);
if (idev->dead) {
err = -ENODEV;
goto out;
}
- for (aca = rtnl_dereference(idev->ac_list); aca;
- aca = rtnl_dereference(aca->aca_next)) {
+ for (aca = ac_dereference(idev->ac_list, idev); aca;
+ aca = ac_dereference(aca->aca_next, idev)) {
if (ipv6_addr_equal(&aca->aca_addr, addr)) {
aca->aca_users++;
err = 0;
@@ -380,12 +395,10 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
{
struct ifacaddr6 *aca, *prev_aca;
- ASSERT_RTNL();
-
write_lock_bh(&idev->lock);
prev_aca = NULL;
- for (aca = rtnl_dereference(idev->ac_list); aca;
- aca = rtnl_dereference(aca->aca_next)) {
+ for (aca = ac_dereference(idev->ac_list, idev); aca;
+ aca = ac_dereference(aca->aca_next, idev)) {
if (ipv6_addr_equal(&aca->aca_addr, addr))
break;
prev_aca = aca;
@@ -414,14 +427,18 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
return 0;
}
-/* called with rtnl_lock() */
static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr)
{
- struct inet6_dev *idev = __in6_dev_get(dev);
+ struct inet6_dev *idev = in6_dev_get(dev);
+ int err;
if (!idev)
return -ENODEV;
- return __ipv6_dev_ac_dec(idev, addr);
+
+ err = __ipv6_dev_ac_dec(idev, addr);
+ in6_dev_put(idev);
+
+ return err;
}
void ipv6_ac_destroy_dev(struct inet6_dev *idev)
@@ -429,7 +446,7 @@ void ipv6_ac_destroy_dev(struct inet6_dev *idev)
struct ifacaddr6 *aca;
write_lock_bh(&idev->lock);
- while ((aca = rtnl_dereference(idev->ac_list)) != NULL) {
+ while ((aca = ac_dereference(idev->ac_list, idev)) != NULL) {
rcu_assign_pointer(idev->ac_list, aca->aca_next);
write_unlock_bh(&idev->lock);
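
The anycast changes drop the RTNL requirement: device references are taken with reference trackers (netdev_get_by_index()/netdev_hold()/netdev_put()), inet6_dev references via in6_dev_get()/in6_dev_put(), and ac_list traversal is checked against idev->lock through the new ac_dereference() macro. A minimal sketch of the tracked-reference pattern in isolation:

/* Sketch: hold a tracked reference on a device while using it without RTNL. */
static int example_with_tracked_dev(struct net *net, int ifindex)
{
	netdevice_tracker tracker;
	struct net_device *dev;

	dev = netdev_get_by_index(net, ifindex, &tracker, GFP_KERNEL);
	if (!dev)
		return -ENODEV;

	/* ... work on dev ... */

	netdev_put(dev, &tracker);	/* pairs with netdev_get_by_index() */
	return 0;
}
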
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index 62618a058b8f..df1986973430 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -32,7 +32,7 @@
#include <linux/unaligned.h>
#include <linux/crc-ccitt.h>
-/* Maximium size of the calipso option including
+/* Maximum size of the calipso option including
* the two-byte TLV header.
*/
#define CALIPSO_OPT_LEN_MAX (2 + 252)
@@ -42,13 +42,13 @@
*/
#define CALIPSO_HDR_LEN (2 + 8)
-/* Maximium size of the calipso option including
+/* Maximum size of the calipso option including
* the two-byte TLV header and upto 3 bytes of
* leading pad and 7 bytes of trailing pad.
*/
#define CALIPSO_OPT_LEN_MAX_WITH_PAD (3 + CALIPSO_OPT_LEN_MAX + 7)
- /* Maximium size of u32 aligned buffer required to hold calipso
+ /* Maximum size of u32 aligned buffer required to hold calipso
* option. Max of 3 initial pad bytes starting from buffer + 3.
* i.e. the worst case is when the previous tlv finishes on 4n + 3.
*/
@@ -1207,6 +1207,10 @@ static int calipso_req_setattr(struct request_sock *req,
struct ipv6_opt_hdr *old, *new;
struct sock *sk = sk_to_full_sk(req_to_sk(req));
+ /* sk is NULL for SYN+ACK w/ SYN Cookie */
+ if (!sk)
+ return -ENOMEM;
+
if (req_inet->ipv6_opt && req_inet->ipv6_opt->hopopt)
old = req_inet->ipv6_opt->hopopt;
else
@@ -1247,6 +1251,10 @@ static void calipso_req_delattr(struct request_sock *req)
struct ipv6_txoptions *txopts;
struct sock *sk = sk_to_full_sk(req_to_sk(req));
+ /* sk is NULL for SYN+ACK w/ SYN Cookie */
+ if (!sk)
+ return;
+
if (!req_inet->ipv6_opt || !req_inet->ipv6_opt->hopopt)
return;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index fff78496803d..33ebe93d80e3 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -53,7 +53,7 @@ static void ip6_datagram_flow_key_init(struct flowi6 *fl6,
fl6->fl6_dport = inet->inet_dport;
fl6->fl6_sport = inet->inet_sport;
fl6->flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label);
- fl6->flowi6_uid = sk->sk_uid;
+ fl6->flowi6_uid = sk_uid(sk);
if (!oif)
oif = np->sticky_pktinfo.ipi6_ifindex;
@@ -127,7 +127,7 @@ void ip6_datagram_release_cb(struct sock *sk)
rcu_read_lock();
dst = __sk_dst_get(sk);
- if (!dst || !dst->obsolete ||
+ if (!dst || !READ_ONCE(dst->obsolete) ||
dst->ops->check(dst, inet6_sk(sk)->dst_cookie)) {
rcu_read_unlock();
return;
@@ -1064,9 +1064,9 @@ void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
sk_wmem_alloc_get(sp),
rqueue,
0, 0L, 0,
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+ from_kuid_munged(seq_user_ns(seq), sk_uid(sp)),
0,
sock_i_ino(sp),
refcount_read(&sp->sk_refcnt), sp,
- atomic_read(&sp->sk_drops));
+ sk_drops_read(sp));
}
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 72adfc107b55..e75da98f5283 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -149,8 +149,8 @@ static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
dport = encap->encap_dport;
spin_unlock_bh(&x->lock);
- sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, &x->id.daddr.in6,
- dport, &x->props.saddr.in6, ntohs(sport), 0, 0);
+ sk = __inet6_lookup_established(net, &x->id.daddr.in6, dport,
+ &x->props.saddr.in6, ntohs(sport), 0, 0);
if (!sk)
return ERR_PTR(-ENOENT);
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 457de0745a33..a23eb8734e15 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -306,7 +306,7 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
!pskb_may_pull(skb, (skb_transport_offset(skb) +
((skb_transport_header(skb)[1] + 1) << 3)))) {
- __IP6_INC_STATS(dev_net(dst->dev), idev,
+ __IP6_INC_STATS(dev_net(dst_dev(dst)), idev,
IPSTATS_MIB_INHDRERRORS);
fail_and_free:
kfree_skb(skb);
@@ -460,7 +460,7 @@ looped_back:
return -1;
}
- if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
+ if (skb_dst_dev(skb)->flags & IFF_LOOPBACK) {
if (ipv6_hdr(skb)->hop_limit <= 1) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
icmpv6_send(skb, ICMPV6_TIME_EXCEED,
@@ -494,10 +494,8 @@ static int ipv6_rpl_srh_rcv(struct sk_buff *skb)
idev = __in6_dev_get(skb->dev);
- accept_rpl_seg = net->ipv6.devconf_all->rpl_seg_enabled;
- if (accept_rpl_seg > idev->cnf.rpl_seg_enabled)
- accept_rpl_seg = idev->cnf.rpl_seg_enabled;
-
+ accept_rpl_seg = min(READ_ONCE(net->ipv6.devconf_all->rpl_seg_enabled),
+ READ_ONCE(idev->cnf.rpl_seg_enabled));
if (!accept_rpl_seg) {
kfree_skb(skb);
return -1;
@@ -621,7 +619,7 @@ looped_back:
return -1;
}
- if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
+ if (skb_dst_dev(skb)->flags & IFF_LOOPBACK) {
if (ipv6_hdr(skb)->hop_limit <= 1) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
icmpv6_send(skb, ICMPV6_TIME_EXCEED,
@@ -783,7 +781,7 @@ looped_back:
kfree_skb(skb);
return -1;
}
- if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) {
+ if (!ipv6_chk_home_addr(skb_dst_dev_net(skb), addr)) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
kfree_skb(skb);
return -1;
@@ -809,7 +807,7 @@ looped_back:
return -1;
}
- if (skb_dst(skb)->dev->flags&IFF_LOOPBACK) {
+ if (skb_dst_dev(skb)->flags & IFF_LOOPBACK) {
if (ipv6_hdr(skb)->hop_limit <= 1) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 3fd19a84b358..56c974cf75d1 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -196,6 +196,7 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
struct flowi6 *fl6, bool apply_ratelimit)
{
struct net *net = sock_net(sk);
+ struct net_device *dev;
struct dst_entry *dst;
bool res = false;
@@ -208,10 +209,12 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
* this lookup should be more aggressive (not longer than timeout).
*/
dst = ip6_route_output(net, sk, fl6);
+ rcu_read_lock();
+ dev = dst_dev_rcu(dst);
if (dst->error) {
IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_OUTNOROUTES);
- } else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
+ } else if (dev && (dev->flags & IFF_LOOPBACK)) {
res = true;
} else {
struct rt6_info *rt = dst_rt6_info(dst);
@@ -222,14 +225,12 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
if (rt->rt6i_dst.plen < 128)
tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
- rcu_read_lock();
peer = inet_getpeer_v6(net->ipv6.peers, &fl6->daddr);
res = inet_peer_xrlim_allow(peer, tmo);
- rcu_read_unlock();
}
+ rcu_read_unlock();
if (!res)
- __ICMP6_INC_STATS(net, ip6_dst_idev(dst),
- ICMP6_MIB_RATELIMITHOST);
+ __ICMP6_INC_STATS(net, NULL, ICMP6_MIB_RATELIMITHOST);
else
icmp_global_consume(net);
dst_release(dst);
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index 7d574f5132e2..7bb9edc5c28c 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -70,7 +70,7 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
*/
memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_oif = orig_dst->dev->ifindex;
+ fl6.flowi6_oif = dst_dev(orig_dst)->ifindex;
fl6.flowi6_iif = LOOPBACK_IFINDEX;
fl6.daddr = *rt6_nexthop(dst_rt6_info(orig_dst),
&ip6h->daddr);
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 8f500eaf33cf..ea5cf3fdfdd6 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -45,7 +45,7 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk,
fl6->flowi6_mark = ireq->ir_mark;
fl6->fl6_dport = ireq->ir_rmt_port;
fl6->fl6_sport = htons(ireq->ir_num);
- fl6->flowi6_uid = sk->sk_uid;
+ fl6->flowi6_uid = sk_uid(sk);
security_req_classify_flow(req, flowi6_to_flowi_common(fl6));
dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
@@ -79,7 +79,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
fl6->flowi6_mark = sk->sk_mark;
fl6->fl6_sport = inet->inet_sport;
fl6->fl6_dport = inet->inet_dport;
- fl6->flowi6_uid = sk->sk_uid;
+ fl6->flowi6_uid = sk_uid(sk);
security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
rcu_read_lock();
@@ -91,7 +91,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
if (!IS_ERR(dst))
- ip6_dst_store(sk, dst, NULL, NULL);
+ ip6_dst_store(sk, dst, false, false);
}
return dst;
}
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 76ee521189eb..5e1da088d8e1 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -47,24 +47,23 @@ EXPORT_SYMBOL_GPL(inet6_ehashfn);
* The sockhash lock must be held as a reader here.
*/
struct sock *__inet6_lookup_established(const struct net *net,
- struct inet_hashinfo *hashinfo,
- const struct in6_addr *saddr,
- const __be16 sport,
- const struct in6_addr *daddr,
- const u16 hnum,
- const int dif, const int sdif)
+ const struct in6_addr *saddr,
+ const __be16 sport,
+ const struct in6_addr *daddr,
+ const u16 hnum,
+ const int dif, const int sdif)
{
- struct sock *sk;
- const struct hlist_nulls_node *node;
const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
- /* Optimize here for direct hit, only listening connections can
- * have wildcards anyways.
- */
- unsigned int hash = inet6_ehashfn(net, daddr, hnum, saddr, sport);
- unsigned int slot = hash & hashinfo->ehash_mask;
- struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
-
+ const struct hlist_nulls_node *node;
+ struct inet_ehash_bucket *head;
+ struct inet_hashinfo *hashinfo;
+ unsigned int hash, slot;
+ struct sock *sk;
+ hashinfo = net->ipv4.tcp_death_row.hashinfo;
+ hash = inet6_ehashfn(net, daddr, hnum, saddr, sport);
+ slot = hash & hashinfo->ehash_mask;
+ head = &hashinfo->ehash[slot];
begin:
sk_nulls_for_each_rcu(sk, node, &head->chain) {
if (sk->sk_hash != hash)
@@ -200,19 +199,20 @@ struct sock *inet6_lookup_run_sk_lookup(const struct net *net,
EXPORT_SYMBOL_GPL(inet6_lookup_run_sk_lookup);
struct sock *inet6_lookup_listener(const struct net *net,
- struct inet_hashinfo *hashinfo,
- struct sk_buff *skb, int doff,
- const struct in6_addr *saddr,
- const __be16 sport, const struct in6_addr *daddr,
- const unsigned short hnum, const int dif, const int sdif)
+ struct sk_buff *skb, int doff,
+ const struct in6_addr *saddr,
+ const __be16 sport,
+ const struct in6_addr *daddr,
+ const unsigned short hnum,
+ const int dif, const int sdif)
{
struct inet_listen_hashbucket *ilb2;
+ struct inet_hashinfo *hashinfo;
struct sock *result = NULL;
unsigned int hash2;
/* Lookup redirect from BPF */
- if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
- hashinfo == net->ipv4.tcp_death_row.hashinfo) {
+ if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
result = inet6_lookup_run_sk_lookup(net, IPPROTO_TCP, skb, doff,
saddr, sport, daddr, hnum, dif,
inet6_ehashfn);
@@ -220,6 +220,7 @@ struct sock *inet6_lookup_listener(const struct net *net,
goto done;
}
+ hashinfo = net->ipv4.tcp_death_row.hashinfo;
hash2 = ipv6_portaddr_hash(net, daddr, hnum);
ilb2 = inet_lhash2_bucket(hashinfo, hash2);
@@ -244,7 +245,6 @@ done:
EXPORT_SYMBOL_GPL(inet6_lookup_listener);
struct sock *inet6_lookup(const struct net *net,
- struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr, const __be16 sport,
const struct in6_addr *daddr, const __be16 dport,
@@ -253,7 +253,7 @@ struct sock *inet6_lookup(const struct net *net,
struct sock *sk;
bool refcounted;
- sk = __inet6_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
+ sk = __inet6_lookup(net, skb, doff, saddr, sport, daddr,
ntohs(dport), dif, 0, &refcounted);
if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
sk = NULL;
@@ -305,8 +305,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
dif, sdif))) {
if (sk2->sk_state == TCP_TIME_WAIT) {
tw = inet_twsk(sk2);
- if (sk->sk_protocol == IPPROTO_TCP &&
- tcp_twsk_unique(sk, sk2, twp))
+ if (tcp_twsk_unique(sk, sk2, twp))
break;
}
goto not_unique;
@@ -369,14 +368,3 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
__inet6_check_established);
}
EXPORT_SYMBOL_GPL(inet6_hash_connect);
-
-int inet6_hash(struct sock *sk)
-{
- int err = 0;
-
- if (sk->sk_state != TCP_CLOSE)
- err = __inet_hash(sk, NULL);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(inet6_hash);
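
The IPv6 socket-lookup helpers lose their struct inet_hashinfo argument: TCP is the only remaining user, so the table is fetched inside the helpers from net->ipv4.tcp_death_row.hashinfo, and the now-unused inet6_hash() wrapper is removed. A sketch of a caller after the change (names are illustrative, signature taken from the hunk above):

/* Sketch: established-socket lookup no longer needs the hash table handle. */
static struct sock *example_lookup(const struct net *net,
				   const struct in6_addr *saddr, __be16 sport,
				   const struct in6_addr *daddr, __be16 dport,
				   int dif, int sdif)
{
	return __inet6_lookup_established(net, saddr, sport,
					  daddr, ntohs(dport), dif, sdif);
}
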
diff --git a/net/ipv6/ioam6.c b/net/ipv6/ioam6.c
index a84d332f952f..9553a3200081 100644
--- a/net/ipv6/ioam6.c
+++ b/net/ipv6/ioam6.c
@@ -696,6 +696,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
struct ioam6_schema *sc,
u8 sclen, bool is_input)
{
+ struct net_device *dev = skb_dst_dev(skb);
struct timespec64 ts;
ktime_t tstamp;
u64 raw64;
@@ -712,7 +713,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
if (is_input)
byte--;
- raw32 = dev_net(skb_dst(skb)->dev)->ipv6.sysctl.ioam6_id;
+ raw32 = dev_net(dev)->ipv6.sysctl.ioam6_id;
*(__be32 *)data = cpu_to_be32((byte << 24) | raw32);
data += sizeof(__be32);
@@ -728,10 +729,10 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
*(__be16 *)data = cpu_to_be16(raw16);
data += sizeof(__be16);
- if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
+ if (dev->flags & IFF_LOOPBACK)
raw16 = IOAM6_U16_UNAVAILABLE;
else
- raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id);
+ raw16 = (__force u16)READ_ONCE(__in6_dev_get(dev)->cnf.ioam6_id);
*(__be16 *)data = cpu_to_be16(raw16);
data += sizeof(__be16);
@@ -783,10 +784,10 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
struct Qdisc *qdisc;
__u32 qlen, backlog;
- if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
+ if (dev->flags & IFF_LOOPBACK) {
*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
} else {
- queue = skb_get_tx_queue(skb_dst(skb)->dev, skb);
+ queue = skb_get_tx_queue(dev, skb);
qdisc = rcu_dereference(queue->qdisc);
qdisc_qstats_qlen_backlog(qdisc, &qlen, &backlog);
@@ -807,7 +808,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
if (is_input)
byte--;
- raw64 = dev_net(skb_dst(skb)->dev)->ipv6.sysctl.ioam6_id_wide;
+ raw64 = dev_net(dev)->ipv6.sysctl.ioam6_id_wide;
*(__be64 *)data = cpu_to_be64(((u64)byte << 56) | raw64);
data += sizeof(__be64);
@@ -823,10 +824,10 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
*(__be32 *)data = cpu_to_be32(raw32);
data += sizeof(__be32);
- if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
+ if (dev->flags & IFF_LOOPBACK)
raw32 = IOAM6_U32_UNAVAILABLE;
else
- raw32 = READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id_wide);
+ raw32 = READ_ONCE(__in6_dev_get(dev)->cnf.ioam6_id_wide);
*(__be32 *)data = cpu_to_be32(raw32);
data += sizeof(__be32);
diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c
index 40df8bdfaacd..1fe7894f14dd 100644
--- a/net/ipv6/ioam6_iptunnel.c
+++ b/net/ipv6/ioam6_iptunnel.c
@@ -335,7 +335,7 @@ static int ioam6_do_encap(struct net *net, struct sk_buff *skb,
if (has_tunsrc)
memcpy(&hdr->saddr, tunsrc, sizeof(*tunsrc));
else
- ipv6_dev_get_saddr(net, dst->dev, &hdr->daddr,
+ ipv6_dev_get_saddr(net, dst_dev(dst), &hdr->daddr,
IPV6_PREFER_SRC_PUBLIC, &hdr->saddr);
skb_postpush_rcsum(skb, hdr, len);
@@ -442,7 +442,7 @@ do_encap:
dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr);
local_bh_enable();
- err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst_dev(dst)));
if (unlikely(err))
goto drop;
}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 93578b2ec35f..02c16909f618 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -445,15 +445,17 @@ struct fib6_dump_arg {
static int fib6_rt_dump(struct fib6_info *rt, struct fib6_dump_arg *arg)
{
enum fib_event_type fib_event = FIB_EVENT_ENTRY_REPLACE;
+ unsigned int nsiblings;
int err;
if (!rt || rt == arg->net->ipv6.fib6_null_entry)
return 0;
- if (rt->fib6_nsiblings)
+ nsiblings = READ_ONCE(rt->fib6_nsiblings);
+ if (nsiblings)
err = call_fib6_multipath_entry_notifier(arg->nb, fib_event,
rt,
- rt->fib6_nsiblings,
+ nsiblings,
arg->extack);
else
err = call_fib6_entry_notifier(arg->nb, fib_event, rt,
@@ -963,8 +965,7 @@ insert_above:
}
static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
- const struct fib6_info *match,
- const struct fib6_table *table)
+ const struct fib6_info *match)
{
int cpu;
@@ -999,21 +1000,15 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
rcu_read_unlock();
}
-struct fib6_nh_pcpu_arg {
- struct fib6_info *from;
- const struct fib6_table *table;
-};
-
static int fib6_nh_drop_pcpu_from(struct fib6_nh *nh, void *_arg)
{
- struct fib6_nh_pcpu_arg *arg = _arg;
+ struct fib6_info *arg = _arg;
- __fib6_drop_pcpu_from(nh, arg->from, arg->table);
+ __fib6_drop_pcpu_from(nh, arg);
return 0;
}
-static void fib6_drop_pcpu_from(struct fib6_info *f6i,
- const struct fib6_table *table)
+static void fib6_drop_pcpu_from(struct fib6_info *f6i)
{
/* Make sure rt6_make_pcpu_route() wont add other percpu routes
* while we are cleaning them here.
@@ -1022,19 +1017,14 @@ static void fib6_drop_pcpu_from(struct fib6_info *f6i,
mb(); /* paired with the cmpxchg() in rt6_make_pcpu_route() */
if (f6i->nh) {
- struct fib6_nh_pcpu_arg arg = {
- .from = f6i,
- .table = table
- };
-
rcu_read_lock();
- nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_drop_pcpu_from, &arg);
+ nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_drop_pcpu_from, f6i);
rcu_read_unlock();
} else {
struct fib6_nh *fib6_nh;
fib6_nh = f6i->fib6_nh;
- __fib6_drop_pcpu_from(fib6_nh, f6i, table);
+ __fib6_drop_pcpu_from(fib6_nh, f6i);
}
}
@@ -1045,7 +1035,7 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
/* Flush all cached dst in exception table */
rt6_flush_exceptions(rt);
- fib6_drop_pcpu_from(rt, table);
+ fib6_drop_pcpu_from(rt);
if (rt->nh) {
spin_lock(&rt->nh->lock);
@@ -1138,7 +1128,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
if (rt6_duplicate_nexthop(iter, rt)) {
if (rt->fib6_nsiblings)
- rt->fib6_nsiblings = 0;
+ WRITE_ONCE(rt->fib6_nsiblings, 0);
if (!(iter->fib6_flags & RTF_EXPIRES))
return -EEXIST;
if (!(rt->fib6_flags & RTF_EXPIRES)) {
@@ -1167,7 +1157,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
*/
if (rt_can_ecmp &&
rt6_qualify_for_ecmp(iter))
- rt->fib6_nsiblings++;
+ WRITE_ONCE(rt->fib6_nsiblings,
+ rt->fib6_nsiblings + 1);
}
if (iter->fib6_metric > rt->fib6_metric)
@@ -1217,7 +1208,8 @@ next_iter:
fib6_nsiblings = 0;
list_for_each_entry_safe(sibling, temp_sibling,
&rt->fib6_siblings, fib6_siblings) {
- sibling->fib6_nsiblings++;
+ WRITE_ONCE(sibling->fib6_nsiblings,
+ sibling->fib6_nsiblings + 1);
BUG_ON(sibling->fib6_nsiblings != rt->fib6_nsiblings);
fib6_nsiblings++;
}
@@ -1264,8 +1256,9 @@ add:
list_for_each_entry_safe(sibling, next_sibling,
&rt->fib6_siblings,
fib6_siblings)
- sibling->fib6_nsiblings--;
- rt->fib6_nsiblings = 0;
+ WRITE_ONCE(sibling->fib6_nsiblings,
+ sibling->fib6_nsiblings - 1);
+ WRITE_ONCE(rt->fib6_nsiblings, 0);
list_del_rcu(&rt->fib6_siblings);
rcu_read_lock();
rt6_multipath_rebalance(next_sibling);
@@ -2014,8 +2007,9 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
notify_del = true;
list_for_each_entry_safe(sibling, next_sibling,
&rt->fib6_siblings, fib6_siblings)
- sibling->fib6_nsiblings--;
- rt->fib6_nsiblings = 0;
+ WRITE_ONCE(sibling->fib6_nsiblings,
+ sibling->fib6_nsiblings - 1);
+ WRITE_ONCE(rt->fib6_nsiblings, 0);
list_del_rcu(&rt->fib6_siblings);
rt6_multipath_rebalance(next_sibling);
}
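
fib6_nsiblings can now be read without the fib6 table lock (see the dump path at the top of this file's hunks), so every writer is converted to WRITE_ONCE() and the lockless reader uses READ_ONCE(); the pairing prevents load/store tearing on the shared counter. The pattern in isolation, as a sketch:

/* Sketch of the access pairing used above. */
static void example_add_sibling(struct fib6_info *rt)
{
	/* writer, called with the fib6 table lock held */
	WRITE_ONCE(rt->fib6_nsiblings, rt->fib6_nsiblings + 1);
}

static unsigned int example_read_siblings(const struct fib6_info *rt)
{
	/* lockless reader */
	return READ_ONCE(rt->fib6_nsiblings);
}
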
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 2dc9dcffe2ca..c82a75510c0e 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -111,8 +111,32 @@ static u32 HASH_ADDR(const struct in6_addr *addr)
#define tunnels_l tunnels[1]
#define tunnels_wc tunnels[0]
-/* Given src, dst and key, find appropriate for input tunnel. */
+static bool ip6gre_tunnel_match(struct ip6_tnl *t, int dev_type, int link,
+ int *cand_score, struct ip6_tnl **ret)
+{
+ int score = 0;
+
+ if (t->dev->type != ARPHRD_IP6GRE &&
+ t->dev->type != dev_type)
+ return false;
+
+ if (t->parms.link != link)
+ score |= 1;
+ if (t->dev->type != dev_type)
+ score |= 2;
+ if (score == 0) {
+ *ret = t;
+ return true;
+ }
+
+ if (score < *cand_score) {
+ *ret = t;
+ *cand_score = score;
+ }
+ return false;
+}
+/* Given src, dst and key, find appropriate for input tunnel. */
static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
const struct in6_addr *remote, const struct in6_addr *local,
__be32 key, __be16 gre_proto)
@@ -127,8 +151,8 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
gre_proto == htons(ETH_P_ERSPAN) ||
gre_proto == htons(ETH_P_ERSPAN2)) ?
ARPHRD_ETHER : ARPHRD_IP6GRE;
- int score, cand_score = 4;
struct net_device *ndev;
+ int cand_score = 4;
for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
if (!ipv6_addr_equal(local, &t->parms.laddr) ||
@@ -137,22 +161,8 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
!(t->dev->flags & IFF_UP))
continue;
- if (t->dev->type != ARPHRD_IP6GRE &&
- t->dev->type != dev_type)
- continue;
-
- score = 0;
- if (t->parms.link != link)
- score |= 1;
- if (t->dev->type != dev_type)
- score |= 2;
- if (score == 0)
- return t;
-
- if (score < cand_score) {
- cand = t;
- cand_score = score;
- }
+ if (ip6gre_tunnel_match(t, dev_type, link, &cand_score, &cand))
+ return cand;
}
for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
@@ -161,22 +171,8 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
!(t->dev->flags & IFF_UP))
continue;
- if (t->dev->type != ARPHRD_IP6GRE &&
- t->dev->type != dev_type)
- continue;
-
- score = 0;
- if (t->parms.link != link)
- score |= 1;
- if (t->dev->type != dev_type)
- score |= 2;
- if (score == 0)
- return t;
-
- if (score < cand_score) {
- cand = t;
- cand_score = score;
- }
+ if (ip6gre_tunnel_match(t, dev_type, link, &cand_score, &cand))
+ return cand;
}
for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
@@ -187,22 +183,8 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
!(t->dev->flags & IFF_UP))
continue;
- if (t->dev->type != ARPHRD_IP6GRE &&
- t->dev->type != dev_type)
- continue;
-
- score = 0;
- if (t->parms.link != link)
- score |= 1;
- if (t->dev->type != dev_type)
- score |= 2;
- if (score == 0)
- return t;
-
- if (score < cand_score) {
- cand = t;
- cand_score = score;
- }
+ if (ip6gre_tunnel_match(t, dev_type, link, &cand_score, &cand))
+ return cand;
}
for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
@@ -210,22 +192,8 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
!(t->dev->flags & IFF_UP))
continue;
- if (t->dev->type != ARPHRD_IP6GRE &&
- t->dev->type != dev_type)
- continue;
-
- score = 0;
- if (t->parms.link != link)
- score |= 1;
- if (t->dev->type != dev_type)
- score |= 2;
- if (score == 0)
- return t;
-
- if (score < cand_score) {
- cand = t;
- cand_score = score;
- }
+ if (ip6gre_tunnel_match(t, dev_type, link, &cand_score, &cand))
+ return cand;
}
if (cand)
@@ -361,9 +329,9 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
if (parms->name[0]) {
if (!dev_valid_name(parms->name))
return NULL;
- strscpy(name, parms->name, IFNAMSIZ);
+ strscpy(name, parms->name);
} else {
- strcpy(name, "ip6gre%d");
+ strscpy(name, "ip6gre%d");
}
dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
ip6gre_tunnel_setup);
@@ -1085,9 +1053,11 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
htonl(atomic_fetch_inc(&t->o_seqno)));
/* TooBig packet may have updated dst->dev's mtu */
- if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
- dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);
-
+ if (!t->parms.collect_md && dst) {
+ mtu = READ_ONCE(dst_dev(dst)->mtu);
+ if (dst_mtu(dst) > mtu)
+ dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
+ }
err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
NEXTHDR_GRE);
if (err != 0) {
@@ -1499,7 +1469,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
tunnel = netdev_priv(dev);
tunnel->dev = dev;
- strcpy(tunnel->parms.name, dev->name);
+ strscpy(tunnel->parms.name, dev->name);
ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
if (ret)
@@ -1559,7 +1529,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
tunnel->dev = dev;
tunnel->net = dev_net(dev);
- strcpy(tunnel->parms.name, dev->name);
+ strscpy(tunnel->parms.name, dev->name);
tunnel->hlen = sizeof(struct ipv6hdr) + 4;
}
@@ -1872,7 +1842,7 @@ static int ip6erspan_tap_init(struct net_device *dev)
tunnel = netdev_priv(dev);
tunnel->dev = dev;
- strcpy(tunnel->parms.name, dev->name);
+ strscpy(tunnel->parms.name, dev->name);
ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
if (ret)
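
Two cleanups in ip6_gre.c: the four copies of the tunnel-scoring loop collapse into ip6gre_tunnel_match(), and strcpy()/strscpy(..., IFNAMSIZ) become the two-argument strscpy(), which sizes the copy from the destination array and always NUL-terminates. A sketch of the strscpy() form, assuming the destination is a fixed-size array in scope:

	char name[IFNAMSIZ];

	/* bounded by sizeof(name); truncates and NUL-terminates if needed */
	strscpy(name, "ip6gre%d");
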
diff --git a/net/ipv6/ip6_icmp.c b/net/ipv6/ip6_icmp.c
index 9e3574880cb0..233914b63bdb 100644
--- a/net/ipv6/ip6_icmp.c
+++ b/net/ipv6/ip6_icmp.c
@@ -54,11 +54,12 @@ void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
struct inet6_skb_parm parm = { 0 };
struct sk_buff *cloned_skb = NULL;
enum ip_conntrack_info ctinfo;
+ enum ip_conntrack_dir dir;
struct in6_addr orig_ip;
struct nf_conn *ct;
ct = nf_ct_get(skb_in, &ctinfo);
- if (!ct || !(ct->status & IPS_SRC_NAT)) {
+ if (!ct || !(READ_ONCE(ct->status) & IPS_NAT_MASK)) {
__icmpv6_send(skb_in, type, code, info, &parm);
return;
}
@@ -73,7 +74,8 @@ void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
goto out;
orig_ip = ipv6_hdr(skb_in)->saddr;
- ipv6_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.in6;
+ dir = CTINFO2DIR(ctinfo);
+ ipv6_hdr(skb_in)->saddr = ct->tuplehash[dir].tuple.src.u3.in6;
__icmpv6_send(skb_in, type, code, info, &parm);
ipv6_hdr(skb_in)->saddr = orig_ip;
out:
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 39da6a7ce5f1..168ec07e31cc 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -187,7 +187,9 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
* arrived via the sending interface (ethX), because of the
* nature of scoping architecture. --yoshfuji
*/
- IP6CB(skb)->iif = skb_valid_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;
+ IP6CB(skb)->iif = skb_valid_dst(skb) ?
+ ip6_dst_idev(skb_dst(skb))->dev->ifindex :
+ dev->ifindex;
if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
goto err;
@@ -476,6 +478,13 @@ discard:
static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
+ if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) {
+ __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+ IPSTATS_MIB_INDISCARDS);
+ kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
+ return 0;
+ }
+
skb_clear_delivery_time(skb);
ip6_protocol_deliver_rcu(net, skb, 0, false);
@@ -499,38 +508,32 @@ EXPORT_SYMBOL_GPL(ip6_input);
int ip6_mc_input(struct sk_buff *skb)
{
+ struct net_device *dev = skb->dev;
int sdif = inet6_sdif(skb);
const struct ipv6hdr *hdr;
- struct net_device *dev;
bool deliver;
- __IP6_UPD_PO_STATS(dev_net(skb_dst(skb)->dev),
- __in6_dev_get_safely(skb->dev), IPSTATS_MIB_INMCAST,
- skb->len);
+ __IP6_UPD_PO_STATS(skb_dst_dev_net_rcu(skb),
+ __in6_dev_get_safely(dev), IPSTATS_MIB_INMCAST,
+ skb->len);
/* skb->dev passed may be master dev for vrfs. */
if (sdif) {
- rcu_read_lock();
- dev = dev_get_by_index_rcu(dev_net(skb->dev), sdif);
+ dev = dev_get_by_index_rcu(dev_net_rcu(dev), sdif);
if (!dev) {
- rcu_read_unlock();
kfree_skb(skb);
return -ENODEV;
}
- } else {
- dev = skb->dev;
}
hdr = ipv6_hdr(skb);
deliver = ipv6_chk_mcast_addr(dev, &hdr->daddr, NULL);
- if (sdif)
- rcu_read_unlock();
#ifdef CONFIG_IPV6_MROUTE
/*
* IPv6 multicast router mode is now supported ;)
*/
- if (atomic_read(&dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding) &&
+ if (atomic_read(&dev_net_rcu(skb->dev)->ipv6.devconf_all->mc_forwarding) &&
!(ipv6_addr_type(&hdr->daddr) &
(IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) &&
likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
@@ -571,22 +574,21 @@ int ip6_mc_input(struct sk_buff *skb)
/* unknown RA - process it normally */
}
- if (deliver)
+ if (deliver) {
skb2 = skb_clone(skb, GFP_ATOMIC);
- else {
+ } else {
skb2 = skb;
skb = NULL;
}
- if (skb2) {
+ if (skb2)
ip6_mr_input(skb2);
- }
}
out:
#endif
- if (likely(deliver))
+ if (likely(deliver)) {
ip6_input(skb);
- else {
+ } else {
/* discard */
kfree_skb(skb);
}
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 9822163428b0..fce91183797a 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -148,7 +148,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
ops = rcu_dereference(inet6_offloads[proto]);
if (likely(ops && ops->callbacks.gso_segment)) {
- skb_reset_transport_header(skb);
+ if (!skb_reset_transport_header_careful(skb))
+ goto out;
+
segs = ops->callbacks.gso_segment(skb, features);
if (!segs)
skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 7bd29a9ff0db..f904739e99b9 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -60,7 +60,7 @@
static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
- struct net_device *dev = dst->dev;
+ struct net_device *dev = dst_dev_rcu(dst);
struct inet6_dev *idev = ip6_dst_idev(dst);
unsigned int hh_len = LL_RESERVED_SPACE(dev);
const struct in6_addr *daddr, *nexthop;
@@ -70,15 +70,12 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
/* Be paranoid, rather than too clever. */
if (unlikely(hh_len > skb_headroom(skb)) && dev->header_ops) {
- /* Make sure idev stays alive */
- rcu_read_lock();
+ /* idev stays alive because we hold rcu_read_lock(). */
skb = skb_expand_head(skb, hh_len);
if (!skb) {
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
- rcu_read_unlock();
return -ENOMEM;
}
- rcu_read_unlock();
}
hdr = ipv6_hdr(skb);
@@ -123,7 +120,6 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
- rcu_read_lock();
nexthop = rt6_nexthop(dst_rt6_info(dst), daddr);
neigh = __ipv6_neigh_lookup_noref(dev, nexthop);
@@ -131,7 +127,6 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
if (unlikely(!neigh))
neigh = __neigh_create(&nd_tbl, nexthop, dev, false);
if (IS_ERR(neigh)) {
- rcu_read_unlock();
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTNOROUTES);
kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
return -EINVAL;
@@ -139,7 +134,6 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
}
sock_confirm_neigh(skb, neigh);
ret = neigh_output(neigh, skb, false);
- rcu_read_unlock();
return ret;
}
@@ -232,22 +226,30 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
- struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+ struct dst_entry *dst = skb_dst(skb);
+ struct net_device *dev, *indev = skb->dev;
+ struct inet6_dev *idev;
+ int ret;
skb->protocol = htons(ETH_P_IPV6);
+ rcu_read_lock();
+ dev = dst_dev_rcu(dst);
+ idev = ip6_dst_idev(dst);
skb->dev = dev;
if (unlikely(!idev || READ_ONCE(idev->cnf.disable_ipv6))) {
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ rcu_read_unlock();
kfree_skb_reason(skb, SKB_DROP_REASON_IPV6DISABLED);
return 0;
}
- return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
- net, sk, skb, indev, dev,
- ip6_finish_output,
- !(IP6CB(skb)->flags & IP6SKB_REROUTED));
+ ret = NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
+ net, sk, skb, indev, dev,
+ ip6_finish_output,
+ !(IP6CB(skb)->flags & IP6SKB_REROUTED));
+ rcu_read_unlock();
+ return ret;
}
EXPORT_SYMBOL(ip6_output);
@@ -267,35 +269,36 @@ bool ip6_autoflowlabel(struct net *net, const struct sock *sk)
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
__u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority)
{
- struct net *net = sock_net(sk);
const struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *first_hop = &fl6->daddr;
struct dst_entry *dst = skb_dst(skb);
- struct net_device *dev = dst->dev;
struct inet6_dev *idev = ip6_dst_idev(dst);
struct hop_jumbo_hdr *hop_jumbo;
int hoplen = sizeof(*hop_jumbo);
+ struct net *net = sock_net(sk);
unsigned int head_room;
+ struct net_device *dev;
struct ipv6hdr *hdr;
u8 proto = fl6->flowi6_proto;
int seg_len = skb->len;
- int hlimit = -1;
+ int ret, hlimit = -1;
u32 mtu;
+ rcu_read_lock();
+
+ dev = dst_dev_rcu(dst);
head_room = sizeof(struct ipv6hdr) + hoplen + LL_RESERVED_SPACE(dev);
if (opt)
head_room += opt->opt_nflen + opt->opt_flen;
if (unlikely(head_room > skb_headroom(skb))) {
- /* Make sure idev stays alive */
- rcu_read_lock();
+ /* idev stays alive while we hold rcu_read_lock(). */
skb = skb_expand_head(skb, head_room);
if (!skb) {
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
- rcu_read_unlock();
- return -ENOBUFS;
+ ret = -ENOBUFS;
+ goto unlock;
}
- rcu_read_unlock();
}
if (opt) {
@@ -357,17 +360,21 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
* skb to its handler for processing
*/
skb = l3mdev_ip6_out((struct sock *)sk, skb);
- if (unlikely(!skb))
- return 0;
+ if (unlikely(!skb)) {
+ ret = 0;
+ goto unlock;
+ }
/* hooks should never assume socket lock is held.
* we promote our socket to non const
*/
- return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
- net, (struct sock *)sk, skb, NULL, dev,
- dst_output);
+ ret = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
+ net, (struct sock *)sk, skb, NULL, dev,
+ dst_output);
+ goto unlock;
}
+ ret = -EMSGSIZE;
skb->dev = dev;
/* ipv6_local_error() does not require socket lock,
* we promote our socket to non const
@@ -376,7 +383,9 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
IP6_INC_STATS(net, idev, IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb);
- return -EMSGSIZE;
+unlock:
+ rcu_read_unlock();
+ return ret;
}
EXPORT_SYMBOL(ip6_xmit);
@@ -503,13 +512,15 @@ int ip6_forward(struct sk_buff *skb)
struct dst_entry *dst = skb_dst(skb);
struct ipv6hdr *hdr = ipv6_hdr(skb);
struct inet6_skb_parm *opt = IP6CB(skb);
- struct net *net = dev_net(dst->dev);
+ struct net *net = dev_net(dst_dev(dst));
+ struct net_device *dev;
struct inet6_dev *idev;
SKB_DR(reason);
u32 mtu;
idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
- if (READ_ONCE(net->ipv6.devconf_all->forwarding) == 0)
+ if (!READ_ONCE(net->ipv6.devconf_all->forwarding) &&
+ (!idev || !READ_ONCE(idev->cnf.force_forwarding)))
goto error;
if (skb->pkt_type != PACKET_HOST)
@@ -561,7 +572,7 @@ int ip6_forward(struct sk_buff *skb)
/* XXX: idev->cnf.proxy_ndp? */
if (READ_ONCE(net->ipv6.devconf_all->proxy_ndp) &&
- pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
+ pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev)) {
int proxied = ip6_forward_proxy_check(skb);
if (proxied > 0) {
/* It's tempting to decrease the hop limit
@@ -591,12 +602,12 @@ int ip6_forward(struct sk_buff *skb)
goto drop;
}
dst = skb_dst(skb);
-
+ dev = dst_dev(dst);
/* IPv6 specs say nothing about it, but it is clear that we cannot
send redirects to source routed frames.
We don't send redirects to frames decapsulated from IPsec.
*/
- if (IP6CB(skb)->iif == dst->dev->ifindex &&
+ if (IP6CB(skb)->iif == dev->ifindex &&
opt->srcrt == 0 && !skb_sec_path(skb)) {
struct in6_addr *target = NULL;
struct inet_peer *peer;
@@ -644,7 +655,7 @@ int ip6_forward(struct sk_buff *skb)
if (ip6_pkt_too_big(skb, mtu)) {
/* Again, force OUTPUT device used as source address */
- skb->dev = dst->dev;
+ skb->dev = dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INTOOBIGERRORS);
__IP6_INC_STATS(net, ip6_dst_idev(dst),
@@ -653,7 +664,7 @@ int ip6_forward(struct sk_buff *skb)
return -EMSGSIZE;
}
- if (skb_cow(skb, dst->dev->hard_header_len)) {
+ if (skb_cow(skb, dev->hard_header_len)) {
__IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_OUTDISCARDS);
goto drop;
@@ -666,7 +677,7 @@ int ip6_forward(struct sk_buff *skb)
hdr->hop_limit--;
return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
- net, NULL, skb, skb->dev, dst->dev,
+ net, NULL, skb, skb->dev, dev,
ip6_forward_finish);
error:
@@ -1089,11 +1100,13 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
* sockets.
* 2. oif also should be the same.
*/
- if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
+ if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr,
+ np->daddr_cache ? &sk->sk_v6_daddr : NULL) ||
#ifdef CONFIG_IPV6_SUBTREES
- ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
+ ip6_rt_check(&rt->rt6i_src, &fl6->saddr,
+ np->saddr_cache ? &np->saddr : NULL) ||
#endif
- (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
+ (fl6->flowi6_oif && fl6->flowi6_oif != dst_dev(dst)->ifindex)) {
dst_release(dst);
dst = NULL;
}
@@ -1760,8 +1773,7 @@ alloc_new_skb:
if (WARN_ON_ONCE(copy > msg->msg_iter.count))
goto error;
- err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
- sk->sk_allocation);
+ err = skb_splice_from_iter(skb, &msg->msg_iter, copy);
if (err < 0)
goto error;
copy = err;
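
A brief, hedged aside (not part of the patch) on the access pattern the ip6_output.c hunks above rely on: the dst's device and inet6_dev are only guaranteed to stay alive inside an RCU read-side section, so the reworked functions resolve them once with dst_dev_rcu()/ip6_dst_idev() under rcu_read_lock() and keep the lock held until after the netfilter hook returns. A minimal sketch, with example_finish() as a hypothetical stand-in for the real NF_HOOK-based dispatch:

	static int example_ipv6_out(struct net *net, struct sock *sk,
				    struct sk_buff *skb)
	{
		struct dst_entry *dst = skb_dst(skb);
		struct net_device *dev;
		struct inet6_dev *idev;
		int ret;

		rcu_read_lock();
		dev = dst_dev_rcu(dst);		/* only valid under RCU */
		idev = ip6_dst_idev(dst);
		skb->dev = dev;
		if (unlikely(!idev)) {
			rcu_read_unlock();
			kfree_skb(skb);
			return 0;
		}
		ret = example_finish(net, sk, skb);	/* hypothetical hook */
		rcu_read_unlock();
		return ret;
	}
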
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 894d3158a6f0..6405072050e0 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -632,7 +632,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
} else {
if (ip_route_input(skb2, eiph->daddr, eiph->saddr,
ip4h_dscp(eiph), skb2->dev) ||
- skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
+ skb_dst_dev(skb2)->type != ARPHRD_TUNNEL6)
goto out;
}
@@ -1179,7 +1179,7 @@ route_lookup:
ndst = dst;
}
- tdev = dst->dev;
+ tdev = dst_dev(dst);
if (tdev == dev) {
DEV_STATS_INC(dev, collisions);
@@ -1255,10 +1255,9 @@ route_lookup:
/* Calculate max headroom for all the headers and adjust
* needed_headroom if necessary.
*/
- max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
+ max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr)
+ dst->header_len + t->hlen;
- if (max_headroom > READ_ONCE(dev->needed_headroom))
- WRITE_ONCE(dev->needed_headroom, max_headroom);
+ ip_tunnel_adj_headroom(dev, max_headroom);
err = ip6_tnl_encap(skb, t, &proto, fl6);
if (err)
@@ -1278,7 +1277,7 @@ route_lookup:
ipv6h->nexthdr = proto;
ipv6h->saddr = fl6->saddr;
ipv6h->daddr = fl6->daddr;
- ip6tunnel_xmit(NULL, skb, dev);
+ ip6tunnel_xmit(NULL, skb, dev, 0);
return 0;
tx_err_link_failure:
DEV_STATS_INC(dev, tx_carrier_errors);
@@ -1562,11 +1561,22 @@ static void ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
netdev_state_change(t->dev);
}
-static void ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
+static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p,
+ bool strict)
{
- /* for default tnl0 device allow to change only the proto */
+ /* For the default ip6tnl0 device, allow changing only the protocol
+ * (the IP6_TNL_F_CAP_PER_PACKET flag is set on ip6tnl0, and all other
+ * parameters are 0).
+ */
+ if (strict &&
+ (!ipv6_addr_any(&p->laddr) || !ipv6_addr_any(&p->raddr) ||
+ p->flags != t->parms.flags || p->hop_limit || p->encap_limit ||
+ p->flowinfo || p->link || p->fwmark || p->collect_md))
+ return -EINVAL;
+
t->parms.proto = p->proto;
netdev_state_change(t->dev);
+ return 0;
}
static void
@@ -1680,7 +1690,7 @@ ip6_tnl_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
} else
t = netdev_priv(dev);
if (dev == ip6n->fb_tnl_dev)
- ip6_tnl0_update(t, &p1);
+ ip6_tnl0_update(t, &p1, false);
else
ip6_tnl_update(t, &p1);
}
@@ -2053,8 +2063,28 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
struct ip_tunnel_encap ipencap;
- if (dev == ip6n->fb_tnl_dev)
- return -EINVAL;
+ if (dev == ip6n->fb_tnl_dev) {
+ if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
+ /* iproute2 always sets TUNNEL_ENCAP_FLAG_CSUM6, so
+ * let's ignore this flag.
+ */
+ ipencap.flags &= ~TUNNEL_ENCAP_FLAG_CSUM6;
+ if (memchr_inv(&ipencap, 0, sizeof(ipencap))) {
+ NL_SET_ERR_MSG(extack,
+ "Only protocol can be changed for fallback tunnel, not encap params");
+ return -EINVAL;
+ }
+ }
+
+ ip6_tnl_netlink_parms(data, &p);
+ if (ip6_tnl0_update(t, &p, true) < 0) {
+ NL_SET_ERR_MSG(extack,
+ "Only protocol can be changed for fallback tunnel");
+ return -EINVAL;
+ }
+
+ return 0;
+ }
if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
int err = ip6_tnl_encap_setup(t, &ipencap);
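
A short, hedged illustration (condensed from the two call sites above, surrounding error handling elided) of how the new "strict" argument of ip6_tnl0_update() is used: the legacy ioctl path keeps the old permissive behaviour for the fallback ip6tnl0 device, while the netlink changelink path rejects any attribute other than the protocol.

	/* SIOCCHGTUNNEL ioctl path: silently ignores everything but proto */
	if (dev == ip6n->fb_tnl_dev)
		ip6_tnl0_update(t, &p1, false);

	/* netlink changelink path: -EINVAL unless only the protocol changes */
	if (ip6_tnl0_update(t, &p, true) < 0) {
		NL_SET_ERR_MSG(extack,
			       "Only protocol can be changed for fallback tunnel");
		return -EINVAL;
	}
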
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index c99053189ea8..0ff547a4bff7 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -74,13 +74,14 @@ error:
}
EXPORT_SYMBOL_GPL(udp_sock_create6);
-int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb,
- struct net_device *dev,
- const struct in6_addr *saddr,
- const struct in6_addr *daddr,
- __u8 prio, __u8 ttl, __be32 label,
- __be16 src_port, __be16 dst_port, bool nocheck)
+void udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb,
+ struct net_device *dev,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u8 prio, __u8 ttl, __be32 label,
+ __be16 src_port, __be16 dst_port, bool nocheck,
+ u16 ip6cb_flags)
{
struct udphdr *uh;
struct ipv6hdr *ip6h;
@@ -108,8 +109,7 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
ip6h->daddr = *daddr;
ip6h->saddr = *saddr;
- ip6tunnel_xmit(sk, skb, dev);
- return 0;
+ ip6tunnel_xmit(sk, skb, dev, ip6cb_flags);
}
EXPORT_SYMBOL_GPL(udp_tunnel6_xmit_skb);
@@ -168,7 +168,7 @@ struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb,
netdev_dbg(dev, "no route to %pI6\n", &fl6.daddr);
return ERR_PTR(-ENETUNREACH);
}
- if (dst->dev == dev) { /* is this necessary? */
+ if (dst_dev(dst) == dev) { /* is this necessary? */
netdev_dbg(dev, "circular route to %pI6\n", &fl6.daddr);
dst_release(dst);
return ERR_PTR(-ELOOP);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 40464a88bca6..ad5290be4dd6 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -497,7 +497,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
(const struct in6_addr *)&x->id.daddr))
goto tx_err_link_failure;
- tdev = dst->dev;
+ tdev = dst_dev(dst);
if (tdev == dev) {
DEV_STATS_INC(dev, collisions);
@@ -529,7 +529,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
xmit:
skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
skb_dst_set(skb, dst);
- skb->dev = skb_dst(skb)->dev;
+ skb->dev = dst_dev(dst);
err = dst_output(t->net, skb->sk, skb);
if (net_xmit_eval(err) == 0)
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 9db31e5b998c..e047a4680ab0 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2035,8 +2035,8 @@ static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct
* Processing handlers for ip6mr_forward
*/
-static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
- struct sk_buff *skb, int vifi)
+static int ip6mr_prepare_xmit(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, int vifi)
{
struct vif_device *vif = &mrt->vif_table[vifi];
struct net_device *vif_dev;
@@ -2046,7 +2046,7 @@ static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
vif_dev = vif_dev_read(vif);
if (!vif_dev)
- goto out_free;
+ return -1;
#ifdef CONFIG_IPV6_PIMSM_V2
if (vif->flags & MIFF_REGISTER) {
@@ -2055,7 +2055,7 @@ static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
DEV_STATS_INC(vif_dev, tx_packets);
ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
- goto out_free;
+ return -1;
}
#endif
@@ -2069,7 +2069,7 @@ static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
dst_release(dst);
- goto out_free;
+ return -1;
}
skb_dst_drop(skb);
@@ -2093,20 +2093,43 @@ static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
/* We are about to write */
/* XXX: extension headers? */
if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(vif_dev)))
- goto out_free;
+ return -1;
ipv6h = ipv6_hdr(skb);
ipv6h->hop_limit--;
+ return 0;
+}
+
+static void ip6mr_forward2(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, int vifi)
+{
+ struct net_device *indev = skb->dev;
+
+ if (ip6mr_prepare_xmit(net, mrt, skb, vifi))
+ goto out_free;
IP6CB(skb)->flags |= IP6SKB_FORWARDED;
- return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
- net, NULL, skb, skb->dev, vif_dev,
- ip6mr_forward2_finish);
+ NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
+ net, NULL, skb, indev, skb->dev,
+ ip6mr_forward2_finish);
+ return;
+
+out_free:
+ kfree_skb(skb);
+}
+
+static void ip6mr_output2(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, int vifi)
+{
+ if (ip6mr_prepare_xmit(net, mrt, skb, vifi))
+ goto out_free;
+
+ ip6_output(net, NULL, skb);
+ return;
out_free:
kfree_skb(skb);
- return 0;
}
/* Called with rcu_read_lock() */
@@ -2221,6 +2244,56 @@ dont_forward:
kfree_skb(skb);
}
+/* Called under rcu_read_lock() */
+static void ip6_mr_output_finish(struct net *net, struct mr_table *mrt,
+ struct net_device *dev, struct sk_buff *skb,
+ struct mfc6_cache *c)
+{
+ int psend = -1;
+ int ct;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ atomic_long_inc(&c->_c.mfc_un.res.pkt);
+ atomic_long_add(skb->len, &c->_c.mfc_un.res.bytes);
+ WRITE_ONCE(c->_c.mfc_un.res.lastuse, jiffies);
+
+ /* Forward the frame */
+ if (ipv6_addr_any(&c->mf6c_origin) &&
+ ipv6_addr_any(&c->mf6c_mcastgrp)) {
+ if (ipv6_hdr(skb)->hop_limit >
+ c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
+ /* It's an (*,*) entry and the packet is not coming from
+ * the upstream: forward the packet to the upstream
+ * only.
+ */
+ psend = c->_c.mfc_parent;
+ goto last_forward;
+ }
+ goto dont_forward;
+ }
+ for (ct = c->_c.mfc_un.res.maxvif - 1;
+ ct >= c->_c.mfc_un.res.minvif; ct--) {
+ if (ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) {
+ if (psend != -1) {
+ struct sk_buff *skb2;
+
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (skb2)
+ ip6mr_output2(net, mrt, skb2, psend);
+ }
+ psend = ct;
+ }
+ }
+last_forward:
+ if (psend != -1) {
+ ip6mr_output2(net, mrt, skb, psend);
+ return;
+ }
+
+dont_forward:
+ kfree_skb(skb);
+}
/*
* Multicast packets for forwarding arrive here
@@ -2228,21 +2301,20 @@ dont_forward:
int ip6_mr_input(struct sk_buff *skb)
{
+ struct net_device *dev = skb->dev;
+ struct net *net = dev_net_rcu(dev);
struct mfc6_cache *cache;
- struct net *net = dev_net(skb->dev);
struct mr_table *mrt;
struct flowi6 fl6 = {
- .flowi6_iif = skb->dev->ifindex,
+ .flowi6_iif = dev->ifindex,
.flowi6_mark = skb->mark,
};
int err;
- struct net_device *dev;
/* skb->dev passed in is the master dev for vrfs.
* Get the proper interface that does have a vif associated with it.
*/
- dev = skb->dev;
- if (netif_is_l3_master(skb->dev)) {
+ if (netif_is_l3_master(dev)) {
dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
if (!dev) {
kfree_skb(skb);
@@ -2288,6 +2360,61 @@ int ip6_mr_input(struct sk_buff *skb)
return 0;
}
+int ip6_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ struct net_device *dev = skb_dst(skb)->dev;
+ struct flowi6 fl6 = (struct flowi6) {
+ .flowi6_iif = LOOPBACK_IFINDEX,
+ .flowi6_mark = skb->mark,
+ };
+ struct mfc6_cache *cache;
+ struct mr_table *mrt;
+ int err;
+ int vif;
+
+ guard(rcu)();
+
+ if (IP6CB(skb)->flags & IP6SKB_FORWARDED)
+ goto ip6_output;
+ if (!(IP6CB(skb)->flags & IP6SKB_MCROUTE))
+ goto ip6_output;
+
+ err = ip6mr_fib_lookup(net, &fl6, &mrt);
+ if (err < 0) {
+ kfree_skb(skb);
+ return err;
+ }
+
+ cache = ip6mr_cache_find(mrt,
+ &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
+ if (!cache) {
+ vif = ip6mr_find_vif(mrt, dev);
+ if (vif >= 0)
+ cache = ip6mr_cache_find_any(mrt,
+ &ipv6_hdr(skb)->daddr,
+ vif);
+ }
+
+ /* No usable cache entry */
+ if (!cache) {
+ vif = ip6mr_find_vif(mrt, dev);
+ if (vif >= 0)
+ return ip6mr_cache_unresolved(mrt, vif, skb, dev);
+ goto ip6_output;
+ }
+
+ /* Wrong interface */
+ vif = cache->_c.mfc_parent;
+ if (rcu_access_pointer(mrt->vif_table[vif].dev) != dev)
+ goto ip6_output;
+
+ ip6_mr_output_finish(net, mrt, dev, skb, cache);
+ return 0;
+
+ip6_output:
+ return ip6_output(net, sk, skb);
+}
+
int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
u32 portid)
{
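
A hedged note on the ip6mr.c refactor above: ip6mr_forward2() is split so the vif/route preparation can be shared between the existing forwarding path and the new ip6_mr_output() path for locally originated multicast. A condensed sketch of the two resulting dispatch shapes (only one of the two is taken for a given skb; error handling elided):

	if (ip6mr_prepare_xmit(net, mrt, skb, vifi))
		goto out_free;		/* no vif, bad route, skb_cow() failed, ... */

	/* forwarded traffic still traverses the FORWARD hook ... */
	NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, net, NULL, skb,
		indev, skb->dev, ip6mr_forward2_finish);

	/* ... while locally generated multicast is handed back to ip6_output() */
	ip6_output(net, NULL, skb);
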
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 72d4858dec18..8607569de34f 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -71,6 +71,7 @@ static int ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
return 0;
}
+static struct lock_class_key xfrm_state_lock_key;
static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
{
struct net *net = xs_net(x);
@@ -79,6 +80,7 @@ static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
t = xfrm_state_alloc(net);
if (!t)
goto out;
+ lockdep_set_class(&t->lock, &xfrm_state_lock_key);
t->id.proto = IPPROTO_IPV6;
t->id.spi = xfrm6_tunnel_alloc_spi(net, (xfrm_address_t *)&x->props.saddr);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 1e225e6489ea..a61e742794f9 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -49,6 +49,7 @@
#include <net/xfrm.h>
#include <net/compat.h>
#include <net/seg6.h>
+#include <net/psp.h>
#include <linux/uaccess.h>
@@ -107,7 +108,10 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
!((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
inet_sk(sk)->inet_daddr != LOOPBACK4_IPV6) {
struct inet_connection_sock *icsk = inet_csk(sk);
- icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
+
+ icsk->icsk_ext_hdr_len =
+ psp_sk_overhead(sk) +
+ opt->opt_flen + opt->opt_nflen;
icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
}
@@ -117,26 +121,6 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
return opt;
}
-static bool setsockopt_needs_rtnl(int optname)
-{
- switch (optname) {
- case IPV6_ADDRFORM:
- case IPV6_ADD_MEMBERSHIP:
- case IPV6_DROP_MEMBERSHIP:
- case IPV6_JOIN_ANYCAST:
- case IPV6_LEAVE_ANYCAST:
- case MCAST_JOIN_GROUP:
- case MCAST_LEAVE_GROUP:
- case MCAST_JOIN_SOURCE_GROUP:
- case MCAST_LEAVE_SOURCE_GROUP:
- case MCAST_BLOCK_SOURCE:
- case MCAST_UNBLOCK_SOURCE:
- case MCAST_MSFILTER:
- return true;
- }
- return false;
-}
-
static int copy_group_source_from_sockptr(struct group_source_req *greqs,
sockptr_t optval, int optlen)
{
@@ -395,9 +379,8 @@ int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct net *net = sock_net(sk);
- int val, valbool;
int retv = -ENOPROTOOPT;
- bool needs_rtnl = setsockopt_needs_rtnl(optname);
+ int val, valbool;
if (sockptr_is_null(optval))
val = 0;
@@ -562,8 +545,7 @@ int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
return 0;
}
}
- if (needs_rtnl)
- rtnl_lock();
+
sockopt_lock_sock(sk);
/* Another thread has converted the socket into IPv4 with
@@ -969,8 +951,6 @@ done:
unlock:
sockopt_release_sock(sk);
- if (needs_rtnl)
- rtnl_unlock();
return retv;
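
A hedged note (an inference from the hunks, not stated in the patch itself) tying the ipv6_sockglue.c change above to the mcast.c changes below: the multicast-related socket options no longer need the RTNL lock here because the per-interface group state they modify is now serialized by idev->mc_lock, so the setsockopt path is reduced to the socket lock alone:

	sockopt_lock_sock(sk);
	/* option handling; multicast joins/leaves rely only on lock_sock()
	 * here plus idev->mc_lock taken inside net/ipv6/mcast.c */
	sockopt_release_sock(sk);
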
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 65831b4fee1f..016b572e7d6f 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -108,9 +108,9 @@ static int __ipv6_dev_mc_inc(struct net_device *dev,
int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
int sysctl_mld_qrv __read_mostly = MLD_QRV_DEFAULT;
-/*
- * socket join on multicast group
- */
+#define mc_assert_locked(idev) \
+ lockdep_assert_held(&(idev)->mc_lock)
+
#define mc_dereference(e, idev) \
rcu_dereference_protected(e, lockdep_is_held(&(idev)->mc_lock))
@@ -169,17 +169,41 @@ static int unsolicited_report_interval(struct inet6_dev *idev)
return iv > 0 ? iv : 1;
}
+static struct net_device *ip6_mc_find_dev(struct net *net,
+ const struct in6_addr *group,
+ int ifindex)
+{
+ struct net_device *dev = NULL;
+ struct rt6_info *rt;
+
+ if (ifindex == 0) {
+ rcu_read_lock();
+ rt = rt6_lookup(net, group, NULL, 0, NULL, 0);
+ if (rt) {
+ dev = dst_dev_rcu(&rt->dst);
+ dev_hold(dev);
+ ip6_rt_put(rt);
+ }
+ rcu_read_unlock();
+ } else {
+ dev = dev_get_by_index(net, ifindex);
+ }
+
+ return dev;
+}
+
+/*
+ * socket join on multicast group
+ */
static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
const struct in6_addr *addr, unsigned int mode)
{
- struct net_device *dev = NULL;
- struct ipv6_mc_socklist *mc_lst;
struct ipv6_pinfo *np = inet6_sk(sk);
+ struct ipv6_mc_socklist *mc_lst;
struct net *net = sock_net(sk);
+ struct net_device *dev = NULL;
int err;
- ASSERT_RTNL();
-
if (!ipv6_addr_is_multicast(addr))
return -EINVAL;
@@ -190,23 +214,13 @@ static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
}
mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
-
if (!mc_lst)
return -ENOMEM;
mc_lst->next = NULL;
mc_lst->addr = *addr;
- if (ifindex == 0) {
- struct rt6_info *rt;
- rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
- if (rt) {
- dev = rt->dst.dev;
- ip6_rt_put(rt);
- }
- } else
- dev = __dev_get_by_index(net, ifindex);
-
+ dev = ip6_mc_find_dev(net, addr, ifindex);
if (!dev) {
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
return -ENODEV;
@@ -216,12 +230,11 @@ static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
mc_lst->sfmode = mode;
RCU_INIT_POINTER(mc_lst->sflist, NULL);
- /*
- * now add/increase the group membership on the device
- */
-
+ /* now add/increase the group membership on the device */
err = __ipv6_dev_mc_inc(dev, addr, mode);
+ dev_put(dev);
+
if (err) {
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
return err;
@@ -248,14 +261,36 @@ int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
/*
* socket leave on multicast group
*/
+static void __ipv6_sock_mc_drop(struct sock *sk, struct ipv6_mc_socklist *mc_lst)
+{
+ struct net *net = sock_net(sk);
+ struct net_device *dev;
+
+ dev = dev_get_by_index(net, mc_lst->ifindex);
+ if (dev) {
+ struct inet6_dev *idev = in6_dev_get(dev);
+
+ ip6_mc_leave_src(sk, mc_lst, idev);
+
+ if (idev) {
+ __ipv6_dev_mc_dec(idev, &mc_lst->addr);
+ in6_dev_put(idev);
+ }
+
+ dev_put(dev);
+ } else {
+ ip6_mc_leave_src(sk, mc_lst, NULL);
+ }
+
+ atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
+ kfree_rcu(mc_lst, rcu);
+}
+
int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
struct ipv6_pinfo *np = inet6_sk(sk);
- struct ipv6_mc_socklist *mc_lst;
struct ipv6_mc_socklist __rcu **lnk;
- struct net *net = sock_net(sk);
-
- ASSERT_RTNL();
+ struct ipv6_mc_socklist *mc_lst;
if (!ipv6_addr_is_multicast(addr))
return -EINVAL;
@@ -265,23 +300,8 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
lnk = &mc_lst->next) {
if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
ipv6_addr_equal(&mc_lst->addr, addr)) {
- struct net_device *dev;
-
*lnk = mc_lst->next;
-
- dev = __dev_get_by_index(net, mc_lst->ifindex);
- if (dev) {
- struct inet6_dev *idev = __in6_dev_get(dev);
-
- ip6_mc_leave_src(sk, mc_lst, idev);
- if (idev)
- __ipv6_dev_mc_dec(idev, &mc_lst->addr);
- } else {
- ip6_mc_leave_src(sk, mc_lst, NULL);
- }
-
- atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
- kfree_rcu(mc_lst, rcu);
+ __ipv6_sock_mc_drop(sk, mc_lst);
return 0;
}
}
@@ -290,31 +310,20 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
}
EXPORT_SYMBOL(ipv6_sock_mc_drop);
-static struct inet6_dev *ip6_mc_find_dev_rtnl(struct net *net,
- const struct in6_addr *group,
- int ifindex)
+static struct inet6_dev *ip6_mc_find_idev(struct net *net,
+ const struct in6_addr *group,
+ int ifindex)
{
- struct net_device *dev = NULL;
- struct inet6_dev *idev = NULL;
-
- if (ifindex == 0) {
- struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, NULL, 0);
-
- if (rt) {
- dev = rt->dst.dev;
- ip6_rt_put(rt);
- }
- } else {
- dev = __dev_get_by_index(net, ifindex);
- }
+ struct net_device *dev;
+ struct inet6_dev *idev;
+ dev = ip6_mc_find_dev(net, group, ifindex);
if (!dev)
return NULL;
- idev = __in6_dev_get(dev);
- if (!idev)
- return NULL;
- if (idev->dead)
- return NULL;
+
+ idev = in6_dev_get(dev);
+ dev_put(dev);
+
return idev;
}
@@ -322,28 +331,10 @@ void __ipv6_sock_mc_close(struct sock *sk)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6_mc_socklist *mc_lst;
- struct net *net = sock_net(sk);
-
- ASSERT_RTNL();
while ((mc_lst = sock_dereference(np->ipv6_mc_list, sk)) != NULL) {
- struct net_device *dev;
-
np->ipv6_mc_list = mc_lst->next;
-
- dev = __dev_get_by_index(net, mc_lst->ifindex);
- if (dev) {
- struct inet6_dev *idev = __in6_dev_get(dev);
-
- ip6_mc_leave_src(sk, mc_lst, idev);
- if (idev)
- __ipv6_dev_mc_dec(idev, &mc_lst->addr);
- } else {
- ip6_mc_leave_src(sk, mc_lst, NULL);
- }
-
- atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
- kfree_rcu(mc_lst, rcu);
+ __ipv6_sock_mc_drop(sk, mc_lst);
}
}
@@ -354,24 +345,22 @@ void ipv6_sock_mc_close(struct sock *sk)
if (!rcu_access_pointer(np->ipv6_mc_list))
return;
- rtnl_lock();
lock_sock(sk);
__ipv6_sock_mc_close(sk);
release_sock(sk);
- rtnl_unlock();
}
int ip6_mc_source(int add, int omode, struct sock *sk,
- struct group_source_req *pgsr)
+ struct group_source_req *pgsr)
{
+ struct ipv6_pinfo *inet6 = inet6_sk(sk);
struct in6_addr *source, *group;
+ struct net *net = sock_net(sk);
struct ipv6_mc_socklist *pmc;
- struct inet6_dev *idev;
- struct ipv6_pinfo *inet6 = inet6_sk(sk);
struct ip6_sf_socklist *psl;
- struct net *net = sock_net(sk);
- int i, j, rv;
+ struct inet6_dev *idev;
int leavegroup = 0;
+ int i, j, rv;
int err;
source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
@@ -380,13 +369,19 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
if (!ipv6_addr_is_multicast(group))
return -EINVAL;
- idev = ip6_mc_find_dev_rtnl(net, group, pgsr->gsr_interface);
+ idev = ip6_mc_find_idev(net, group, pgsr->gsr_interface);
if (!idev)
return -ENODEV;
+ mutex_lock(&idev->mc_lock);
+
+ if (idev->dead) {
+ err = -ENODEV;
+ goto done;
+ }
+
err = -EADDRNOTAVAIL;
- mutex_lock(&idev->mc_lock);
for_each_pmc_socklock(inet6, sk, pmc) {
if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
continue;
@@ -483,6 +478,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
ip6_mc_add_src(idev, group, omode, 1, source, 1);
done:
mutex_unlock(&idev->mc_lock);
+ in6_dev_put(idev);
if (leavegroup)
err = ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
return err;
@@ -491,12 +487,12 @@ done:
int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
struct sockaddr_storage *list)
{
- const struct in6_addr *group;
- struct ipv6_mc_socklist *pmc;
- struct inet6_dev *idev;
struct ipv6_pinfo *inet6 = inet6_sk(sk);
struct ip6_sf_socklist *newpsl, *psl;
struct net *net = sock_net(sk);
+ const struct in6_addr *group;
+ struct ipv6_mc_socklist *pmc;
+ struct inet6_dev *idev;
int leavegroup = 0;
int i, err;
@@ -508,10 +504,17 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
gsf->gf_fmode != MCAST_EXCLUDE)
return -EINVAL;
- idev = ip6_mc_find_dev_rtnl(net, group, gsf->gf_interface);
+ idev = ip6_mc_find_idev(net, group, gsf->gf_interface);
if (!idev)
return -ENODEV;
+ mutex_lock(&idev->mc_lock);
+
+ if (idev->dead) {
+ err = -ENODEV;
+ goto done;
+ }
+
err = 0;
if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
@@ -544,24 +547,19 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
psin6 = (struct sockaddr_in6 *)list;
newpsl->sl_addr[i] = psin6->sin6_addr;
}
- mutex_lock(&idev->mc_lock);
+
err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
newpsl->sl_count, newpsl->sl_addr, 0);
if (err) {
- mutex_unlock(&idev->mc_lock);
sock_kfree_s(sk, newpsl, struct_size(newpsl, sl_addr,
newpsl->sl_max));
goto done;
}
- mutex_unlock(&idev->mc_lock);
} else {
newpsl = NULL;
- mutex_lock(&idev->mc_lock);
ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
- mutex_unlock(&idev->mc_lock);
}
- mutex_lock(&idev->mc_lock);
psl = sock_dereference(pmc->sflist, sk);
if (psl) {
ip6_mc_del_src(idev, group, pmc->sfmode,
@@ -571,12 +569,14 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
} else {
ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
}
+
rcu_assign_pointer(pmc->sflist, newpsl);
- mutex_unlock(&idev->mc_lock);
kfree_rcu(psl, rcu);
pmc->sfmode = gsf->gf_fmode;
err = 0;
done:
+ mutex_unlock(&idev->mc_lock);
+ in6_dev_put(idev);
if (leavegroup)
err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
return err;
@@ -597,10 +597,6 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
if (!ipv6_addr_is_multicast(group))
return -EINVAL;
- /* changes to the ipv6_mc_list require the socket lock and
- * rtnl lock. We have the socket lock, so reading the list is safe.
- */
-
for_each_pmc_socklock(inet6, sk, pmc) {
if (pmc->ifindex != gsf->gf_interface)
continue;
@@ -668,12 +664,13 @@ bool inet6_mc_check(const struct sock *sk, const struct in6_addr *mc_addr,
return rv;
}
-/* called with mc_lock */
static void igmp6_group_added(struct ifmcaddr6 *mc)
{
struct net_device *dev = mc->idev->dev;
char buf[MAX_ADDR_LEN];
+ mc_assert_locked(mc->idev);
+
if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
IPV6_ADDR_SCOPE_LINKLOCAL)
return;
@@ -703,12 +700,13 @@ static void igmp6_group_added(struct ifmcaddr6 *mc)
mld_ifc_event(mc->idev);
}
-/* called with mc_lock */
static void igmp6_group_dropped(struct ifmcaddr6 *mc)
{
struct net_device *dev = mc->idev->dev;
char buf[MAX_ADDR_LEN];
+ mc_assert_locked(mc->idev);
+
if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
IPV6_ADDR_SCOPE_LINKLOCAL)
return;
@@ -729,14 +727,13 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
refcount_dec(&mc->mca_refcnt);
}
-/*
- * deleted ifmcaddr6 manipulation
- * called with mc_lock
- */
+/* deleted ifmcaddr6 manipulation */
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
struct ifmcaddr6 *pmc;
+ mc_assert_locked(idev);
+
/* this is an "ifmcaddr6" for convenience; only the fields below
* are actually used. In particular, the refcnt and users are not
* used for management of the delete list. Using the same structure
@@ -770,54 +767,54 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
rcu_assign_pointer(idev->mc_tomb, pmc);
}
-/* called with mc_lock */
static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
struct ip6_sf_list *psf, *sources, *tomb;
struct in6_addr *pmca = &im->mca_addr;
struct ifmcaddr6 *pmc, *pmc_prev;
+ mc_assert_locked(idev);
+
pmc_prev = NULL;
for_each_mc_tomb(idev, pmc) {
if (ipv6_addr_equal(&pmc->mca_addr, pmca))
break;
pmc_prev = pmc;
}
- if (pmc) {
- if (pmc_prev)
- rcu_assign_pointer(pmc_prev->next, pmc->next);
- else
- rcu_assign_pointer(idev->mc_tomb, pmc->next);
- }
-
- if (pmc) {
- im->idev = pmc->idev;
- if (im->mca_sfmode == MCAST_INCLUDE) {
- tomb = rcu_replace_pointer(im->mca_tomb,
- mc_dereference(pmc->mca_tomb, pmc->idev),
- lockdep_is_held(&im->idev->mc_lock));
- rcu_assign_pointer(pmc->mca_tomb, tomb);
-
- sources = rcu_replace_pointer(im->mca_sources,
- mc_dereference(pmc->mca_sources, pmc->idev),
- lockdep_is_held(&im->idev->mc_lock));
- rcu_assign_pointer(pmc->mca_sources, sources);
- for_each_psf_mclock(im, psf)
- psf->sf_crcount = idev->mc_qrv;
- } else {
- im->mca_crcount = idev->mc_qrv;
- }
- in6_dev_put(pmc->idev);
- ip6_mc_clear_src(pmc);
- kfree_rcu(pmc, rcu);
+ if (!pmc)
+ return;
+ if (pmc_prev)
+ rcu_assign_pointer(pmc_prev->next, pmc->next);
+ else
+ rcu_assign_pointer(idev->mc_tomb, pmc->next);
+
+ im->idev = pmc->idev;
+ if (im->mca_sfmode == MCAST_INCLUDE) {
+ tomb = rcu_replace_pointer(im->mca_tomb,
+ mc_dereference(pmc->mca_tomb, pmc->idev),
+ lockdep_is_held(&im->idev->mc_lock));
+ rcu_assign_pointer(pmc->mca_tomb, tomb);
+
+ sources = rcu_replace_pointer(im->mca_sources,
+ mc_dereference(pmc->mca_sources, pmc->idev),
+ lockdep_is_held(&im->idev->mc_lock));
+ rcu_assign_pointer(pmc->mca_sources, sources);
+ for_each_psf_mclock(im, psf)
+ psf->sf_crcount = idev->mc_qrv;
+ } else {
+ im->mca_crcount = idev->mc_qrv;
}
+ ip6_mc_clear_src(pmc);
+ in6_dev_put(pmc->idev);
+ kfree_rcu(pmc, rcu);
}
-/* called with mc_lock */
static void mld_clear_delrec(struct inet6_dev *idev)
{
struct ifmcaddr6 *pmc, *nextpmc;
+ mc_assert_locked(idev);
+
pmc = mc_dereference(idev->mc_tomb, idev);
RCU_INIT_POINTER(idev->mc_tomb, NULL);
@@ -843,29 +840,18 @@ static void mld_clear_delrec(struct inet6_dev *idev)
static void mld_clear_query(struct inet6_dev *idev)
{
- struct sk_buff *skb;
-
spin_lock_bh(&idev->mc_query_lock);
- while ((skb = __skb_dequeue(&idev->mc_query_queue)))
- kfree_skb(skb);
+ __skb_queue_purge(&idev->mc_query_queue);
spin_unlock_bh(&idev->mc_query_lock);
}
static void mld_clear_report(struct inet6_dev *idev)
{
- struct sk_buff *skb;
-
spin_lock_bh(&idev->mc_report_lock);
- while ((skb = __skb_dequeue(&idev->mc_report_queue)))
- kfree_skb(skb);
+ __skb_queue_purge(&idev->mc_report_queue);
spin_unlock_bh(&idev->mc_report_lock);
}
-static void mca_get(struct ifmcaddr6 *mc)
-{
- refcount_inc(&mc->mca_refcnt);
-}
-
static void ma_put(struct ifmcaddr6 *mc)
{
if (refcount_dec_and_test(&mc->mca_refcnt)) {
@@ -874,13 +860,14 @@ static void ma_put(struct ifmcaddr6 *mc)
}
}
-/* called with mc_lock */
static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
const struct in6_addr *addr,
unsigned int mode)
{
struct ifmcaddr6 *mc;
+ mc_assert_locked(idev);
+
mc = kzalloc(sizeof(*mc), GFP_KERNEL);
if (!mc)
return NULL;
@@ -945,23 +932,22 @@ error:
static int __ipv6_dev_mc_inc(struct net_device *dev,
const struct in6_addr *addr, unsigned int mode)
{
- struct ifmcaddr6 *mc;
struct inet6_dev *idev;
-
- ASSERT_RTNL();
+ struct ifmcaddr6 *mc;
/* we need to take a reference on idev */
idev = in6_dev_get(dev);
-
if (!idev)
return -EINVAL;
- if (idev->dead) {
+ mutex_lock(&idev->mc_lock);
+
+ if (READ_ONCE(idev->dead)) {
+ mutex_unlock(&idev->mc_lock);
in6_dev_put(idev);
return -ENODEV;
}
- mutex_lock(&idev->mc_lock);
for_each_mc_mclock(idev, mc) {
if (ipv6_addr_equal(&mc->mca_addr, addr)) {
mc->mca_users++;
@@ -982,13 +968,11 @@ static int __ipv6_dev_mc_inc(struct net_device *dev,
rcu_assign_pointer(mc->next, idev->mc_list);
rcu_assign_pointer(idev->mc_list, mc);
- mca_get(mc);
-
mld_del_delrec(idev, mc);
igmp6_group_added(mc);
inet6_ifmcaddr_notify(dev, mc, RTM_NEWMULTICAST);
mutex_unlock(&idev->mc_lock);
- ma_put(mc);
+
return 0;
}
@@ -1005,9 +989,8 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
{
struct ifmcaddr6 *ma, __rcu **map;
- ASSERT_RTNL();
-
mutex_lock(&idev->mc_lock);
+
for (map = &idev->mc_list;
(ma = mc_dereference(*map, idev));
map = &ma->next) {
@@ -1038,13 +1021,12 @@ int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
struct inet6_dev *idev;
int err;
- ASSERT_RTNL();
-
- idev = __in6_dev_get(dev);
+ idev = in6_dev_get(dev);
if (!idev)
- err = -ENODEV;
- else
- err = __ipv6_dev_mc_dec(idev, addr);
+ return -ENODEV;
+
+ err = __ipv6_dev_mc_dec(idev, addr);
+ in6_dev_put(idev);
return err;
}
@@ -1091,46 +1073,51 @@ unlock:
return rv;
}
-/* called with mc_lock */
static void mld_gq_start_work(struct inet6_dev *idev)
{
unsigned long tv = get_random_u32_below(idev->mc_maxdelay);
+ mc_assert_locked(idev);
+
idev->mc_gq_running = 1;
if (!mod_delayed_work(mld_wq, &idev->mc_gq_work, tv + 2))
in6_dev_hold(idev);
}
-/* called with mc_lock */
static void mld_gq_stop_work(struct inet6_dev *idev)
{
+ mc_assert_locked(idev);
+
idev->mc_gq_running = 0;
if (cancel_delayed_work(&idev->mc_gq_work))
__in6_dev_put(idev);
}
-/* called with mc_lock */
static void mld_ifc_start_work(struct inet6_dev *idev, unsigned long delay)
{
unsigned long tv = get_random_u32_below(delay);
+ mc_assert_locked(idev);
+
if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, tv + 2))
in6_dev_hold(idev);
}
-/* called with mc_lock */
static void mld_ifc_stop_work(struct inet6_dev *idev)
{
+ mc_assert_locked(idev);
+
idev->mc_ifc_count = 0;
if (cancel_delayed_work(&idev->mc_ifc_work))
__in6_dev_put(idev);
}
-/* called with mc_lock */
static void mld_dad_start_work(struct inet6_dev *idev, unsigned long delay)
{
unsigned long tv = get_random_u32_below(delay);
+ mc_assert_locked(idev);
+
if (!mod_delayed_work(mld_wq, &idev->mc_dad_work, tv + 2))
in6_dev_hold(idev);
}
@@ -1155,14 +1142,13 @@ static void mld_report_stop_work(struct inet6_dev *idev)
__in6_dev_put(idev);
}
-/*
- * IGMP handling (alias multicast ICMPv6 messages)
- * called with mc_lock
- */
+/* IGMP handling (alias multicast ICMPv6 messages) */
static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
{
unsigned long delay = resptime;
+ mc_assert_locked(ma->idev);
+
/* Do not start work for these addresses */
if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
@@ -1181,15 +1167,15 @@ static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
ma->mca_flags |= MAF_TIMER_RUNNING;
}
-/* mark EXCLUDE-mode sources
- * called with mc_lock
- */
+/* mark EXCLUDE-mode sources */
static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
const struct in6_addr *srcs)
{
struct ip6_sf_list *psf;
int i, scount;
+ mc_assert_locked(pmc->idev);
+
scount = 0;
for_each_psf_mclock(pmc, psf) {
if (scount == nsrcs)
@@ -1212,13 +1198,14 @@ static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
return true;
}
-/* called with mc_lock */
static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
const struct in6_addr *srcs)
{
struct ip6_sf_list *psf;
int i, scount;
+ mc_assert_locked(pmc->idev);
+
if (pmc->mca_sfmode == MCAST_EXCLUDE)
return mld_xmarksources(pmc, nsrcs, srcs);
@@ -1913,7 +1900,6 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0)
-/* called with mc_lock */
static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
int type, int gdeleted, int sdeleted,
int crsend)
@@ -1927,6 +1913,8 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
struct mld2_report *pmr;
unsigned int mtu;
+ mc_assert_locked(idev);
+
if (pmc->mca_flags & MAF_NOREPORT)
return skb;
@@ -2045,12 +2033,13 @@ empty_source:
return skb;
}
-/* called with mc_lock */
static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
{
struct sk_buff *skb = NULL;
int type;
+ mc_assert_locked(idev);
+
if (!pmc) {
for_each_mc_mclock(idev, pmc) {
if (pmc->mca_flags & MAF_NOREPORT)
@@ -2072,10 +2061,7 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
mld_sendpack(skb);
}
-/*
- * remove zero-count source records from a source filter list
- * called with mc_lock
- */
+/* remove zero-count source records from a source filter list */
static void mld_clear_zeros(struct ip6_sf_list __rcu **ppsf, struct inet6_dev *idev)
{
struct ip6_sf_list *psf_prev, *psf_next, *psf;
@@ -2099,7 +2085,6 @@ static void mld_clear_zeros(struct ip6_sf_list __rcu **ppsf, struct inet6_dev *i
}
}
-/* called with mc_lock */
static void mld_send_cr(struct inet6_dev *idev)
{
struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
@@ -2263,13 +2248,14 @@ err_out:
goto out;
}
-/* called with mc_lock */
static void mld_send_initial_cr(struct inet6_dev *idev)
{
- struct sk_buff *skb;
struct ifmcaddr6 *pmc;
+ struct sk_buff *skb;
int type;
+ mc_assert_locked(idev);
+
if (mld_in_v1_mode(idev))
return;
@@ -2316,13 +2302,14 @@ static void mld_dad_work(struct work_struct *work)
in6_dev_put(idev);
}
-/* called with mc_lock */
static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
- const struct in6_addr *psfsrc)
+ const struct in6_addr *psfsrc)
{
struct ip6_sf_list *psf, *psf_prev;
int rv = 0;
+ mc_assert_locked(pmc->idev);
+
psf_prev = NULL;
for_each_psf_mclock(pmc, psf) {
if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
@@ -2359,7 +2346,6 @@ static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
return rv;
}
-/* called with mc_lock */
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
int sfmode, int sfcount, const struct in6_addr *psfsrc,
int delta)
@@ -2371,6 +2357,8 @@ static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
if (!idev)
return -ENODEV;
+ mc_assert_locked(idev);
+
for_each_mc_mclock(idev, pmc) {
if (ipv6_addr_equal(pmca, &pmc->mca_addr))
break;
@@ -2412,15 +2400,14 @@ static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
return err;
}
-/*
- * Add multicast single-source filter to the interface list
- * called with mc_lock
- */
+/* Add multicast single-source filter to the interface list */
static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
- const struct in6_addr *psfsrc)
+ const struct in6_addr *psfsrc)
{
struct ip6_sf_list *psf, *psf_prev;
+ mc_assert_locked(pmc->idev);
+
psf_prev = NULL;
for_each_psf_mclock(pmc, psf) {
if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
@@ -2443,11 +2430,12 @@ static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
return 0;
}
-/* called with mc_lock */
static void sf_markstate(struct ifmcaddr6 *pmc)
{
- struct ip6_sf_list *psf;
int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
+ struct ip6_sf_list *psf;
+
+ mc_assert_locked(pmc->idev);
for_each_psf_mclock(pmc, psf) {
if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
@@ -2460,14 +2448,15 @@ static void sf_markstate(struct ifmcaddr6 *pmc)
}
}
-/* called with mc_lock */
static int sf_setstate(struct ifmcaddr6 *pmc)
{
- struct ip6_sf_list *psf, *dpsf;
int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
+ struct ip6_sf_list *psf, *dpsf;
int qrv = pmc->idev->mc_qrv;
int new_in, rv;
+ mc_assert_locked(pmc->idev);
+
rv = 0;
for_each_psf_mclock(pmc, psf) {
if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
@@ -2526,10 +2515,7 @@ static int sf_setstate(struct ifmcaddr6 *pmc)
return rv;
}
-/*
- * Add multicast source filter list to the interface list
- * called with mc_lock
- */
+/* Add multicast source filter list to the interface list */
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
int sfmode, int sfcount, const struct in6_addr *psfsrc,
int delta)
@@ -2541,6 +2527,8 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
if (!idev)
return -ENODEV;
+ mc_assert_locked(idev);
+
for_each_mc_mclock(idev, pmc) {
if (ipv6_addr_equal(pmca, &pmc->mca_addr))
break;
@@ -2588,11 +2576,12 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
return err;
}
-/* called with mc_lock */
static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
{
struct ip6_sf_list *psf, *nextpsf;
+ mc_assert_locked(pmc->idev);
+
for (psf = mc_dereference(pmc->mca_tomb, pmc->idev);
psf;
psf = nextpsf) {
@@ -2613,11 +2602,12 @@ static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
WRITE_ONCE(pmc->mca_sfcount[MCAST_EXCLUDE], 1);
}
-/* called with mc_lock */
static void igmp6_join_group(struct ifmcaddr6 *ma)
{
unsigned long delay;
+ mc_assert_locked(ma->idev);
+
if (ma->mca_flags & MAF_NOREPORT)
return;
@@ -2664,9 +2654,10 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
return err;
}
-/* called with mc_lock */
static void igmp6_leave_group(struct ifmcaddr6 *ma)
{
+ mc_assert_locked(ma->idev);
+
if (mld_in_v1_mode(ma->idev)) {
if (ma->mca_flags & MAF_LAST_REPORTER) {
igmp6_send(&ma->mca_addr, ma->idev->dev,
@@ -2711,9 +2702,10 @@ static void mld_ifc_work(struct work_struct *work)
in6_dev_put(idev);
}
-/* called with mc_lock */
static void mld_ifc_event(struct inet6_dev *idev)
{
+ mc_assert_locked(idev);
+
if (mld_in_v1_mode(idev))
return;
@@ -2868,8 +2860,6 @@ static void ipv6_mc_rejoin_groups(struct inet6_dev *idev)
{
struct ifmcaddr6 *pmc;
- ASSERT_RTNL();
-
mutex_lock(&idev->mc_lock);
if (mld_in_v1_mode(idev)) {
for_each_mc_mclock(idev, pmc)
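
A hedged sketch of the convention the mcast.c changes above establish: the informal "called with mc_lock" comments become a lockdep assertion, so each helper that walks or mutates the per-interface multicast lists both documents and enforces its locking requirement. The macro is the one added by the patch; example_mc_walk() is a hypothetical caller shown only to illustrate its use:

	#define mc_assert_locked(idev) \
		lockdep_assert_held(&(idev)->mc_lock)

	static void example_mc_walk(struct inet6_dev *idev)
	{
		struct ifmcaddr6 *mc;

		mc_assert_locked(idev);	/* lockdep splat if mc_lock is not held */

		for_each_mc_mclock(idev, mc) {
			/* list is stable while idev->mc_lock is held */
		}
	}

Callers take mutex_lock(&idev->mc_lock) around such walks, as __ipv6_dev_mc_inc() does in the hunks above.
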
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index ecb5c4b8518f..f427e41e9c49 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -130,7 +130,7 @@ struct neigh_table nd_tbl = {
[NEIGH_VAR_DELAY_PROBE_TIME] = 5 * HZ,
[NEIGH_VAR_INTERVAL_PROBE_TIME_MS] = 5 * HZ,
[NEIGH_VAR_GC_STALETIME] = 60 * HZ,
- [NEIGH_VAR_QUEUE_LEN_BYTES] = SK_WMEM_MAX,
+ [NEIGH_VAR_QUEUE_LEN_BYTES] = SK_WMEM_DEFAULT,
[NEIGH_VAR_PROXY_QLEN] = 64,
[NEIGH_VAR_ANYCAST_DELAY] = 1 * HZ,
[NEIGH_VAR_PROXY_DELAY] = (8 * HZ) / 10,
@@ -243,9 +243,8 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
case ND_OPT_NONCE:
case ND_OPT_REDIRECT_HDR:
if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) {
- ND_PRINTK(2, warn,
- "%s: duplicated ND6 option found: type=%d\n",
- __func__, nd_opt->nd_opt_type);
+ net_dbg_ratelimited("%s: duplicated ND6 option found: type=%d\n",
+ __func__, nd_opt->nd_opt_type);
} else {
ndopts->nd_opt_array[nd_opt->nd_opt_type] = nd_opt;
}
@@ -275,11 +274,8 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
* to accommodate future extension to the
* protocol.
*/
- ND_PRINTK(2, notice,
- "%s: ignored unsupported option; type=%d, len=%d\n",
- __func__,
- nd_opt->nd_opt_type,
- nd_opt->nd_opt_len);
+ net_dbg_ratelimited("%s: ignored unsupported option; type=%d, len=%d\n",
+ __func__, nd_opt->nd_opt_type, nd_opt->nd_opt_len);
}
next_opt:
opt_len -= l;
@@ -377,24 +373,25 @@ static int ndisc_constructor(struct neighbour *neigh)
static int pndisc_constructor(struct pneigh_entry *n)
{
struct in6_addr *addr = (struct in6_addr *)&n->key;
- struct in6_addr maddr;
struct net_device *dev = n->dev;
+ struct in6_addr maddr;
- if (!dev || !__in6_dev_get(dev))
+ if (!dev)
return -EINVAL;
+
addrconf_addr_solict_mult(addr, &maddr);
- ipv6_dev_mc_inc(dev, &maddr);
- return 0;
+ return ipv6_dev_mc_inc(dev, &maddr);
}
static void pndisc_destructor(struct pneigh_entry *n)
{
struct in6_addr *addr = (struct in6_addr *)&n->key;
- struct in6_addr maddr;
struct net_device *dev = n->dev;
+ struct in6_addr maddr;
- if (!dev || !__in6_dev_get(dev))
+ if (!dev)
return;
+
addrconf_addr_solict_mult(addr, &maddr);
ipv6_dev_mc_dec(dev, &maddr);
}
@@ -473,6 +470,7 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
{
struct icmp6hdr *icmp6h = icmp6_hdr(skb);
struct dst_entry *dst = skb_dst(skb);
+ struct net_device *dev;
struct inet6_dev *idev;
struct net *net;
struct sock *sk;
@@ -507,11 +505,12 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr,
ip6_nd_hdr(skb, saddr, daddr, READ_ONCE(inet6_sk(sk)->hop_limit), skb->len);
- idev = __in6_dev_get(dst->dev);
+ dev = dst_dev_rcu(dst);
+ idev = __in6_dev_get(dev);
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
- net, sk, skb, NULL, dst->dev,
+ net, sk, skb, NULL, dev,
dst_output);
if (!err) {
ICMP6MSGOUT_INC_STATS(net, idev, type);
@@ -751,9 +750,8 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
probes -= NEIGH_VAR(neigh->parms, UCAST_PROBES);
if (probes < 0) {
if (!(READ_ONCE(neigh->nud_state) & NUD_VALID)) {
- ND_PRINTK(1, dbg,
- "%s: trying to ucast probe in NUD_INVALID: %pI6\n",
- __func__, target);
+ net_dbg_ratelimited("%s: trying to ucast probe in NUD_INVALID: %pI6\n",
+ __func__, target);
}
ndisc_send_ns(dev, target, target, saddr, 0);
} else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) {
@@ -770,11 +768,9 @@ static int pndisc_is_router(const void *pkey,
struct pneigh_entry *n;
int ret = -1;
- read_lock_bh(&nd_tbl.lock);
- n = __pneigh_lookup(&nd_tbl, dev_net(dev), pkey, dev);
+ n = pneigh_lookup(&nd_tbl, dev_net(dev), pkey, dev);
if (n)
- ret = !!(n->flags & NTF_ROUTER);
- read_unlock_bh(&nd_tbl.lock);
+ ret = !!(READ_ONCE(n->flags) & NTF_ROUTER);
return ret;
}
@@ -811,7 +807,7 @@ static enum skb_drop_reason ndisc_recv_ns(struct sk_buff *skb)
return SKB_DROP_REASON_PKT_TOO_SMALL;
if (ipv6_addr_is_multicast(&msg->target)) {
- ND_PRINTK(2, warn, "NS: multicast target address\n");
+ net_dbg_ratelimited("NS: multicast target address\n");
return reason;
}
@@ -820,7 +816,7 @@ static enum skb_drop_reason ndisc_recv_ns(struct sk_buff *skb)
* DAD has to be destined for solicited node multicast address.
*/
if (dad && !ipv6_addr_is_solict_mult(daddr)) {
- ND_PRINTK(2, warn, "NS: bad DAD packet (wrong destination)\n");
+ net_dbg_ratelimited("NS: bad DAD packet (wrong destination)\n");
return reason;
}
@@ -830,8 +826,7 @@ static enum skb_drop_reason ndisc_recv_ns(struct sk_buff *skb)
if (ndopts.nd_opts_src_lladdr) {
lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr, dev);
if (!lladdr) {
- ND_PRINTK(2, warn,
- "NS: invalid link-layer address length\n");
+ net_dbg_ratelimited("NS: invalid link-layer address length\n");
return reason;
}
@@ -841,8 +836,7 @@ static enum skb_drop_reason ndisc_recv_ns(struct sk_buff *skb)
* in the message.
*/
if (dad) {
- ND_PRINTK(2, warn,
- "NS: bad DAD packet (link-layer address option)\n");
+ net_dbg_ratelimited("NS: bad DAD packet (link-layer address option)\n");
return reason;
}
}
@@ -859,10 +853,8 @@ have_ifp:
if (nonce != 0 && ifp->dad_nonce == nonce) {
u8 *np = (u8 *)&nonce;
/* Matching nonce if looped back */
- ND_PRINTK(2, notice,
- "%s: IPv6 DAD loopback for address %pI6c nonce %pM ignored\n",
- ifp->idev->dev->name,
- &ifp->addr, np);
+ net_dbg_ratelimited("%s: IPv6 DAD loopback for address %pI6c nonce %pM ignored\n",
+ ifp->idev->dev->name, &ifp->addr, np);
goto out;
}
/*
@@ -1013,13 +1005,13 @@ static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
return SKB_DROP_REASON_PKT_TOO_SMALL;
if (ipv6_addr_is_multicast(&msg->target)) {
- ND_PRINTK(2, warn, "NA: target address is multicast\n");
+ net_dbg_ratelimited("NA: target address is multicast\n");
return reason;
}
if (ipv6_addr_is_multicast(daddr) &&
msg->icmph.icmp6_solicited) {
- ND_PRINTK(2, warn, "NA: solicited NA is multicasted\n");
+ net_dbg_ratelimited("NA: solicited NA is multicasted\n");
return reason;
}
@@ -1038,8 +1030,7 @@ static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
if (ndopts.nd_opts_tgt_lladdr) {
lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr, dev);
if (!lladdr) {
- ND_PRINTK(2, warn,
- "NA: invalid link-layer address length\n");
+ net_dbg_ratelimited("NA: invalid link-layer address length\n");
return reason;
}
}
@@ -1060,9 +1051,9 @@ static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
unsolicited advertisement.
*/
if (skb->pkt_type != PACKET_LOOPBACK)
- ND_PRINTK(1, warn,
- "NA: %pM advertised our address %pI6c on %s!\n",
- eth_hdr(skb)->h_source, &ifp->addr, ifp->idev->dev->name);
+ net_warn_ratelimited("NA: %pM advertised our address %pI6c on %s!\n",
+ eth_hdr(skb)->h_source, &ifp->addr,
+ ifp->idev->dev->name);
in6_ifa_put(ifp);
return reason;
}
@@ -1107,7 +1098,7 @@ static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
if (lladdr && !memcmp(lladdr, dev->dev_addr, dev->addr_len) &&
READ_ONCE(net->ipv6.devconf_all->forwarding) &&
READ_ONCE(net->ipv6.devconf_all->proxy_ndp) &&
- pneigh_lookup(&nd_tbl, net, &msg->target, dev, 0)) {
+ pneigh_lookup(&nd_tbl, net, &msg->target, dev)) {
/* XXX: idev->cnf.proxy_ndp */
goto out;
}
@@ -1149,7 +1140,7 @@ static enum skb_drop_reason ndisc_recv_rs(struct sk_buff *skb)
idev = __in6_dev_get(skb->dev);
if (!idev) {
- ND_PRINTK(1, err, "RS: can't find in6 device\n");
+ net_err_ratelimited("RS: can't find in6 device\n");
return reason;
}
@@ -1257,11 +1248,9 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
optlen = (skb_tail_pointer(skb) - skb_transport_header(skb)) -
sizeof(struct ra_msg);
- ND_PRINTK(2, info,
- "RA: %s, dev: %s\n",
- __func__, skb->dev->name);
+ net_dbg_ratelimited("RA: %s, dev: %s\n", __func__, skb->dev->name);
if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
- ND_PRINTK(2, warn, "RA: source address is not link-local\n");
+ net_dbg_ratelimited("RA: source address is not link-local\n");
return reason;
}
if (optlen < 0)
@@ -1269,15 +1258,14 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
#ifdef CONFIG_IPV6_NDISC_NODETYPE
if (skb->ndisc_nodetype == NDISC_NODETYPE_HOST) {
- ND_PRINTK(2, warn, "RA: from host or unauthorized router\n");
+ net_dbg_ratelimited("RA: from host or unauthorized router\n");
return reason;
}
#endif
in6_dev = __in6_dev_get(skb->dev);
if (!in6_dev) {
- ND_PRINTK(0, err, "RA: can't find inet6 device for %s\n",
- skb->dev->name);
+ net_err_ratelimited("RA: can't find inet6 device for %s\n", skb->dev->name);
return reason;
}
@@ -1285,18 +1273,16 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
return SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS;
if (!ipv6_accept_ra(in6_dev)) {
- ND_PRINTK(2, info,
- "RA: %s, did not accept ra for dev: %s\n",
- __func__, skb->dev->name);
+ net_dbg_ratelimited("RA: %s, did not accept ra for dev: %s\n", __func__,
+ skb->dev->name);
goto skip_linkparms;
}
#ifdef CONFIG_IPV6_NDISC_NODETYPE
/* skip link-specific parameters from interior routers */
if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT) {
- ND_PRINTK(2, info,
- "RA: %s, nodetype is NODEFAULT, dev: %s\n",
- __func__, skb->dev->name);
+ net_dbg_ratelimited("RA: %s, nodetype is NODEFAULT, dev: %s\n", __func__,
+ skb->dev->name);
goto skip_linkparms;
}
#endif
@@ -1325,18 +1311,16 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
send_ifinfo_notify = true;
if (!READ_ONCE(in6_dev->cnf.accept_ra_defrtr)) {
- ND_PRINTK(2, info,
- "RA: %s, defrtr is false for dev: %s\n",
- __func__, skb->dev->name);
+ net_dbg_ratelimited("RA: %s, defrtr is false for dev: %s\n", __func__,
+ skb->dev->name);
goto skip_defrtr;
}
lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
if (lifetime != 0 &&
lifetime < READ_ONCE(in6_dev->cnf.accept_ra_min_lft)) {
- ND_PRINTK(2, info,
- "RA: router lifetime (%ds) is too short: %s\n",
- lifetime, skb->dev->name);
+ net_dbg_ratelimited("RA: router lifetime (%ds) is too short: %s\n", lifetime,
+ skb->dev->name);
goto skip_defrtr;
}
@@ -1346,9 +1330,8 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
net = dev_net(in6_dev->dev);
if (!READ_ONCE(in6_dev->cnf.accept_ra_from_local) &&
ipv6_chk_addr(net, &ipv6_hdr(skb)->saddr, in6_dev->dev, 0)) {
- ND_PRINTK(2, info,
- "RA from local address detected on dev: %s: default router ignored\n",
- skb->dev->name);
+ net_dbg_ratelimited("RA from local address detected on dev: %s: default router ignored\n",
+ skb->dev->name);
goto skip_defrtr;
}
@@ -1366,9 +1349,8 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
rt->fib6_nh->fib_nh_dev, NULL,
&ipv6_hdr(skb)->saddr);
if (!neigh) {
- ND_PRINTK(0, err,
- "RA: %s got default router without neighbour\n",
- __func__);
+ net_err_ratelimited("RA: %s got default router without neighbour\n",
+ __func__);
fib6_info_release(rt);
return reason;
}
@@ -1381,10 +1363,10 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
rt = NULL;
}
- ND_PRINTK(3, info, "RA: rt: %p lifetime: %d, metric: %d, for dev: %s\n",
- rt, lifetime, defrtr_usr_metric, skb->dev->name);
+ net_dbg_ratelimited("RA: rt: %p lifetime: %d, metric: %d, for dev: %s\n", rt, lifetime,
+ defrtr_usr_metric, skb->dev->name);
if (!rt && lifetime) {
- ND_PRINTK(3, info, "RA: adding default router\n");
+ net_dbg_ratelimited("RA: adding default router\n");
if (neigh)
neigh_release(neigh);
@@ -1393,9 +1375,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
skb->dev, pref, defrtr_usr_metric,
lifetime);
if (!rt) {
- ND_PRINTK(0, err,
- "RA: %s failed to add default route\n",
- __func__);
+ net_err_ratelimited("RA: %s failed to add default route\n", __func__);
return reason;
}
@@ -1403,9 +1383,8 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
rt->fib6_nh->fib_nh_dev, NULL,
&ipv6_hdr(skb)->saddr);
if (!neigh) {
- ND_PRINTK(0, err,
- "RA: %s got default router without neighbour\n",
- __func__);
+ net_err_ratelimited("RA: %s got default router without neighbour\n",
+ __func__);
fib6_info_release(rt);
return reason;
}
@@ -1436,7 +1415,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
fib6_metric_set(rt, RTAX_HOPLIMIT,
ra_msg->icmph.icmp6_hop_limit);
} else {
- ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than minimum\n");
+ net_dbg_ratelimited("RA: Got route advertisement with lower hop_limit than minimum\n");
}
}
@@ -1492,8 +1471,7 @@ skip_linkparms:
lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr,
skb->dev);
if (!lladdr) {
- ND_PRINTK(2, warn,
- "RA: invalid link-layer address length\n");
+ net_dbg_ratelimited("RA: invalid link-layer address length\n");
goto out;
}
}
@@ -1507,9 +1485,8 @@ skip_linkparms:
}
if (!ipv6_accept_ra(in6_dev)) {
- ND_PRINTK(2, info,
- "RA: %s, accept_ra is false for dev: %s\n",
- __func__, skb->dev->name);
+ net_dbg_ratelimited("RA: %s, accept_ra is false for dev: %s\n", __func__,
+ skb->dev->name);
goto out;
}
@@ -1517,9 +1494,8 @@ skip_linkparms:
if (!READ_ONCE(in6_dev->cnf.accept_ra_from_local) &&
ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr,
in6_dev->dev, 0)) {
- ND_PRINTK(2, info,
- "RA from local address detected on dev: %s: router info ignored.\n",
- skb->dev->name);
+ net_dbg_ratelimited("RA from local address detected on dev: %s: router info ignored.\n",
+ skb->dev->name);
goto skip_routeinfo;
}
@@ -1555,9 +1531,8 @@ skip_routeinfo:
#ifdef CONFIG_IPV6_NDISC_NODETYPE
/* skip link-specific ndopts from interior routers */
if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT) {
- ND_PRINTK(2, info,
- "RA: %s, nodetype is NODEFAULT (interior routes), dev: %s\n",
- __func__, skb->dev->name);
+ net_dbg_ratelimited("RA: %s, nodetype is NODEFAULT (interior routes), dev: %s\n",
+ __func__, skb->dev->name);
goto out;
}
#endif
@@ -1586,7 +1561,7 @@ skip_routeinfo:
}
if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) {
- ND_PRINTK(2, warn, "RA: invalid mtu: %d\n", mtu);
+ net_dbg_ratelimited("RA: invalid mtu: %d\n", mtu);
} else if (READ_ONCE(in6_dev->cnf.mtu6) != mtu) {
WRITE_ONCE(in6_dev->cnf.mtu6, mtu);
fib6_metric_set(rt, RTAX_MTU, mtu);
@@ -1605,7 +1580,7 @@ skip_routeinfo:
}
if (ndopts.nd_opts_tgt_lladdr || ndopts.nd_opts_rh) {
- ND_PRINTK(2, warn, "RA: invalid RA options\n");
+ net_dbg_ratelimited("RA: invalid RA options\n");
}
out:
/* Send a notify if RA changed managed/otherconf flags or
@@ -1633,15 +1608,13 @@ static enum skb_drop_reason ndisc_redirect_rcv(struct sk_buff *skb)
switch (skb->ndisc_nodetype) {
case NDISC_NODETYPE_HOST:
case NDISC_NODETYPE_NODEFAULT:
- ND_PRINTK(2, warn,
- "Redirect: from host or unauthorized router\n");
+ net_dbg_ratelimited("Redirect: from host or unauthorized router\n");
return reason;
}
#endif
if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
- ND_PRINTK(2, warn,
- "Redirect: source address is not link-local\n");
+ net_dbg_ratelimited("Redirect: source address is not link-local\n");
return reason;
}
@@ -1702,15 +1675,13 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
}
if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) {
- ND_PRINTK(2, warn, "Redirect: no link-local address on %s\n",
- dev->name);
+ net_dbg_ratelimited("Redirect: no link-local address on %s\n", dev->name);
return;
}
if (!ipv6_addr_equal(&ipv6_hdr(skb)->daddr, target) &&
ipv6_addr_type(target) != (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
- ND_PRINTK(2, warn,
- "Redirect: target address is not link-local unicast\n");
+ net_dbg_ratelimited("Redirect: target address is not link-local unicast\n");
return;
}
@@ -1729,8 +1700,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
rt = dst_rt6_info(dst);
if (rt->rt6i_flags & RTF_GATEWAY) {
- ND_PRINTK(2, warn,
- "Redirect: destination is not a neighbour\n");
+ net_dbg_ratelimited("Redirect: destination is not a neighbour\n");
goto release;
}
@@ -1743,8 +1713,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
if (dev->addr_len) {
struct neighbour *neigh = dst_neigh_lookup(skb_dst(skb), target);
if (!neigh) {
- ND_PRINTK(2, warn,
- "Redirect: no neigh for target address\n");
+ net_dbg_ratelimited("Redirect: no neigh for target address\n");
goto release;
}
@@ -1845,14 +1814,12 @@ enum skb_drop_reason ndisc_rcv(struct sk_buff *skb)
__skb_push(skb, skb->data - skb_transport_header(skb));
if (ipv6_hdr(skb)->hop_limit != 255) {
- ND_PRINTK(2, warn, "NDISC: invalid hop-limit: %d\n",
- ipv6_hdr(skb)->hop_limit);
+ net_dbg_ratelimited("NDISC: invalid hop-limit: %d\n", ipv6_hdr(skb)->hop_limit);
return SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT;
}
if (msg->icmph.icmp6_code != 0) {
- ND_PRINTK(2, warn, "NDISC: invalid ICMPv6 code: %d\n",
- msg->icmph.icmp6_code);
+ net_dbg_ratelimited("NDISC: invalid ICMPv6 code: %d\n", msg->icmph.icmp6_code);
return SKB_DROP_REASON_IPV6_NDISC_BAD_CODE;
}
@@ -2003,9 +1970,8 @@ static int __net_init ndisc_net_init(struct net *net)
err = inet_ctl_sock_create(&sk, PF_INET6,
SOCK_RAW, IPPROTO_ICMPV6, net);
if (err < 0) {
- ND_PRINTK(0, err,
- "NDISC: Failed to initialize the control socket (err %d)\n",
- err);
+ net_err_ratelimited("NDISC: Failed to initialize the control socket (err %d)\n",
+ err);
return err;
}
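
The ndisc.c hunks above swap the module-private ND_PRINTK() macro for the generic net_dbg_ratelimited()/net_err_ratelimited() helpers, so neighbour-discovery diagnostics go through the kernel's shared printk ratelimit machinery instead of a per-file verbosity level. As a rough userspace sketch of the ratelimiting idea only (the 5-second window and burst of 10 are made-up values, not the kernel's defaults):

#include <stdarg.h>
#include <stdio.h>
#include <time.h>

/* Emit at most 'burst' messages per 'interval' seconds; silently drop the rest. */
static int log_ratelimited(const char *fmt, ...)
{
	static time_t window_start;
	static int emitted;
	const int interval = 5, burst = 10;
	time_t now = time(NULL);
	va_list ap;

	if (now - window_start >= interval) {
		window_start = now;
		emitted = 0;
	}
	if (emitted >= burst)
		return 0;	/* suppressed, as the *_ratelimited() helpers do under message floods */
	emitted++;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	return 1;
}
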
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 4541836ee3da..46540a5a4331 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -24,7 +24,7 @@ int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
struct sock *sk = sk_to_full_sk(sk_partial);
- struct net_device *dev = skb_dst(skb)->dev;
+ struct net_device *dev = skb_dst_dev(skb);
struct flow_keys flkeys;
unsigned int hh_len;
struct dst_entry *dst;
@@ -63,7 +63,10 @@ int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff
#ifdef CONFIG_XFRM
if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
xfrm_decode_session(net, skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
- skb_dst_set(skb, NULL);
+ /* ignore return value from skb_dstref_steal, xfrm_lookup takes
+ * care of dropping the refcnt if needed.
+ */
+ skb_dstref_steal(skb);
dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
if (IS_ERR(dst))
return PTR_ERR(dst);
@@ -72,7 +75,7 @@ int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff
#endif
/* Change in oif may mean change in hh_len. */
- hh_len = skb_dst(skb)->dev->hard_header_len;
+ hh_len = skb_dst_dev(skb)->hard_header_len;
if (skb_headroom(skb) < hh_len &&
pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
0, GFP_ATOMIC))
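
In ip6_route_me_harder() the skb_dst_set(skb, NULL) call becomes skb_dstref_steal(): the skb detaches its route without dropping the reference, and ownership passes to xfrm_lookup(), which (per the new comment) releases the refcount itself if needed. A minimal sketch of that ownership-transfer pattern in plain C, with hypothetical names:

#include <stdlib.h>

struct pkt { void *route; };

/* Detach and return the attached route; the caller now owns the reference. */
static void *route_steal(struct pkt *p)
{
	void *old = p->route;

	p->route = NULL;
	return old;
}

/* Consumes the reference it is given: on failure it would release it and
 * return NULL, on success it returns a route for the caller to re-attach. */
static void *route_transform(void *route)
{
	if (!route)
		return NULL;
	return route;
}

The caller re-attaches whatever comes back, which mirrors the skb_dst_set(skb, dst) that follows xfrm_lookup() in the hunk above.
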
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index e087a8e97ba7..81daf82ddc2d 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -9,9 +9,9 @@ menu "IPv6: Netfilter Configuration"
# old sockopt interface and eval loop
config IP6_NF_IPTABLES_LEGACY
tristate "Legacy IP6 tables support"
- depends on INET && IPV6
- select NETFILTER_XTABLES
- default n
+ depends on INET && IPV6 && NETFILTER_XTABLES_LEGACY
+ depends on NETFILTER_XTABLES
+ default m if NETFILTER_XTABLES_LEGACY
help
ip6tables is a legacy packet classifier.
This is not needed if you are using iptables over nftables
@@ -196,8 +196,8 @@ config IP6_NF_TARGET_HL
config IP6_NF_FILTER
tristate "Packet filtering"
- default m if NETFILTER_ADVANCED=n
- select IP6_NF_IPTABLES_LEGACY
+ default m if NETFILTER_ADVANCED=n || IP6_NF_IPTABLES_LEGACY
+ depends on IP6_NF_IPTABLES_LEGACY
tristate
help
Packet filtering defines a table `filter', which has a series of
@@ -233,8 +233,8 @@ config IP6_NF_TARGET_SYNPROXY
config IP6_NF_MANGLE
tristate "Packet mangling"
- default m if NETFILTER_ADVANCED=n
- select IP6_NF_IPTABLES_LEGACY
+ default m if NETFILTER_ADVANCED=n || IP6_NF_IPTABLES_LEGACY
+ depends on IP6_NF_IPTABLES_LEGACY
help
This option adds a `mangle' table to iptables: see the man page for
iptables(8). This table is used for various packet alterations
@@ -244,7 +244,7 @@ config IP6_NF_MANGLE
config IP6_NF_RAW
tristate 'raw table support (required for TRACE)'
- select IP6_NF_IPTABLES_LEGACY
+ depends on IP6_NF_IPTABLES_LEGACY
help
This option adds a `raw' table to ip6tables. This table is the very
first in the netfilter framework and hooks in at the PREROUTING
@@ -258,7 +258,7 @@ config IP6_NF_SECURITY
tristate "Security table"
depends on SECURITY
depends on NETFILTER_ADVANCED
- select IP6_NF_IPTABLES_LEGACY
+ depends on IP6_NF_IPTABLES_LEGACY
help
This option adds a `security' table to iptables, for use
with Mandatory Access Control (MAC) policy.
@@ -269,8 +269,8 @@ config IP6_NF_NAT
tristate "ip6tables NAT support"
depends on NF_CONNTRACK
depends on NETFILTER_ADVANCED
+ depends on IP6_NF_IPTABLES_LEGACY
select NF_NAT
- select IP6_NF_IPTABLES_LEGACY
select NETFILTER_XT_NAT
help
This enables the `nat' table in ip6tables. This allows masquerading,
diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c
index b903c62c00c9..6da3102b7c1b 100644
--- a/net/ipv6/netfilter/nf_dup_ipv6.c
+++ b/net/ipv6/netfilter/nf_dup_ipv6.c
@@ -38,7 +38,7 @@ static bool nf_dup_ipv6_route(struct net *net, struct sk_buff *skb,
}
skb_dst_drop(skb);
skb_dst_set(skb, dst);
- skb->dev = dst->dev;
+ skb->dev = dst_dev(dst);
skb->protocol = htons(ETH_P_IPV6);
return true;
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 9ae2b2725bf9..ef5b7e85cffa 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -12,6 +12,19 @@
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
+static struct ipv6hdr *
+nf_reject_ip6hdr_put(struct sk_buff *nskb,
+ const struct sk_buff *oldskb,
+ __u8 protocol, int hoplimit);
+static void
+nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
+ const struct sk_buff *oldskb,
+ const struct tcphdr *oth, unsigned int otcplen);
+static const struct tcphdr *
+nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
+ struct tcphdr *otcph,
+ unsigned int *otcplen, int hook);
+
static bool nf_reject_v6_csum_ok(struct sk_buff *skb, int hook)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
@@ -91,6 +104,32 @@ struct sk_buff *nf_reject_skb_v6_tcp_reset(struct net *net,
}
EXPORT_SYMBOL_GPL(nf_reject_skb_v6_tcp_reset);
+static bool nf_skb_is_icmp6_unreach(const struct sk_buff *skb)
+{
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ u8 proto = ip6h->nexthdr;
+ u8 _type, *tp;
+ int thoff;
+ __be16 fo;
+
+ thoff = ipv6_skip_exthdr(skb, ((u8 *)(ip6h + 1) - skb->data), &proto, &fo);
+
+ if (thoff < 0 || thoff >= skb->len || fo != 0)
+ return false;
+
+ if (proto != IPPROTO_ICMPV6)
+ return false;
+
+ tp = skb_header_pointer(skb,
+ thoff + offsetof(struct icmp6hdr, icmp6_type),
+ sizeof(_type), &_type);
+
+ if (!tp)
+ return false;
+
+ return *tp == ICMPV6_DEST_UNREACH;
+}
+
struct sk_buff *nf_reject_skb_v6_unreach(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
@@ -104,6 +143,10 @@ struct sk_buff *nf_reject_skb_v6_unreach(struct net *net,
if (!nf_reject_ip6hdr_validate(oldskb))
return NULL;
+ /* Don't reply to ICMPV6_DEST_UNREACH with ICMPV6_DEST_UNREACH */
+ if (nf_skb_is_icmp6_unreach(oldskb))
+ return NULL;
+
/* Include "As much of invoking packet as possible without the ICMPv6
* packet exceeding the minimum IPv6 MTU" in the ICMP payload.
*/
@@ -146,9 +189,10 @@ struct sk_buff *nf_reject_skb_v6_unreach(struct net *net,
}
EXPORT_SYMBOL_GPL(nf_reject_skb_v6_unreach);
-const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
- struct tcphdr *otcph,
- unsigned int *otcplen, int hook)
+static const struct tcphdr *
+nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
+ struct tcphdr *otcph,
+ unsigned int *otcplen, int hook)
{
const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
u8 proto;
@@ -192,11 +236,11 @@ const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
return otcph;
}
-EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_get);
-struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
- const struct sk_buff *oldskb,
- __u8 protocol, int hoplimit)
+static struct ipv6hdr *
+nf_reject_ip6hdr_put(struct sk_buff *nskb,
+ const struct sk_buff *oldskb,
+ __u8 protocol, int hoplimit)
{
struct ipv6hdr *ip6h;
const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
@@ -216,11 +260,11 @@ struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
return ip6h;
}
-EXPORT_SYMBOL_GPL(nf_reject_ip6hdr_put);
-void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
- const struct sk_buff *oldskb,
- const struct tcphdr *oth, unsigned int otcplen)
+static void
+nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
+ const struct sk_buff *oldskb,
+ const struct tcphdr *oth, unsigned int otcplen)
{
struct tcphdr *tcph;
@@ -248,7 +292,6 @@ void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
csum_partial(tcph,
sizeof(struct tcphdr), 0));
}
-EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_put);
static int nf_reject6_fill_skb_dst(struct sk_buff *skb_in)
{
@@ -293,14 +336,14 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
fl6.fl6_sport = otcph->dest;
fl6.fl6_dport = otcph->source;
- if (hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) {
+ if (!skb_dst(oldskb)) {
nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false);
if (!dst)
return;
skb_dst_set(oldskb, dst);
}
- fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
+ fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst_dev(oldskb));
fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
security_skb_classify_flow(oldskb, flowi6_to_flowi_common(&fl6));
dst = ip6_route_output(net, NULL, &fl6);
@@ -397,8 +440,7 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in,
if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
skb_in->dev = net->loopback_dev;
- if ((hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_INGRESS) &&
- nf_reject6_fill_skb_dst(skb_in) < 0)
+ if (!skb_dst(skb_in) && nf_reject6_fill_skb_dst(skb_in) < 0)
return;
icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
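
The new nf_skb_is_icmp6_unreach() helper walks past extension headers with ipv6_skip_exthdr() and reads the first ICMPv6 byte via skb_header_pointer(), so nf_reject_skb_v6_unreach() never answers a Destination Unreachable error with another one. A self-contained userspace version of the same check, simplified to packets without extension headers (constants written out, names hypothetical):

#include <stddef.h>
#include <stdint.h>

#define NEXTHDR_ICMPV6          58	/* IPPROTO_ICMPV6 */
#define ICMP6_TYPE_DEST_UNREACH 1

/* Return 1 if 'pkt' looks like an IPv6 packet carrying ICMPv6 Destination Unreachable. */
static int is_icmp6_dest_unreach(const uint8_t *pkt, size_t len)
{
	if (len < 40 + 1)		/* fixed IPv6 header plus at least the ICMPv6 type byte */
		return 0;
	if ((pkt[0] >> 4) != 6)		/* version nibble */
		return 0;
	if (pkt[6] != NEXTHDR_ICMPV6)	/* Next Header; real code must skip extension headers */
		return 0;
	return pkt[40] == ICMP6_TYPE_DEST_UNREACH;	/* ICMPv6 type follows the 40-byte header */
}
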
diff --git a/net/ipv6/netfilter/nf_socket_ipv6.c b/net/ipv6/netfilter/nf_socket_ipv6.c
index 9ea5ef56cb27..ced8bd44828e 100644
--- a/net/ipv6/netfilter/nf_socket_ipv6.c
+++ b/net/ipv6/netfilter/nf_socket_ipv6.c
@@ -83,8 +83,7 @@ nf_socket_get_sock_v6(struct net *net, struct sk_buff *skb, int doff,
{
switch (protocol) {
case IPPROTO_TCP:
- return inet6_lookup(net, net->ipv4.tcp_death_row.hashinfo,
- skb, doff, saddr, sport, daddr, dport,
+ return inet6_lookup(net, skb, doff, saddr, sport, daddr, dport,
in->ifindex);
case IPPROTO_UDP:
return udp6_lib_lookup(net, saddr, sport, daddr, dport,
diff --git a/net/ipv6/netfilter/nf_tproxy_ipv6.c b/net/ipv6/netfilter/nf_tproxy_ipv6.c
index 52f828bb5a83..b2f59ed9d7cc 100644
--- a/net/ipv6/netfilter/nf_tproxy_ipv6.c
+++ b/net/ipv6/netfilter/nf_tproxy_ipv6.c
@@ -80,7 +80,6 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
const struct net_device *in,
const enum nf_tproxy_lookup_t lookup_type)
{
- struct inet_hashinfo *hinfo = net->ipv4.tcp_death_row.hashinfo;
struct sock *sk;
switch (protocol) {
@@ -94,7 +93,7 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
switch (lookup_type) {
case NF_TPROXY_LOOKUP_LISTENER:
- sk = inet6_lookup_listener(net, hinfo, skb,
+ sk = inet6_lookup_listener(net, skb,
thoff + __tcp_hdrlen(hp),
saddr, sport,
daddr, ntohs(dport),
@@ -109,7 +108,7 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
*/
break;
case NF_TPROXY_LOOKUP_ESTABLISHED:
- sk = __inet6_lookup_established(net, hinfo, saddr, sport, daddr,
+ sk = __inet6_lookup_established(net, saddr, sport, daddr,
ntohs(dport), in->ifindex, 0);
break;
default:
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 806d4b5dd1e6..1c9b283a4132 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -104,18 +104,20 @@ EXPORT_SYMBOL(ip6_find_1stfragopt);
int ip6_dst_hoplimit(struct dst_entry *dst)
{
int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
+
+ rcu_read_lock();
if (hoplimit == 0) {
- struct net_device *dev = dst->dev;
+ struct net_device *dev = dst_dev_rcu(dst);
struct inet6_dev *idev;
- rcu_read_lock();
idev = __in6_dev_get(dev);
if (idev)
hoplimit = READ_ONCE(idev->cnf.hop_limit);
else
hoplimit = READ_ONCE(dev_net(dev)->ipv6.devconf_all->hop_limit);
- rcu_read_unlock();
}
+ rcu_read_unlock();
+
return hoplimit;
}
EXPORT_SYMBOL(ip6_dst_hoplimit);
@@ -141,7 +143,7 @@ int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
skb->protocol = htons(ETH_P_IPV6);
return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
- net, sk, skb, NULL, skb_dst(skb)->dev,
+ net, sk, skb, NULL, skb_dst_dev(skb),
dst_output);
}
EXPORT_SYMBOL_GPL(__ip6_local_out);
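
In ip6_dst_hoplimit() the rcu_read_lock()/rcu_read_unlock() pair is widened to cover the whole conditional, since dst_dev_rcu() and the devconf reads must all happen inside the read-side critical section. The restructuring itself is lock-agnostic; as an analogy only (a pthread rwlock, not RCU), it is the difference between locking inside one branch and locking around the lookup as a whole:

#include <pthread.h>

static pthread_rwlock_t cfg_lock = PTHREAD_RWLOCK_INITIALIZER;
static int default_hoplimit = 64;

/* Take the read-side lock around the whole lookup so every access to the
 * shared configuration is covered, not just the branch that had it before. */
static int lookup_hoplimit(int cached)
{
	int hoplimit = cached;

	pthread_rwlock_rdlock(&cfg_lock);
	if (hoplimit == 0)
		hoplimit = default_hoplimit;
	pthread_rwlock_unlock(&cfg_lock);

	return hoplimit;
}
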
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 84d90dd8b3f0..d7a2cdaa2631 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -142,7 +142,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl6.saddr = np->saddr;
fl6.daddr = *daddr;
fl6.flowi6_mark = ipc6.sockc.mark;
- fl6.flowi6_uid = sk->sk_uid;
+ fl6.flowi6_uid = sk_uid(sk);
fl6.fl6_icmp_type = user_icmph.icmp6_type;
fl6.fl6_icmp_code = user_icmph.icmp6_code;
security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
@@ -208,7 +208,6 @@ struct proto pingv6_prot = {
.recvmsg = ping_recvmsg,
.bind = ping_bind,
.backlog_rcv = ping_queue_rcv_skb,
- .hash = ping_hash,
.unhash = ping_unhash,
.get_port = ping_get_port,
.put_port = ping_unhash,
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 752327b10dde..73296f38c252 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -85,7 +85,6 @@ static const struct snmp_mib snmp6_ipstats_list[] = {
SNMP_MIB_ITEM("Ip6InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
SNMP_MIB_ITEM("Ip6InCEPkts", IPSTATS_MIB_CEPKTS),
SNMP_MIB_ITEM("Ip6OutTransmits", IPSTATS_MIB_OUTPKTS),
- SNMP_MIB_SENTINEL
};
static const struct snmp_mib snmp6_icmp6_list[] = {
@@ -95,30 +94,10 @@ static const struct snmp_mib snmp6_icmp6_list[] = {
SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS),
SNMP_MIB_ITEM("Icmp6OutErrors", ICMP6_MIB_OUTERRORS),
SNMP_MIB_ITEM("Icmp6InCsumErrors", ICMP6_MIB_CSUMERRORS),
+/* ICMP6_MIB_RATELIMITHOST needs to be last, see snmp6_dev_seq_show(). */
SNMP_MIB_ITEM("Icmp6OutRateLimitHost", ICMP6_MIB_RATELIMITHOST),
- SNMP_MIB_SENTINEL
};
-/* RFC 4293 v6 ICMPMsgStatsTable; named items for RFC 2466 compatibility */
-static const char *const icmp6type2name[256] = {
- [ICMPV6_DEST_UNREACH] = "DestUnreachs",
- [ICMPV6_PKT_TOOBIG] = "PktTooBigs",
- [ICMPV6_TIME_EXCEED] = "TimeExcds",
- [ICMPV6_PARAMPROB] = "ParmProblems",
- [ICMPV6_ECHO_REQUEST] = "Echos",
- [ICMPV6_ECHO_REPLY] = "EchoReplies",
- [ICMPV6_MGM_QUERY] = "GroupMembQueries",
- [ICMPV6_MGM_REPORT] = "GroupMembResponses",
- [ICMPV6_MGM_REDUCTION] = "GroupMembReductions",
- [ICMPV6_MLD2_REPORT] = "MLDv2Reports",
- [NDISC_ROUTER_ADVERTISEMENT] = "RouterAdvertisements",
- [NDISC_ROUTER_SOLICITATION] = "RouterSolicits",
- [NDISC_NEIGHBOUR_ADVERTISEMENT] = "NeighborAdvertisements",
- [NDISC_NEIGHBOUR_SOLICITATION] = "NeighborSolicits",
- [NDISC_REDIRECT] = "Redirects",
-};
-
-
static const struct snmp_mib snmp6_udp6_list[] = {
SNMP_MIB_ITEM("Udp6InDatagrams", UDP_MIB_INDATAGRAMS),
SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS),
@@ -129,7 +108,6 @@ static const struct snmp_mib snmp6_udp6_list[] = {
SNMP_MIB_ITEM("Udp6InCsumErrors", UDP_MIB_CSUMERRORS),
SNMP_MIB_ITEM("Udp6IgnoredMulti", UDP_MIB_IGNOREDMULTI),
SNMP_MIB_ITEM("Udp6MemErrors", UDP_MIB_MEMERRORS),
- SNMP_MIB_SENTINEL
};
static const struct snmp_mib snmp6_udplite6_list[] = {
@@ -141,7 +119,6 @@ static const struct snmp_mib snmp6_udplite6_list[] = {
SNMP_MIB_ITEM("UdpLite6SndbufErrors", UDP_MIB_SNDBUFERRORS),
SNMP_MIB_ITEM("UdpLite6InCsumErrors", UDP_MIB_CSUMERRORS),
SNMP_MIB_ITEM("UdpLite6MemErrors", UDP_MIB_MEMERRORS),
- SNMP_MIB_SENTINEL
};
static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib)
@@ -151,11 +128,31 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib)
/* print by name -- deprecated items */
for (i = 0; i < ICMP6MSG_MIB_MAX; i++) {
+ const char *p = NULL;
int icmptype;
- const char *p;
+
+#define CASE(TYP, STR) case TYP: p = STR; break;
icmptype = i & 0xff;
- p = icmp6type2name[icmptype];
+ switch (icmptype) {
+/* RFC 4293 v6 ICMPMsgStatsTable; named items for RFC 2466 compatibility */
+ CASE(ICMPV6_DEST_UNREACH, "DestUnreachs")
+ CASE(ICMPV6_PKT_TOOBIG, "PktTooBigs")
+ CASE(ICMPV6_TIME_EXCEED, "TimeExcds")
+ CASE(ICMPV6_PARAMPROB, "ParmProblems")
+ CASE(ICMPV6_ECHO_REQUEST, "Echos")
+ CASE(ICMPV6_ECHO_REPLY, "EchoReplies")
+ CASE(ICMPV6_MGM_QUERY, "GroupMembQueries")
+ CASE(ICMPV6_MGM_REPORT, "GroupMembResponses")
+ CASE(ICMPV6_MGM_REDUCTION, "GroupMembReductions")
+ CASE(ICMPV6_MLD2_REPORT, "MLDv2Reports")
+ CASE(NDISC_ROUTER_ADVERTISEMENT, "RouterAdvertisements")
+ CASE(NDISC_ROUTER_SOLICITATION, "RouterSolicits")
+ CASE(NDISC_NEIGHBOUR_ADVERTISEMENT, "NeighborAdvertisements")
+ CASE(NDISC_NEIGHBOUR_SOLICITATION, "NeighborSolicits")
+ CASE(NDISC_REDIRECT, "Redirects")
+ }
+#undef CASE
if (!p) /* don't print un-named types here */
continue;
snprintf(name, sizeof(name), "Icmp6%s%s",
@@ -182,35 +179,37 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib)
*/
static void snmp6_seq_show_item(struct seq_file *seq, void __percpu *pcpumib,
atomic_long_t *smib,
- const struct snmp_mib *itemlist)
+ const struct snmp_mib *itemlist,
+ int cnt)
{
unsigned long buff[SNMP_MIB_MAX];
int i;
if (pcpumib) {
- memset(buff, 0, sizeof(unsigned long) * SNMP_MIB_MAX);
+ memset(buff, 0, sizeof(unsigned long) * cnt);
- snmp_get_cpu_field_batch(buff, itemlist, pcpumib);
- for (i = 0; itemlist[i].name; i++)
+ snmp_get_cpu_field_batch_cnt(buff, itemlist, cnt, pcpumib);
+ for (i = 0; i < cnt; i++)
seq_printf(seq, "%-32s\t%lu\n",
itemlist[i].name, buff[i]);
} else {
- for (i = 0; itemlist[i].name; i++)
+ for (i = 0; i < cnt; i++)
seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name,
atomic_long_read(smib + itemlist[i].entry));
}
}
static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu *mib,
- const struct snmp_mib *itemlist, size_t syncpoff)
+ const struct snmp_mib *itemlist,
+ int cnt, size_t syncpoff)
{
u64 buff64[SNMP_MIB_MAX];
int i;
- memset(buff64, 0, sizeof(u64) * SNMP_MIB_MAX);
+ memset(buff64, 0, sizeof(u64) * cnt);
- snmp_get_cpu_field64_batch(buff64, itemlist, mib, syncpoff);
- for (i = 0; itemlist[i].name; i++)
+ snmp_get_cpu_field64_batch_cnt(buff64, itemlist, cnt, mib, syncpoff);
+ for (i = 0; i < cnt; i++)
seq_printf(seq, "%-32s\t%llu\n", itemlist[i].name, buff64[i]);
}
@@ -219,14 +218,19 @@ static int snmp6_seq_show(struct seq_file *seq, void *v)
struct net *net = (struct net *)seq->private;
snmp6_seq_show_item64(seq, net->mib.ipv6_statistics,
- snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
+ snmp6_ipstats_list,
+ ARRAY_SIZE(snmp6_ipstats_list),
+ offsetof(struct ipstats_mib, syncp));
snmp6_seq_show_item(seq, net->mib.icmpv6_statistics,
- NULL, snmp6_icmp6_list);
+ NULL, snmp6_icmp6_list,
+ ARRAY_SIZE(snmp6_icmp6_list));
snmp6_seq_show_icmpv6msg(seq, net->mib.icmpv6msg_statistics->mibs);
snmp6_seq_show_item(seq, net->mib.udp_stats_in6,
- NULL, snmp6_udp6_list);
+ NULL, snmp6_udp6_list,
+ ARRAY_SIZE(snmp6_udp6_list));
snmp6_seq_show_item(seq, net->mib.udplite_stats_in6,
- NULL, snmp6_udplite6_list);
+ NULL, snmp6_udplite6_list,
+ ARRAY_SIZE(snmp6_udplite6_list));
return 0;
}
@@ -236,9 +240,14 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex);
snmp6_seq_show_item64(seq, idev->stats.ipv6,
- snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
+ snmp6_ipstats_list,
+ ARRAY_SIZE(snmp6_ipstats_list),
+ offsetof(struct ipstats_mib, syncp));
+
+ /* Per idev icmp stats do not have ICMP6_MIB_RATELIMITHOST */
snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs,
- snmp6_icmp6_list);
+ snmp6_icmp6_list, ARRAY_SIZE(snmp6_icmp6_list) - 1);
+
snmp6_seq_show_icmpv6msg(seq, idev->stats.icmpv6msgdev->mibs);
return 0;
}
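
Two related cleanups in proc.c: the MIB tables drop their SNMP_MIB_SENTINEL terminators and every dump helper now takes an explicit item count (ARRAY_SIZE(...)), which is what lets snmp6_dev_seq_show() pass one entry fewer so the per-device ICMPv6 table skips ICMP6_MIB_RATELIMITHOST; and the 256-entry icmp6type2name[] lookup table becomes a switch generated by a local CASE macro. The sentinel-versus-count difference, reduced to plain C:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct mib_item { const char *name; int value; };

static const struct mib_item items[] = {
	{ "InMsgs",  3 },
	{ "OutMsgs", 7 },
	/* no { NULL, 0 } sentinel needed once callers pass a count */
};

static void show(const struct mib_item *list, int cnt)
{
	int i;

	for (i = 0; i < cnt; i++)	/* bounded by cnt, not by a NULL name */
		printf("%-32s\t%d\n", list[i].name, list[i].value);
}

int main(void)
{
	show(items, ARRAY_SIZE(items));		/* full table */
	show(items, ARRAY_SIZE(items) - 1);	/* drop the last entry, like the per-device ICMPv6 dump */
	return 0;
}
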
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index fda640ebd53f..e369f54844dd 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -163,7 +163,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
if (atomic_read(&sk->sk_rmem_alloc) >=
READ_ONCE(sk->sk_rcvbuf)) {
- atomic_inc(&sk->sk_drops);
+ sk_drops_inc(sk);
continue;
}
@@ -361,7 +361,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
skb_checksum_complete(skb)) {
- atomic_inc(&sk->sk_drops);
+ sk_drops_inc(sk);
sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_SKB_CSUM);
return NET_RX_DROP;
}
@@ -389,7 +389,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
struct raw6_sock *rp = raw6_sk(sk);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
- atomic_inc(&sk->sk_drops);
+ sk_drops_inc(sk);
sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_XFRM_POLICY);
return NET_RX_DROP;
}
@@ -414,7 +414,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
if (inet_test_bit(HDRINCL, sk)) {
if (skb_checksum_complete(skb)) {
- atomic_inc(&sk->sk_drops);
+ sk_drops_inc(sk);
sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_SKB_CSUM);
return NET_RX_DROP;
}
@@ -445,7 +445,7 @@ static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len, addr_len);
- if (np->rxpmtu && np->rxopt.bits.rxpmtu)
+ if (np->rxopt.bits.rxpmtu && READ_ONCE(np->rxpmtu))
return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
skb = skb_recv_datagram(sk, flags, &err);
@@ -777,7 +777,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_mark = ipc6.sockc.mark;
- fl6.flowi6_uid = sk->sk_uid;
+ fl6.flowi6_uid = sk_uid(sk);
if (sin6) {
if (addr_len < SIN6_LEN_RFC2133)
@@ -1175,6 +1175,7 @@ static int rawv6_init_sk(struct sock *sk)
{
struct raw6_sock *rp = raw6_sk(sk);
+ sk->sk_drop_counters = &rp->drop_counters;
switch (inet_sk(sk)->inet_num) {
case IPPROTO_ICMPV6:
rp->checksum = 1;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 7d4bcf3fda5b..25ec8001898d 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -104,11 +104,11 @@ fq_find(struct net *net, __be32 id, const struct ipv6hdr *hdr, int iif)
return container_of(q, struct frag_queue, q);
}
-static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
+static int ip6_frag_queue(struct net *net,
+ struct frag_queue *fq, struct sk_buff *skb,
struct frag_hdr *fhdr, int nhoff,
u32 *prob_offset, int *refs)
{
- struct net *net = dev_net(skb_dst(skb)->dev);
int offset, end, fragsize;
struct sk_buff *prev_tail;
struct net_device *dev;
@@ -324,10 +324,10 @@ out_fail:
static int ipv6_frag_rcv(struct sk_buff *skb)
{
+ const struct ipv6hdr *hdr = ipv6_hdr(skb);
+ struct net *net = skb_dst_dev_net(skb);
struct frag_hdr *fhdr;
struct frag_queue *fq;
- const struct ipv6hdr *hdr = ipv6_hdr(skb);
- struct net *net = dev_net(skb_dst(skb)->dev);
u8 nexthdr;
int iif;
@@ -384,7 +384,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
spin_lock(&fq->q.lock);
fq->iif = iif;
- ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff,
+ ret = ip6_frag_queue(net, fq, skb, fhdr, IP6CB(skb)->nhoff,
&prob_offset, &refs);
spin_unlock(&fq->q.lock);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 79c8f1acf8a3..aee6a10b112a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -228,13 +228,13 @@ static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
const struct rt6_info *rt = dst_rt6_info(dst);
return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
- dst->dev, skb, daddr);
+ dst_dev(dst), skb, daddr);
}
static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
const struct rt6_info *rt = dst_rt6_info(dst);
- struct net_device *dev = dst->dev;
+ struct net_device *dev = dst_dev(dst);
daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
if (!daddr)
@@ -391,9 +391,8 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
static bool __rt6_check_expired(const struct rt6_info *rt)
{
if (rt->rt6i_flags & RTF_EXPIRES)
- return time_after(jiffies, rt->dst.expires);
- else
- return false;
+ return time_after(jiffies, READ_ONCE(rt->dst.expires));
+ return false;
}
static bool rt6_check_expired(const struct rt6_info *rt)
@@ -403,10 +402,10 @@ static bool rt6_check_expired(const struct rt6_info *rt)
from = rcu_dereference(rt->from);
if (rt->rt6i_flags & RTF_EXPIRES) {
- if (time_after(jiffies, rt->dst.expires))
+ if (time_after(jiffies, READ_ONCE(rt->dst.expires)))
return true;
} else if (from) {
- return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
+ return READ_ONCE(rt->dst.obsolete) != DST_OBSOLETE_FORCE_CHK ||
fib6_check_expired(from);
}
return false;
@@ -1145,6 +1144,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
rt->dst.input = ip6_input;
} else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
rt->dst.input = ip6_mc_input;
+ rt->dst.output = ip6_mr_output;
} else {
rt->dst.input = ip6_forward;
}
@@ -2133,12 +2133,13 @@ static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
* expired, independently from their aging, as per RFC 8201 section 4
*/
if (!(rt->rt6i_flags & RTF_EXPIRES)) {
- if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
+ if (time_after_eq(now, READ_ONCE(rt->dst.lastuse) +
+ gc_args->timeout)) {
pr_debug("aging clone %p\n", rt);
rt6_remove_exception(bucket, rt6_ex);
return;
}
- } else if (time_after(jiffies, rt->dst.expires)) {
+ } else if (time_after(jiffies, READ_ONCE(rt->dst.expires))) {
pr_debug("purging expired route %p\n", rt);
rt6_remove_exception(bucket, rt6_ex);
return;
@@ -2776,11 +2777,10 @@ static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
u32 cookie)
{
if (!__rt6_check_expired(rt) &&
- rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
+ READ_ONCE(rt->dst.obsolete) == DST_OBSOLETE_FORCE_CHK &&
fib6_check(from, cookie))
return &rt->dst;
- else
- return NULL;
+ return NULL;
}
INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
@@ -2870,7 +2870,7 @@ static void rt6_update_expires(struct rt6_info *rt0, int timeout)
rcu_read_lock();
from = rcu_dereference(rt0->from);
if (from)
- rt0->dst.expires = from->expires;
+ WRITE_ONCE(rt0->dst.expires, from->expires);
rcu_read_unlock();
}
@@ -2943,7 +2943,7 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
if (res.f6i->nh) {
struct fib6_nh_match_arg arg = {
- .dev = dst->dev,
+ .dev = dst_dev_rcu(dst),
.gw = &rt6->rt6i_gateway,
};
@@ -3010,10 +3010,10 @@ void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
oif = l3mdev_master_ifindex(skb->dev);
ip6_update_pmtu(skb, sock_net(sk), mtu, oif, READ_ONCE(sk->sk_mark),
- sk->sk_uid);
+ sk_uid(sk));
dst = __sk_dst_get(sk);
- if (!dst || !dst->obsolete ||
+ if (!dst || !READ_ONCE(dst->obsolete) ||
dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
return;
@@ -3032,13 +3032,12 @@ void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
#endif
ip6_dst_store(sk, dst,
- ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
- &sk->sk_v6_daddr : NULL,
+ ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr),
#ifdef CONFIG_IPV6_SUBTREES
ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
- &np->saddr :
+ true :
#endif
- NULL);
+ false);
}
static bool ip6_redirect_nh_match(const struct fib6_result *res,
@@ -3232,13 +3231,12 @@ void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if,
- READ_ONCE(sk->sk_mark), sk->sk_uid);
+ READ_ONCE(sk->sk_mark), sk_uid(sk));
}
EXPORT_SYMBOL_GPL(ip6_sk_redirect);
static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
- struct net_device *dev = dst->dev;
unsigned int mtu = dst_mtu(dst);
struct net *net;
@@ -3246,7 +3244,7 @@ static unsigned int ip6_default_advmss(const struct dst_entry *dst)
rcu_read_lock();
- net = dev_net_rcu(dev);
+ net = dst_dev_net_rcu(dst);
if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
@@ -4301,7 +4299,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
if (res.f6i->nh) {
struct fib6_nh_match_arg arg = {
- .dev = dst->dev,
+ .dev = dst_dev_rcu(dst),
.gw = &rt->rt6i_gateway,
};
@@ -4587,13 +4585,14 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg)
static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
{
struct dst_entry *dst = skb_dst(skb);
- struct net *net = dev_net(dst->dev);
+ struct net_device *dev = dst_dev(dst);
+ struct net *net = dev_net(dev);
struct inet6_dev *idev;
SKB_DR(reason);
int type;
if (netif_is_l3_master(skb->dev) ||
- dst->dev == net->loopback_dev)
+ dev == net->loopback_dev)
idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
else
idev = ip6_dst_idev(dst);
@@ -4630,7 +4629,7 @@ static int ip6_pkt_discard(struct sk_buff *skb)
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- skb->dev = skb_dst(skb)->dev;
+ skb->dev = skb_dst_dev(skb);
return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
}
@@ -4641,7 +4640,7 @@ static int ip6_pkt_prohibit(struct sk_buff *skb)
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- skb->dev = skb_dst(skb)->dev;
+ skb->dev = skb_dst_dev(skb);
return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
}
@@ -5346,7 +5345,8 @@ static void ip6_route_mpath_notify(struct fib6_info *rt,
*/
rcu_read_lock();
- if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
+ if ((nlflags & NLM_F_APPEND) && rt_last &&
+ READ_ONCE(rt_last->fib6_nsiblings)) {
rt = list_first_or_null_rcu(&rt_last->fib6_siblings,
struct fib6_info,
fib6_siblings);
@@ -5670,32 +5670,34 @@ static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg)
static size_t rt6_nlmsg_size(struct fib6_info *f6i)
{
+ struct fib6_info *sibling;
+ struct fib6_nh *nh;
int nexthop_len;
if (f6i->nh) {
nexthop_len = nla_total_size(4); /* RTA_NH_ID */
nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
&nexthop_len);
- } else {
- struct fib6_nh *nh = f6i->fib6_nh;
- struct fib6_info *sibling;
-
- nexthop_len = 0;
- if (f6i->fib6_nsiblings) {
- rt6_nh_nlmsg_size(nh, &nexthop_len);
-
- rcu_read_lock();
+ goto common;
+ }
- list_for_each_entry_rcu(sibling, &f6i->fib6_siblings,
- fib6_siblings) {
- rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len);
- }
+ rcu_read_lock();
+retry:
+ nh = f6i->fib6_nh;
+ nexthop_len = 0;
+ if (READ_ONCE(f6i->fib6_nsiblings)) {
+ rt6_nh_nlmsg_size(nh, &nexthop_len);
- rcu_read_unlock();
+ list_for_each_entry_rcu(sibling, &f6i->fib6_siblings,
+ fib6_siblings) {
+ rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len);
+ if (!READ_ONCE(f6i->fib6_nsiblings))
+ goto retry;
}
- nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
}
-
+ rcu_read_unlock();
+ nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
+common:
return NLMSG_ALIGN(sizeof(struct rtmsg))
+ nla_total_size(16) /* RTA_SRC */
+ nla_total_size(16) /* RTA_DST */
@@ -5844,17 +5846,19 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
* each as a nexthop within RTA_MULTIPATH.
*/
if (rt6) {
+ struct net_device *dev;
+
if (rt6_flags & RTF_GATEWAY &&
nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
goto nla_put_failure;
- if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
+ dev = dst_dev(dst);
+ if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
goto nla_put_failure;
- if (dst->lwtstate &&
- lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
+ if (lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
goto nla_put_failure;
- } else if (rt->fib6_nsiblings) {
+ } else if (READ_ONCE(rt->fib6_nsiblings)) {
struct fib6_info *sibling;
struct nlattr *mp;
@@ -5904,7 +5908,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
}
if (rt6_flags & RTF_EXPIRES) {
- expires = dst ? dst->expires : rt->expires;
+ expires = dst ? READ_ONCE(dst->expires) : rt->expires;
expires -= jiffies;
}
@@ -5956,16 +5960,21 @@ static bool fib6_info_uses_dev(const struct fib6_info *f6i,
if (f6i->fib6_nh->fib_nh_dev == dev)
return true;
- if (f6i->fib6_nsiblings) {
- struct fib6_info *sibling, *next_sibling;
+ if (READ_ONCE(f6i->fib6_nsiblings)) {
+ const struct fib6_info *sibling;
- list_for_each_entry_safe(sibling, next_sibling,
- &f6i->fib6_siblings, fib6_siblings) {
- if (sibling->fib6_nh->fib_nh_dev == dev)
+ rcu_read_lock();
+ list_for_each_entry_rcu(sibling, &f6i->fib6_siblings,
+ fib6_siblings) {
+ if (sibling->fib6_nh->fib_nh_dev == dev) {
+ rcu_read_unlock();
return true;
+ }
+ if (!READ_ONCE(f6i->fib6_nsiblings))
+ break;
}
+ rcu_read_unlock();
}
-
return false;
}
@@ -6321,8 +6330,9 @@ errout:
void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
unsigned int nlm_flags)
{
- struct sk_buff *skb;
struct net *net = info->nl_net;
+ struct sk_buff *skb;
+ size_t sz;
u32 seq;
int err;
@@ -6330,17 +6340,21 @@ void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
seq = info->nlh ? info->nlh->nlmsg_seq : 0;
rcu_read_lock();
-
- skb = nlmsg_new(rt6_nlmsg_size(rt), GFP_ATOMIC);
+ sz = rt6_nlmsg_size(rt);
+retry:
+ skb = nlmsg_new(sz, GFP_ATOMIC);
if (!skb)
goto errout;
err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
event, info->portid, seq, nlm_flags);
if (err < 0) {
- /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
- WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
+ /* -EMSGSIZE implies needed space grew under us. */
+ if (err == -EMSGSIZE) {
+ sz = max(rt6_nlmsg_size(rt), sz << 1);
+ goto retry;
+ }
goto errout;
}
@@ -6805,8 +6819,7 @@ void __init ip6_route_init_special_entries(void)
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt)
-BTF_ID_LIST(btf_fib6_info_id)
-BTF_ID(struct, fib6_info)
+BTF_ID_LIST_SINGLE(btf_fib6_info_id, struct, fib6_info)
static const struct bpf_iter_seq_info ipv6_route_seq_info = {
.seq_ops = &ipv6_route_seq_ops,
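
The notable route.c change is in inet6_rt_notify(): rt6_nlmsg_size() is now only an estimate, because fib6_nsiblings can change while it runs under RCU, so instead of treating -EMSGSIZE from rt6_fill_node() as a bug the function recomputes the size, takes at least double the previous value, and retries the allocation. A userspace sketch of the same grow-and-retry pattern, built around snprintf():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Format "a/b" into a heap buffer, growing the buffer whenever the
 * initial size estimate turns out to be too small. */
static char *format_retry(const char *a, const char *b)
{
	size_t sz = strlen(a) + 8;	/* deliberately rough first estimate */
	char *buf = NULL;

	for (;;) {
		char *tmp = realloc(buf, sz);
		int n;

		if (!tmp) {
			free(buf);
			return NULL;
		}
		buf = tmp;

		n = snprintf(buf, sz, "%s/%s", a, b);
		if (n < 0) {
			free(buf);
			return NULL;
		}
		if ((size_t)n + 1 <= sz)
			return buf;				/* it fit */
		sz = (size_t)n + 1 > sz * 2 ? (size_t)n + 1 : sz * 2;	/* max(recomputed, sz << 1) */
	}
}
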
diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
index 7c05ac846646..c7942cf65567 100644
--- a/net/ipv6/rpl_iptunnel.c
+++ b/net/ipv6/rpl_iptunnel.c
@@ -129,13 +129,13 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
struct dst_entry *cache_dst)
{
struct ipv6_rpl_sr_hdr *isrh, *csrh;
- const struct ipv6hdr *oldhdr;
+ struct ipv6hdr oldhdr;
struct ipv6hdr *hdr;
unsigned char *buf;
size_t hdrlen;
int err;
- oldhdr = ipv6_hdr(skb);
+ memcpy(&oldhdr, ipv6_hdr(skb), sizeof(oldhdr));
buf = kcalloc(struct_size(srh, segments.addr, srh->segments_left), 2, GFP_ATOMIC);
if (!buf)
@@ -147,7 +147,7 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
memcpy(isrh, srh, sizeof(*isrh));
memcpy(isrh->rpl_segaddr, &srh->rpl_segaddr[1],
(srh->segments_left - 1) * 16);
- isrh->rpl_segaddr[srh->segments_left - 1] = oldhdr->daddr;
+ isrh->rpl_segaddr[srh->segments_left - 1] = oldhdr.daddr;
ipv6_rpl_srh_compress(csrh, isrh, &srh->rpl_segaddr[0],
isrh->segments_left - 1);
@@ -169,7 +169,7 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
skb_mac_header_rebuild(skb);
hdr = ipv6_hdr(skb);
- memmove(hdr, oldhdr, sizeof(*hdr));
+ memmove(hdr, &oldhdr, sizeof(*hdr));
isrh = (void *)hdr + sizeof(*hdr);
memcpy(isrh, csrh, hdrlen);
@@ -242,7 +242,7 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
local_bh_enable();
}
- err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst_dev(dst)));
if (unlikely(err))
goto drop;
}
@@ -297,7 +297,7 @@ static int rpl_input(struct sk_buff *skb)
local_bh_enable();
}
- err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst_dev(dst)));
if (unlikely(err))
goto drop;
} else {
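
rpl_do_srh_inline() used to keep a pointer into the packet (oldhdr = ipv6_hdr(skb)) and read oldhdr->daddr again later; copying the header into a local struct up front means those later reads cannot hit a stale pointer if the skb head is reallocated in between (for instance by skb_cow_head()). The same bug class shows up with plain realloc(); a minimal sketch, assuming the caller passes oldlen >= sizeof(struct hdr) and newlen >= oldlen:

#include <stdlib.h>
#include <string.h>

struct hdr { unsigned char daddr[16]; };

static unsigned char *grow_and_rewrite(unsigned char *buf, size_t oldlen, size_t newlen)
{
	struct hdr old;
	unsigned char *nbuf;

	memcpy(&old, buf, sizeof(old));		/* snapshot by value before reallocating */

	nbuf = realloc(buf, newlen);		/* any pointer into 'buf' may now dangle */
	if (!nbuf)
		return NULL;
	memset(nbuf + oldlen, 0, newlen - oldlen);

	/* safe: reads the local copy, not the possibly-freed old buffer */
	memcpy(nbuf + newlen - sizeof(old.daddr), old.daddr, sizeof(old.daddr));
	return nbuf;
}
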
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index 180da19c148c..a5c4c629b788 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -522,16 +522,10 @@ int __init seg6_init(void)
if (err)
goto out_unregister_iptun;
- err = seg6_hmac_init();
- if (err)
- goto out_unregister_seg6;
-
pr_info("Segment Routing with IPv6\n");
out:
return err;
-out_unregister_seg6:
- seg6_local_exit();
out_unregister_iptun:
seg6_iptunnel_exit();
out_unregister_genl:
@@ -543,7 +537,6 @@ out_unregister_pernet:
void seg6_exit(void)
{
- seg6_hmac_exit();
seg6_local_exit();
seg6_iptunnel_exit();
genl_unregister_family(&seg6_genl_family);
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
index f78ecb6ad838..ee6bac0160ac 100644
--- a/net/ipv6/seg6_hmac.c
+++ b/net/ipv6/seg6_hmac.c
@@ -16,7 +16,6 @@
#include <linux/in6.h>
#include <linux/icmpv6.h>
#include <linux/mroute6.h>
-#include <linux/slab.h>
#include <linux/rhashtable.h>
#include <linux/netfilter.h>
@@ -34,7 +33,9 @@
#include <net/addrconf.h>
#include <net/xfrm.h>
-#include <crypto/hash.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/utils.h>
#include <net/seg6.h>
#include <net/genetlink.h>
#include <net/seg6_hmac.h>
@@ -77,17 +78,6 @@ static const struct rhashtable_params rht_params = {
.obj_cmpfn = seg6_hmac_cmpfn,
};
-static struct seg6_hmac_algo hmac_algos[] = {
- {
- .alg_id = SEG6_HMAC_ALGO_SHA1,
- .name = "hmac(sha1)",
- },
- {
- .alg_id = SEG6_HMAC_ALGO_SHA256,
- .name = "hmac(sha256)",
- },
-};
-
static struct sr6_tlv_hmac *seg6_get_tlv_hmac(struct ipv6_sr_hdr *srh)
{
struct sr6_tlv_hmac *tlv;
@@ -107,75 +97,13 @@ static struct sr6_tlv_hmac *seg6_get_tlv_hmac(struct ipv6_sr_hdr *srh)
return tlv;
}
-static struct seg6_hmac_algo *__hmac_get_algo(u8 alg_id)
-{
- struct seg6_hmac_algo *algo;
- int i, alg_count;
-
- alg_count = ARRAY_SIZE(hmac_algos);
- for (i = 0; i < alg_count; i++) {
- algo = &hmac_algos[i];
- if (algo->alg_id == alg_id)
- return algo;
- }
-
- return NULL;
-}
-
-static int __do_hmac(struct seg6_hmac_info *hinfo, const char *text, u8 psize,
- u8 *output, int outlen)
-{
- struct seg6_hmac_algo *algo;
- struct crypto_shash *tfm;
- struct shash_desc *shash;
- int ret, dgsize;
-
- algo = __hmac_get_algo(hinfo->alg_id);
- if (!algo)
- return -ENOENT;
-
- tfm = *this_cpu_ptr(algo->tfms);
-
- dgsize = crypto_shash_digestsize(tfm);
- if (dgsize > outlen) {
- pr_debug("sr-ipv6: __do_hmac: digest size too big (%d / %d)\n",
- dgsize, outlen);
- return -ENOMEM;
- }
-
- ret = crypto_shash_setkey(tfm, hinfo->secret, hinfo->slen);
- if (ret < 0) {
- pr_debug("sr-ipv6: crypto_shash_setkey failed: err %d\n", ret);
- goto failed;
- }
-
- shash = *this_cpu_ptr(algo->shashs);
- shash->tfm = tfm;
-
- ret = crypto_shash_digest(shash, text, psize, output);
- if (ret < 0) {
- pr_debug("sr-ipv6: crypto_shash_digest failed: err %d\n", ret);
- goto failed;
- }
-
- return dgsize;
-
-failed:
- return ret;
-}
-
int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
struct in6_addr *saddr, u8 *output)
{
__be32 hmackeyid = cpu_to_be32(hinfo->hmackeyid);
- u8 tmp_out[SEG6_HMAC_MAX_DIGESTSIZE];
- int plen, i, dgsize, wrsize;
+ int plen, i, ret = 0;
char *ring, *off;
- /* a 160-byte buffer for digest output allows to store highest known
- * hash function (RadioGatun) with up to 1216 bits
- */
-
/* saddr(16) + first_seg(1) + flags(1) + keyid(4) + seglist(16n) */
plen = 16 + 1 + 1 + 4 + (hdr->first_segment + 1) * 16;
@@ -218,22 +146,25 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
off += 16;
}
- dgsize = __do_hmac(hinfo, ring, plen, tmp_out,
- SEG6_HMAC_MAX_DIGESTSIZE);
+ switch (hinfo->alg_id) {
+ case SEG6_HMAC_ALGO_SHA1:
+ hmac_sha1(&hinfo->key.sha1, ring, plen, output);
+ static_assert(SEG6_HMAC_FIELD_LEN > SHA1_DIGEST_SIZE);
+ memset(&output[SHA1_DIGEST_SIZE], 0,
+ SEG6_HMAC_FIELD_LEN - SHA1_DIGEST_SIZE);
+ break;
+ case SEG6_HMAC_ALGO_SHA256:
+ hmac_sha256(&hinfo->key.sha256, ring, plen, output);
+ static_assert(SEG6_HMAC_FIELD_LEN == SHA256_DIGEST_SIZE);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ break;
+ }
local_unlock_nested_bh(&hmac_storage.bh_lock);
local_bh_enable();
-
- if (dgsize < 0)
- return dgsize;
-
- wrsize = SEG6_HMAC_FIELD_LEN;
- if (wrsize > dgsize)
- wrsize = dgsize;
-
- memset(output, 0, SEG6_HMAC_FIELD_LEN);
- memcpy(output, tmp_out, wrsize);
-
- return 0;
+ return ret;
}
EXPORT_SYMBOL(seg6_hmac_compute);
@@ -280,7 +211,7 @@ bool seg6_hmac_validate_skb(struct sk_buff *skb)
if (seg6_hmac_compute(hinfo, srh, &ipv6_hdr(skb)->saddr, hmac_output))
return false;
- if (memcmp(hmac_output, tlv->hmac, SEG6_HMAC_FIELD_LEN) != 0)
+ if (crypto_memneq(hmac_output, tlv->hmac, SEG6_HMAC_FIELD_LEN))
return false;
return true;
@@ -304,6 +235,19 @@ int seg6_hmac_info_add(struct net *net, u32 key, struct seg6_hmac_info *hinfo)
struct seg6_pernet_data *sdata = seg6_pernet(net);
int err;
+ switch (hinfo->alg_id) {
+ case SEG6_HMAC_ALGO_SHA1:
+ hmac_sha1_preparekey(&hinfo->key.sha1,
+ hinfo->secret, hinfo->slen);
+ break;
+ case SEG6_HMAC_ALGO_SHA256:
+ hmac_sha256_preparekey(&hinfo->key.sha256,
+ hinfo->secret, hinfo->slen);
+ break;
+ default:
+ return -EINVAL;
+ }
+
err = rhashtable_lookup_insert_fast(&sdata->hmac_infos, &hinfo->node,
rht_params);
@@ -359,65 +303,6 @@ out:
}
EXPORT_SYMBOL(seg6_push_hmac);
-static int seg6_hmac_init_algo(void)
-{
- struct seg6_hmac_algo *algo;
- struct crypto_shash *tfm;
- struct shash_desc *shash;
- int i, alg_count, cpu;
- int ret = -ENOMEM;
-
- alg_count = ARRAY_SIZE(hmac_algos);
-
- for (i = 0; i < alg_count; i++) {
- struct crypto_shash **p_tfm;
- int shsize;
-
- algo = &hmac_algos[i];
- algo->tfms = alloc_percpu(struct crypto_shash *);
- if (!algo->tfms)
- goto error_out;
-
- for_each_possible_cpu(cpu) {
- tfm = crypto_alloc_shash(algo->name, 0, 0);
- if (IS_ERR(tfm)) {
- ret = PTR_ERR(tfm);
- goto error_out;
- }
- p_tfm = per_cpu_ptr(algo->tfms, cpu);
- *p_tfm = tfm;
- }
-
- p_tfm = raw_cpu_ptr(algo->tfms);
- tfm = *p_tfm;
-
- shsize = sizeof(*shash) + crypto_shash_descsize(tfm);
-
- algo->shashs = alloc_percpu(struct shash_desc *);
- if (!algo->shashs)
- goto error_out;
-
- for_each_possible_cpu(cpu) {
- shash = kzalloc_node(shsize, GFP_KERNEL,
- cpu_to_node(cpu));
- if (!shash)
- goto error_out;
- *per_cpu_ptr(algo->shashs, cpu) = shash;
- }
- }
-
- return 0;
-
-error_out:
- seg6_hmac_exit();
- return ret;
-}
-
-int __init seg6_hmac_init(void)
-{
- return seg6_hmac_init_algo();
-}
-
int __net_init seg6_hmac_net_init(struct net *net)
{
struct seg6_pernet_data *sdata = seg6_pernet(net);
@@ -425,36 +310,6 @@ int __net_init seg6_hmac_net_init(struct net *net)
return rhashtable_init(&sdata->hmac_infos, &rht_params);
}
-void seg6_hmac_exit(void)
-{
- struct seg6_hmac_algo *algo = NULL;
- struct crypto_shash *tfm;
- struct shash_desc *shash;
- int i, alg_count, cpu;
-
- alg_count = ARRAY_SIZE(hmac_algos);
- for (i = 0; i < alg_count; i++) {
- algo = &hmac_algos[i];
-
- if (algo->shashs) {
- for_each_possible_cpu(cpu) {
- shash = *per_cpu_ptr(algo->shashs, cpu);
- kfree(shash);
- }
- free_percpu(algo->shashs);
- }
-
- if (algo->tfms) {
- for_each_possible_cpu(cpu) {
- tfm = *per_cpu_ptr(algo->tfms, cpu);
- crypto_free_shash(tfm);
- }
- free_percpu(algo->tfms);
- }
- }
-}
-EXPORT_SYMBOL(seg6_hmac_exit);
-
void __net_exit seg6_hmac_net_exit(struct net *net)
{
struct seg6_pernet_data *sdata = seg6_pernet(net);
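
seg6_hmac.c drops the whole crypto_shash setup (per-CPU tfms and descriptors, seg6_hmac_init()/seg6_hmac_exit()) in favour of the library one-shot helpers: keys are expanded once in seg6_hmac_info_add() with hmac_sha1_preparekey()/hmac_sha256_preparekey(), seg6_hmac_compute() calls hmac_sha1()/hmac_sha256() directly, and the tag check moves from memcmp() to crypto_memneq() so the comparison runs in constant time. A rough userspace equivalent of "one-shot HMAC plus timing-safe compare", assuming OpenSSL's libcrypto is available (link with -lcrypto):

#include <openssl/crypto.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>

/* Compute HMAC-SHA256 over 'msg' and compare it against an expected tag
 * without leaking how many leading bytes matched. */
static int hmac_matches(const unsigned char *key, int keylen,
			const unsigned char *msg, size_t msglen,
			const unsigned char *expected, unsigned int explen)
{
	unsigned char out[32];
	unsigned int outlen = 0;

	if (!HMAC(EVP_sha256(), key, keylen, msg, msglen, out, &outlen))
		return 0;
	if (outlen != explen)
		return 0;
	return CRYPTO_memcmp(out, expected, explen) == 0;	/* like crypto_memneq() */
}
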
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 51583461ae29..3e1b9991131a 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -128,7 +128,8 @@ static int __seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
int proto, struct dst_entry *cache_dst)
{
struct dst_entry *dst = skb_dst(skb);
- struct net *net = dev_net(dst->dev);
+ struct net_device *dev = dst_dev(dst);
+ struct net *net = dev_net(dev);
struct ipv6hdr *hdr, *inner_hdr;
struct ipv6_sr_hdr *isrh;
int hdrlen, tot_len, err;
@@ -181,7 +182,7 @@ static int __seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
isrh->nexthdr = proto;
hdr->daddr = isrh->segments[isrh->first_segment];
- set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
+ set_tun_src(net, dev, &hdr->daddr, &hdr->saddr);
#ifdef CONFIG_IPV6_SEG6_HMAC
if (sr_has_hmac(isrh)) {
@@ -212,7 +213,8 @@ static int seg6_do_srh_encap_red(struct sk_buff *skb,
{
__u8 first_seg = osrh->first_segment;
struct dst_entry *dst = skb_dst(skb);
- struct net *net = dev_net(dst->dev);
+ struct net_device *dev = dst_dev(dst);
+ struct net *net = dev_net(dev);
struct ipv6hdr *hdr, *inner_hdr;
int hdrlen = ipv6_optlen(osrh);
int red_tlv_offset, tlv_offset;
@@ -270,7 +272,7 @@ static int seg6_do_srh_encap_red(struct sk_buff *skb,
if (skip_srh) {
hdr->nexthdr = proto;
- set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
+ set_tun_src(net, dev, &hdr->daddr, &hdr->saddr);
goto out;
}
@@ -306,7 +308,7 @@ static int seg6_do_srh_encap_red(struct sk_buff *skb,
srcaddr:
isrh->nexthdr = proto;
- set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
+ set_tun_src(net, dev, &hdr->daddr, &hdr->saddr);
#ifdef CONFIG_IPV6_SEG6_HMAC
if (unlikely(!skip_srh && sr_has_hmac(isrh))) {
@@ -362,7 +364,7 @@ static int __seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
#ifdef CONFIG_IPV6_SEG6_HMAC
if (sr_has_hmac(isrh)) {
- struct net *net = dev_net(skb_dst(skb)->dev);
+ struct net *net = skb_dst_dev_net(skb);
err = seg6_push_hmac(net, &hdr->saddr, isrh);
if (unlikely(err))
@@ -507,7 +509,7 @@ static int seg6_input_core(struct net *net, struct sock *sk,
local_bh_enable();
}
- err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst_dev(dst)));
if (unlikely(err))
goto drop;
} else {
@@ -518,7 +520,7 @@ static int seg6_input_core(struct net *net, struct sock *sk,
if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
dev_net(skb->dev), NULL, skb, NULL,
- skb_dst(skb)->dev, seg6_input_finish);
+ skb_dst_dev(skb), seg6_input_finish);
return seg6_input_finish(dev_net(skb->dev), NULL, skb);
drop:
@@ -528,7 +530,7 @@ drop:
static int seg6_input_nf(struct sk_buff *skb)
{
- struct net_device *dev = skb_dst(skb)->dev;
+ struct net_device *dev = skb_dst_dev(skb);
struct net *net = dev_net(skb->dev);
switch (skb->protocol) {
@@ -593,7 +595,7 @@ static int seg6_output_core(struct net *net, struct sock *sk,
local_bh_enable();
}
- err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst_dev(dst)));
if (unlikely(err))
goto drop;
}
@@ -603,7 +605,7 @@ static int seg6_output_core(struct net *net, struct sock *sk,
if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
- NULL, skb_dst(skb)->dev, dst_output);
+ NULL, dst_dev(dst), dst_output);
return dst_output(net, sk, skb);
drop:
@@ -614,7 +616,7 @@ drop:
static int seg6_output_nf(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- struct net_device *dev = skb_dst(skb)->dev;
+ struct net_device *dev = skb_dst_dev(skb);
switch (skb->protocol) {
case htons(ETH_P_IP):
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index a11a02b4ba95..2b41e4c0dddd 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -270,7 +270,7 @@ static void advance_nextseg(struct ipv6_sr_hdr *srh, struct in6_addr *daddr)
static int
seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
- u32 tbl_id, bool local_delivery)
+ u32 tbl_id, bool local_delivery, int oif)
{
struct net *net = dev_net(skb->dev);
struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -282,6 +282,7 @@ seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_iif = skb->dev->ifindex;
+ fl6.flowi6_oif = oif;
fl6.daddr = nhaddr ? *nhaddr : hdr->daddr;
fl6.saddr = hdr->saddr;
fl6.flowlabel = ip6_flowinfo(hdr);
@@ -291,17 +292,19 @@ seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
if (nhaddr)
fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
- if (!tbl_id) {
+ if (!tbl_id && !oif) {
dst = ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags);
- } else {
+ } else if (tbl_id) {
struct fib6_table *table;
table = fib6_get_table(net, tbl_id);
if (!table)
goto out;
- rt = ip6_pol_route(net, table, 0, &fl6, skb, flags);
+ rt = ip6_pol_route(net, table, oif, &fl6, skb, flags);
dst = &rt->dst;
+ } else {
+ dst = ip6_route_output(net, NULL, &fl6);
}
/* we want to discard traffic destined for local packet processing,
@@ -310,7 +313,7 @@ seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
if (!local_delivery)
dev_flags |= IFF_LOOPBACK;
- if (dst && (dst->dev->flags & dev_flags) && !dst->error) {
+ if (dst && (dst_dev(dst)->flags & dev_flags) && !dst->error) {
dst_release(dst);
dst = NULL;
}
@@ -330,7 +333,7 @@ out:
int seg6_lookup_nexthop(struct sk_buff *skb,
struct in6_addr *nhaddr, u32 tbl_id)
{
- return seg6_lookup_any_nexthop(skb, nhaddr, tbl_id, false);
+ return seg6_lookup_any_nexthop(skb, nhaddr, tbl_id, false, 0);
}
static __u8 seg6_flv_lcblock_octects(const struct seg6_flavors_info *finfo)
@@ -418,7 +421,7 @@ static int end_next_csid_core(struct sk_buff *skb, struct seg6_local_lwt *slwt)
static int input_action_end_x_finish(struct sk_buff *skb,
struct seg6_local_lwt *slwt)
{
- seg6_lookup_nexthop(skb, &slwt->nh6, 0);
+ seg6_lookup_any_nexthop(skb, &slwt->nh6, 0, false, slwt->oif);
return dst_input(skb);
}
@@ -1277,7 +1280,7 @@ static int input_action_end_dt6(struct sk_buff *skb,
/* note: this time we do not need to specify the table because the VRF
* takes care of selecting the correct table.
*/
- seg6_lookup_any_nexthop(skb, NULL, 0, true);
+ seg6_lookup_any_nexthop(skb, NULL, 0, true, 0);
return dst_input(skb);
@@ -1285,7 +1288,7 @@ legacy_mode:
#endif
skb_set_transport_header(skb, sizeof(struct ipv6hdr));
- seg6_lookup_any_nexthop(skb, NULL, slwt->table, true);
+ seg6_lookup_any_nexthop(skb, NULL, slwt->table, true, 0);
return dst_input(skb);
@@ -1477,7 +1480,8 @@ static struct seg6_action_desc seg6_action_table[] = {
.action = SEG6_LOCAL_ACTION_END_X,
.attrs = SEG6_F_ATTR(SEG6_LOCAL_NH6),
.optattrs = SEG6_F_LOCAL_COUNTERS |
- SEG6_F_LOCAL_FLAVORS,
+ SEG6_F_LOCAL_FLAVORS |
+ SEG6_F_ATTR(SEG6_LOCAL_OIF),
.input = input_action_end_x,
},
{
@@ -2083,7 +2087,7 @@ struct nla_policy seg6_local_flavors_policy[SEG6_LOCAL_FLV_MAX + 1] = {
static int seg6_chk_next_csid_cfg(__u8 block_len, __u8 func_len)
{
/* Locator-Block and Locator-Node Function cannot exceed 128 bits
- * (i.e. C-SID container lenghts).
+ * (i.e. C-SID container length).
*/
if (next_csid_chk_cntr_bits(block_len, func_len))
return -EINVAL;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index a72dbca9e8fc..cf37ad9686e6 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -848,6 +848,49 @@ static inline __be32 try_6rd(struct ip_tunnel *tunnel,
return dst;
}
+static bool ipip6_tunnel_dst_find(struct sk_buff *skb, __be32 *dst,
+ bool is_isatap)
+{
+ const struct ipv6hdr *iph6 = ipv6_hdr(skb);
+ struct neighbour *neigh = NULL;
+ const struct in6_addr *addr6;
+ bool found = false;
+ int addr_type;
+
+ if (skb_dst(skb))
+ neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
+
+ if (!neigh) {
+ net_dbg_ratelimited("nexthop == NULL\n");
+ return false;
+ }
+
+ addr6 = (const struct in6_addr *)&neigh->primary_key;
+ addr_type = ipv6_addr_type(addr6);
+
+ if (is_isatap) {
+ if ((addr_type & IPV6_ADDR_UNICAST) &&
+ ipv6_addr_is_isatap(addr6)) {
+ *dst = addr6->s6_addr32[3];
+ found = true;
+ }
+ } else {
+ if (addr_type == IPV6_ADDR_ANY) {
+ addr6 = &ipv6_hdr(skb)->daddr;
+ addr_type = ipv6_addr_type(addr6);
+ }
+
+ if ((addr_type & IPV6_ADDR_COMPATv4) != 0) {
+ *dst = addr6->s6_addr32[3];
+ found = true;
+ }
+ }
+
+ neigh_release(neigh);
+
+ return found;
+}
+
/*
* This function assumes it is being called from dev_queue_xmit()
* and that skb is filled properly by that function.
@@ -867,8 +910,6 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
__be32 dst = tiph->daddr;
struct flowi4 fl4;
int mtu;
- const struct in6_addr *addr6;
- int addr_type;
u8 ttl;
u8 protocol = IPPROTO_IPV6;
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
@@ -877,64 +918,15 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
tos = ipv6_get_dsfield(iph6);
/* ISATAP (RFC4214) - must come before 6to4 */
- if (dev->priv_flags & IFF_ISATAP) {
- struct neighbour *neigh = NULL;
- bool do_tx_error = false;
-
- if (skb_dst(skb))
- neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
-
- if (!neigh) {
- net_dbg_ratelimited("nexthop == NULL\n");
- goto tx_error;
- }
-
- addr6 = (const struct in6_addr *)&neigh->primary_key;
- addr_type = ipv6_addr_type(addr6);
-
- if ((addr_type & IPV6_ADDR_UNICAST) &&
- ipv6_addr_is_isatap(addr6))
- dst = addr6->s6_addr32[3];
- else
- do_tx_error = true;
-
- neigh_release(neigh);
- if (do_tx_error)
- goto tx_error;
- }
+ if ((dev->priv_flags & IFF_ISATAP) &&
+ !ipip6_tunnel_dst_find(skb, &dst, true))
+ goto tx_error;
if (!dst)
dst = try_6rd(tunnel, &iph6->daddr);
- if (!dst) {
- struct neighbour *neigh = NULL;
- bool do_tx_error = false;
-
- if (skb_dst(skb))
- neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
-
- if (!neigh) {
- net_dbg_ratelimited("nexthop == NULL\n");
- goto tx_error;
- }
-
- addr6 = (const struct in6_addr *)&neigh->primary_key;
- addr_type = ipv6_addr_type(addr6);
-
- if (addr_type == IPV6_ADDR_ANY) {
- addr6 = &ipv6_hdr(skb)->daddr;
- addr_type = ipv6_addr_type(addr6);
- }
-
- if ((addr_type & IPV6_ADDR_COMPATv4) != 0)
- dst = addr6->s6_addr32[3];
- else
- do_tx_error = true;
-
- neigh_release(neigh);
- if (do_tx_error)
- goto tx_error;
- }
+ if (!dst && !ipip6_tunnel_dst_find(skb, &dst, false))
+ goto tx_error;
flowi4_init_output(&fl4, tunnel->parms.link, tunnel->fwmark,
tos & INET_DSCP_MASK, RT_SCOPE_UNIVERSE,
@@ -1035,7 +1027,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
skb_set_inner_ipproto(skb, IPPROTO_IPV6);
iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
- df, !net_eq(tunnel->net, dev_net(dev)));
+ df, !net_eq(tunnel->net, dev_net(dev)), 0);
return NETDEV_TX_OK;
tx_error_icmp:
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 9d83eadd308b..7e007f013ec8 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -16,6 +16,7 @@
#include <net/secure_seq.h>
#include <net/ipv6.h>
#include <net/tcp.h>
+#include <net/tcp_ecn.h>
#define COOKIEBITS 24 /* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
@@ -236,7 +237,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
fl6.flowi6_mark = ireq->ir_mark;
fl6.fl6_dport = ireq->ir_rmt_port;
fl6.fl6_sport = inet_sk(sk)->inet_sport;
- fl6.flowi6_uid = sk->sk_uid;
+ fl6.flowi6_uid = sk_uid(sk);
security_req_classify_flow(req, flowi6_to_flowi_common(&fl6));
dst = ip6_dst_lookup_flow(net, sk, &fl6, final_p);
@@ -264,6 +265,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
if (!req->syncookie)
ireq->rcv_wscale = rcv_wscale;
ireq->ecn_ok &= cookie_ecn_ok(net, dst);
+ tcp_rsk(req)->accecn_ok = ireq->ecn_ok && cookie_accecn_ok(th);
ret = tcp_get_cookie_sock(sk, skb, req, dst);
if (!ret) {
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e8e68a142649..59c4977a811a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -41,6 +41,7 @@
#include <linux/random.h>
#include <linux/indirect_call_wrapper.h>
+#include <net/aligned_data.h>
#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
@@ -61,6 +62,7 @@
#include <net/hotdata.h>
#include <net/busy_poll.h>
#include <net/rstreason.h>
+#include <net/psp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
@@ -269,7 +271,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
fl6.fl6_sport = inet->inet_sport;
if (IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) && !fl6.fl6_sport)
fl6.flowi6_flags = FLOWI_FLAG_ANY_SPORT;
- fl6.flowi6_uid = sk->sk_uid;
+ fl6.flowi6_uid = sk_uid(sk);
opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
final_p = fl6_update_dst(&fl6, opt, &final);
@@ -298,12 +300,12 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
sk->sk_gso_type = SKB_GSO_TCPV6;
- ip6_dst_store(sk, dst, NULL, NULL);
+ ip6_dst_store(sk, dst, false, false);
- icsk->icsk_ext_hdr_len = 0;
+ icsk->icsk_ext_hdr_len = psp_sk_overhead(sk);
if (opt)
- icsk->icsk_ext_hdr_len = opt->opt_flen +
- opt->opt_nflen;
+ icsk->icsk_ext_hdr_len += opt->opt_flen +
+ opt->opt_nflen;
tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
@@ -387,8 +389,7 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
bool fatal;
int err;
- sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- &hdr->daddr, th->dest,
+ sk = __inet6_lookup_established(net, &hdr->daddr, th->dest,
&hdr->saddr, ntohs(th->source),
skb->dev->ifindex, inet6_sdif(skb));
@@ -544,6 +545,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
if (skb) {
+ tcp_rsk(req)->syn_ect_snt = np->tclass & INET_ECN_MASK;
__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
&ireq->ir_v6_rmt_addr);
@@ -835,7 +837,6 @@ static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
.family = AF_INET6,
.obj_size = sizeof(struct tcp6_request_sock),
- .rtx_syn_ack = tcp_rtx_synack,
.send_ack = tcp_v6_reqsk_send_ack,
.destructor = tcp_v6_reqsk_destructor,
.send_reset = tcp_v6_send_reset,
@@ -868,7 +869,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
int oif, int rst, u8 tclass, __be32 label,
u32 priority, u32 txhash, struct tcp_key *key)
{
- struct net *net = sk ? sock_net(sk) : dev_net_rcu(skb_dst(skb)->dev);
+ struct net *net = sk ? sock_net(sk) : skb_dst_dev_net_rcu(skb);
unsigned int tot_len = sizeof(struct tcphdr);
struct sock *ctl_sk = net->ipv6.tcp_sk;
const struct tcphdr *th = tcp_hdr(skb);
@@ -973,6 +974,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
if (sk) {
/* unconstify the socket only to attach it to buff with care. */
skb_set_owner_edemux(buff, (struct sock *)sk);
+ psp_reply_set_decrypted(sk, buff);
if (sk->sk_state == TCP_TIME_WAIT)
mark = inet_twsk(sk)->tw_mark;
@@ -1043,7 +1045,7 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb,
if (!sk && !ipv6_unicast_destination(skb))
return;
- net = sk ? sock_net(sk) : dev_net_rcu(skb_dst(skb)->dev);
+ net = sk ? sock_net(sk) : skb_dst_dev_net_rcu(skb);
/* Invalid TCP option size or twice included auth */
if (tcp_parse_auth_options(th, &md5_hash_location, &aoh))
return;
@@ -1073,8 +1075,7 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb,
* Incoming packet is checked with md5 hash with finding key,
* no RST generated if md5 hash doesn't match.
*/
- sk1 = inet6_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
- NULL, 0, &ipv6h->saddr, th->source,
+ sk1 = inet6_lookup_listener(net, NULL, 0, &ipv6h->saddr, th->source,
&ipv6h->daddr, ntohs(th->source),
dif, sdif);
if (!sk1)
@@ -1431,17 +1432,17 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
ireq = inet_rsk(req);
if (sk_acceptq_is_full(sk))
- goto out_overflow;
+ goto exit_overflow;
if (!dst) {
dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
if (!dst)
- goto out;
+ goto exit;
}
newsk = tcp_create_openreq_child(sk, req, skb);
if (!newsk)
- goto out_nonewsk;
+ goto exit_nonewsk;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks
@@ -1460,7 +1461,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
- ip6_dst_store(newsk, dst, NULL, NULL);
+ ip6_dst_store(newsk, dst, false, false);
newnp->saddr = ireq->ir_v6_loc_addr;
@@ -1525,25 +1526,19 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
const union tcp_md5_addr *addr;
addr = (union tcp_md5_addr *)&newsk->sk_v6_daddr;
- if (tcp_md5_key_copy(newsk, addr, AF_INET6, 128, l3index, key)) {
- inet_csk_prepare_forced_close(newsk);
- tcp_done(newsk);
- goto out;
- }
+ if (tcp_md5_key_copy(newsk, addr, AF_INET6, 128, l3index, key))
+ goto put_and_exit;
}
}
#endif
#ifdef CONFIG_TCP_AO
/* Copy over tcp_ao_info if any */
if (tcp_ao_copy_all_matching(sk, newsk, req, skb, AF_INET6))
- goto out; /* OOM */
+ goto put_and_exit; /* OOM */
#endif
- if (__inet_inherit_port(sk, newsk) < 0) {
- inet_csk_prepare_forced_close(newsk);
- tcp_done(newsk);
- goto out;
- }
+ if (__inet_inherit_port(sk, newsk) < 0)
+ goto put_and_exit;
*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
&found_dup_sk);
if (*own_req) {
@@ -1570,13 +1565,17 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
return newsk;
-out_overflow:
+exit_overflow:
__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
-out_nonewsk:
+exit_nonewsk:
dst_release(dst);
-out:
+exit:
tcp_listendrop(sk);
return NULL;
+put_and_exit:
+ inet_csk_prepare_forced_close(newsk);
+ tcp_done(newsk);
+ goto exit;
}
INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
@@ -1608,6 +1607,10 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
if (skb->protocol == htons(ETH_P_IP))
return tcp_v4_do_rcv(sk, skb);
+ reason = psp_sk_rx_policy_check(sk, skb);
+ if (reason)
+ goto err_discard;
+
/*
* socket locking is here for SMP purposes as backlog rcv
* is currently called with bh processing disabled.
@@ -1687,6 +1690,7 @@ csum_err:
reason = SKB_DROP_REASON_TCP_CSUM;
trace_tcp_bad_csum(skb);
TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+err_discard:
TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
goto discard;
@@ -1789,7 +1793,7 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
hdr = ipv6_hdr(skb);
lookup:
- sk = __inet6_lookup_skb(net->ipv4.tcp_death_row.hashinfo, skb, __tcp_hdrlen(th),
+ sk = __inet6_lookup_skb(skb, __tcp_hdrlen(th),
th->source, th->dest, inet6_iif(skb), sdif,
&refcounted);
if (!sk)
@@ -1811,7 +1815,7 @@ lookup:
&hdr->saddr, &hdr->daddr,
AF_INET6, dif, sdif);
if (drop_reason) {
- sk_drops_add(sk, skb);
+ sk_drops_skbadd(sk, skb);
reqsk_put(req);
goto discard_it;
}
@@ -1834,14 +1838,12 @@ lookup:
}
refcounted = true;
nsk = NULL;
- if (!tcp_filter(sk, skb)) {
+ if (!tcp_filter(sk, skb, &drop_reason)) {
th = (const struct tcphdr *)skb->data;
hdr = ipv6_hdr(skb);
tcp_v6_fill_cb(skb, hdr, th);
nsk = tcp_check_req(sk, skb, req, false, &req_stolen,
&drop_reason);
- } else {
- drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
}
if (!nsk) {
reqsk_put(req);
@@ -1897,10 +1899,9 @@ process:
nf_reset_ct(skb);
- if (tcp_filter(sk, skb)) {
- drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
+ if (tcp_filter(sk, skb, &drop_reason))
goto discard_and_relse;
- }
+
th = (const struct tcphdr *)skb->data;
hdr = ipv6_hdr(skb);
tcp_v6_fill_cb(skb, hdr, th);
@@ -1953,7 +1954,7 @@ discard_it:
return 0;
discard_and_relse:
- sk_drops_add(sk, skb);
+ sk_drops_skbadd(sk, skb);
if (refcounted)
sock_put(sk);
goto discard_it;
@@ -1979,8 +1980,7 @@ do_time_wait:
{
struct sock *sk2;
- sk2 = inet6_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
- skb, __tcp_hdrlen(th),
+ sk2 = inet6_lookup_listener(net, skb, __tcp_hdrlen(th),
&ipv6_hdr(skb)->saddr, th->source,
&ipv6_hdr(skb)->daddr,
ntohs(th->dest),
@@ -1995,6 +1995,10 @@ do_time_wait:
__this_cpu_write(tcp_tw_isn, isn);
goto process;
}
+
+ drop_reason = psp_twsk_rx_policy_check(inet_twsk(sk), skb);
+ if (drop_reason)
+ break;
}
/* to ACK */
fallthrough;
@@ -2032,8 +2036,7 @@ void tcp_v6_early_demux(struct sk_buff *skb)
return;
/* Note : We use inet6_iif() here, not tcp_v6_iif() */
- sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- &hdr->saddr, th->source,
+ sk = __inet6_lookup_established(net, &hdr->saddr, th->source,
&hdr->daddr, ntohs(th->dest),
inet6_iif(skb), inet6_sdif(skb));
if (sk) {
@@ -2053,7 +2056,6 @@ void tcp_v6_early_demux(struct sk_buff *skb)
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct tcp6_timewait_sock),
- .twsk_destructor = tcp_twsk_destructor,
};
INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
@@ -2120,6 +2122,13 @@ static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
.ao_calc_key_sk = tcp_v4_ao_calc_key_sk,
#endif
};
+
+static void tcp6_destruct_sock(struct sock *sk)
+{
+ tcp_md5_destruct_sock(sk);
+ tcp_ao_destroy_sock(sk, false);
+ inet6_sock_destruct(sk);
+}
#endif
/* NOTE: A lot of things set to zero explicitly by call to
@@ -2135,6 +2144,7 @@ static int tcp_v6_init_sock(struct sock *sk)
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
+ sk->sk_destruct = tcp6_destruct_sock;
#endif
return 0;
@@ -2168,7 +2178,7 @@ static void get_openreq6(struct seq_file *seq,
jiffies_to_clock_t(ttd),
req->num_timeout,
from_kuid_munged(seq_user_ns(seq),
- sock_i_uid(req->rsk_listener)),
+ sk_uid(req->rsk_listener)),
0, /* non standard timer */
0, /* open_requests have no inode */
0, req);
@@ -2233,9 +2243,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
rx_queue,
timer_active,
jiffies_delta_to_clock_t(timer_expires - jiffies),
- icsk->icsk_retransmits,
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
- icsk->icsk_probes_out,
+ READ_ONCE(icsk->icsk_retransmits),
+ from_kuid_munged(seq_user_ns(seq), sk_uid(sp)),
+ READ_ONCE(icsk->icsk_probes_out),
sock_i_ino(sp),
refcount_read(&sp->sk_refcnt), sp,
jiffies_to_clock_t(icsk->icsk_rto),
@@ -2345,7 +2355,7 @@ struct proto tcpv6_prot = {
.splice_eof = tcp_splice_eof,
.backlog_rcv = tcp_v6_do_rcv,
.release_cb = tcp_release_cb,
- .hash = inet6_hash,
+ .hash = inet_hash,
.unhash = inet_unhash,
.get_port = inet_csk_get_port,
.put_port = inet_put_port,
@@ -2357,11 +2367,10 @@ struct proto tcpv6_prot = {
.stream_memory_free = tcp_stream_memory_free,
.sockets_allocated = &tcp_sockets_allocated,
- .memory_allocated = &tcp_memory_allocated,
+ .memory_allocated = &net_aligned_data.tcp_memory_allocated,
.per_cpu_fw_alloc = &tcp_memory_per_cpu_fw_alloc,
.memory_pressure = &tcp_memory_pressure,
- .orphan_count = &tcp_orphan_count,
.sysctl_mem = sysctl_tcp_mem,
.sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
.sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
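
The tcp_v6_syn_recv_sock() hunk above folds three copies of the forced-close sequence (inet_csk_prepare_forced_close() followed by tcp_done()) into a single put_and_exit label that jumps back into the existing exit path. A minimal userspace sketch of that error-path consolidation follows; the struct, fields and resources are invented for illustration and are not the kernel helpers:

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-ins for the resources a real setup path would create. */
    struct conn { char *buf; FILE *log; };

    static struct conn *conn_create(const char *path)
    {
        struct conn *c = calloc(1, sizeof(*c));

        if (!c)
            goto exit;              /* nothing allocated yet */

        c->buf = malloc(4096);
        if (!c->buf)
            goto put_and_exit;      /* undo the partial setup in one place */

        c->log = fopen(path, "w");
        if (!c->log)
            goto put_and_exit;

        return c;

    put_and_exit:
        /* single tear-down sequence instead of repeating it at every failure site */
        free(c->buf);
        free(c);
    exit:
        fprintf(stderr, "conn_create failed\n");
        return NULL;
    }

    int main(void)
    {
        struct conn *c = conn_create("/tmp/conn.log");

        if (!c)
            return 1;
        fclose(c->log);
        free(c->buf);
        free(c);
        return 0;
    }

Keeping the tear-down in one place means a new failure point only needs a goto rather than another copy of the cleanup sequence.
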
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index a8a04f441e78..effeba58630b 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -36,8 +36,7 @@ static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
inet6_get_iif_sdif(skb, &iif, &sdif);
hdr = skb_gro_network_header(skb);
net = dev_net_rcu(skb->dev);
- sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- &hdr->saddr, th->source,
+ sk = __inet6_lookup_established(net, &hdr->saddr, th->source,
&hdr->daddr, ntohs(th->dest),
iif, sdif);
NAPI_GRO_CB(skb)->is_flist = !sk;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 7317f8e053f1..813a2ba75824 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -67,10 +67,11 @@ static void udpv6_destruct_sock(struct sock *sk)
int udpv6_init_sock(struct sock *sk)
{
- udp_lib_init_sock(sk);
+ int res = udp_lib_init_sock(sk);
+
sk->sk_destruct = udpv6_destruct_sock;
set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
- return 0;
+ return res;
}
INDIRECT_CALLABLE_SCOPE
@@ -260,7 +261,7 @@ rescore:
/* compute_score is too long of a function to be
* inlined, and calling it again here yields
- * measureable overhead for some
+ * measurable overhead for some
* workloads. Work around it by jumping
* backwards to rescore 'result'.
*/
@@ -449,7 +450,7 @@ struct sock *udp6_lib_lookup(const struct net *net, const struct in6_addr *saddr
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif
-/* do not use the scratch area len for jumbogram: their length execeeds the
+/* do not use the scratch area len for jumbogram: their length exceeds the
* scratch area space; note that the IP6CB flags is still in the first
* cacheline, so checking for jumbograms is cheap
*/
@@ -479,7 +480,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len, addr_len);
- if (np->rxpmtu && np->rxopt.bits.rxpmtu)
+ if (np->rxopt.bits.rxpmtu && READ_ONCE(np->rxpmtu))
return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
try_again:
@@ -524,7 +525,7 @@ try_again:
}
if (unlikely(err)) {
if (!peeking) {
- atomic_inc(&sk->sk_drops);
+ udp_drops_inc(sk);
SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
}
kfree_skb(skb);
@@ -750,7 +751,8 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (type == NDISC_REDIRECT) {
if (tunnel) {
ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
- READ_ONCE(sk->sk_mark), sk->sk_uid);
+ READ_ONCE(sk->sk_mark),
+ sk_uid(sk));
} else {
ip6_sk_redirect(skb, sk);
}
@@ -893,10 +895,8 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
udp_lib_checksum_complete(skb))
goto csum_error;
- if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
- drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
+ if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr), &drop_reason))
goto drop;
- }
udp_csum_pull_header(skb);
@@ -909,7 +909,7 @@ csum_error:
__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
- atomic_inc(&sk->sk_drops);
+ udp_drops_inc(sk);
sk_skb_reason_drop(sk, skb, drop_reason);
return -1;
}
@@ -1014,7 +1014,7 @@ start_lookup:
}
nskb = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!nskb)) {
- atomic_inc(&sk->sk_drops);
+ udp_drops_inc(sk);
__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
IS_UDPLITE(sk));
__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
@@ -1049,7 +1049,7 @@ static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
}
-/* wrapper for udp_queue_rcv_skb tacking care of csum conversion and
+/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
* return code conversion for ip layer consumption
*/
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
@@ -1620,7 +1620,7 @@ do_udp_sendmsg:
if (!fl6->flowi6_oif)
fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
- fl6->flowi6_uid = sk->sk_uid;
+ fl6->flowi6_uid = sk_uid(sk);
if (msg->msg_controllen) {
opt = &opt_space;
@@ -1924,7 +1924,7 @@ struct proto udpv6_prot = {
.psock_update_sk_prot = udp_bpf_update_proto,
#endif
- .memory_allocated = &udp_memory_allocated,
+ .memory_allocated = &net_aligned_data.udp_memory_allocated,
.per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc,
.sysctl_mem = sysctl_udp_mem,
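
Several hunks above (tcp_filter() in tcp_ipv6.c, sk_filter_trim_cap() in udp.c) stop hard-coding SKB_DROP_REASON_SOCKET_FILTER at each call site and instead let the filter report the precise reason through a pointer argument. A small standalone sketch of that out-parameter pattern, with made-up reason values and function names:

    #include <stdio.h>

    /* Hypothetical reasons, loosely modelled on the SKB_DROP_REASON_* idea. */
    enum drop_reason {
        DROP_NOT_DROPPED = 0,
        DROP_FILTER,
        DROP_TOO_SHORT,
    };

    /* The filter fills in *reason itself, so callers no longer guess one. */
    static int pkt_filter(const char *pkt, int len, enum drop_reason *reason)
    {
        if (len < 4) {
            *reason = DROP_TOO_SHORT;
            return 1;
        }
        if (pkt[0] == 'X') {
            *reason = DROP_FILTER;
            return 1;
        }
        return 0;
    }

    static void rcv(const char *pkt, int len)
    {
        enum drop_reason reason = DROP_NOT_DROPPED;

        if (pkt_filter(pkt, len, &reason)) {
            printf("dropped, reason=%d\n", reason);
            return;
        }
        printf("accepted: %.*s\n", len, pkt);
    }

    int main(void)
    {
        rcv("ok-packet", 9);
        rcv("Xbad-packet", 11);
        rcv("no", 2);
        return 0;
    }
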
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index 0590f566379d..8a406be25a3a 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _UDP6_IMPL_H
#define _UDP6_IMPL_H
+#include <net/aligned_data.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <net/protocol.h>
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index d8445ac1b2e4..046f13b1d77a 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -154,8 +154,6 @@ struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb)
ip6_gro_compute_pseudo);
skip:
- NAPI_GRO_CB(skb)->is_ipv6 = 1;
-
if (static_branch_unlikely(&udpv6_encap_needed_key))
sk = udp6_gro_lookup_skb(skb, uh->source, uh->dest);
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index a60bec9b14f1..2cec542437f7 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -59,7 +59,7 @@ struct proto udplitev6_prot = {
.rehash = udp_v6_rehash,
.get_port = udp_v6_get_port,
- .memory_allocated = &udp_memory_allocated,
+ .memory_allocated = &net_aligned_data.udp_memory_allocated,
.per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc,
.sysctl_mem = sysctl_udp_mem,
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 841c81abaaf4..9005fc156a20 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -202,6 +202,9 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0)
goto out;
+ /* set the transport header to ESP */
+ skb_set_transport_header(skb, offset);
+
NAPI_GRO_CB(skb)->proto = IPPROTO_UDP;
pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index b3d5d1f266ee..512bdaf13699 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -106,7 +106,7 @@ skip_frag:
int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
- net, sk, skb, skb->dev, skb_dst(skb)->dev,
+ net, sk, skb, skb->dev, skb_dst_dev(skb),
__xfrm6_output,
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index bf140ef781c1..0a0eeaed0591 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -334,8 +334,8 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
unsigned int i;
+ xfrm_state_flush(net, 0, false);
xfrm_flush_gc();
- xfrm_state_flush(net, 0, false, true);
for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index cc2b3c44bc05..6c717a7ef292 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1187,7 +1187,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
IUCV_SKB_CB(skb)->offset = 0;
if (sk_filter(sk, skb)) {
- atomic_inc(&sk->sk_drops); /* skb rejected by filter */
+ sk_drops_inc(sk); /* skb rejected by filter */
kfree_skb(skb);
return;
}
@@ -2011,7 +2011,7 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
skb_reset_network_header(skb);
IUCV_SKB_CB(skb)->offset = 0;
if (sk_filter(sk, skb)) {
- atomic_inc(&sk->sk_drops); /* skb rejected by filter */
+ sk_drops_inc(sk); /* skb rejected by filter */
kfree_skb(skb);
return NET_RX_SUCCESS;
}
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 83070a2e4485..473a7847d80b 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -24,6 +24,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel_stat.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 24aec295a51c..b4f01cb07561 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -19,6 +19,7 @@
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
+#include <linux/splice.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
@@ -429,7 +430,7 @@ static void psock_write_space(struct sock *sk)
/* Check if the socket is reserved so someone is waiting for sending. */
kcm = psock->tx_kcm;
- if (kcm && !unlikely(kcm->tx_stopped))
+ if (kcm)
queue_work(kcm_wq, &kcm->tx_work);
spin_unlock_bh(&mux->lock);
@@ -835,8 +836,7 @@ start:
if (!sk_wmem_schedule(sk, copy))
goto wait_for_memory;
- err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
- sk->sk_allocation);
+ err = skb_splice_from_iter(skb, &msg->msg_iter, copy);
if (err < 0) {
if (err == -EMSGSIZE)
goto wait_for_memory;
@@ -1030,6 +1030,11 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
ssize_t copied;
struct sk_buff *skb;
+ if (sock->file->f_flags & O_NONBLOCK || flags & SPLICE_F_NONBLOCK)
+ flags = MSG_DONTWAIT;
+ else
+ flags = 0;
+
/* Only support splice for SOCKSEQPACKET */
skb = skb_recv_datagram(sk, flags, &err);
@@ -1688,12 +1693,6 @@ static int kcm_release(struct socket *sock)
*/
__skb_queue_purge(&sk->sk_write_queue);
- /* Set tx_stopped. This is checked when psock is bound to a kcm and we
- * get a writespace callback. This prevents further work being queued
- * from the callback (unbinding the psock occurs after canceling work.
- */
- kcm->tx_stopped = 1;
-
release_sock(sk);
spin_lock_bh(&mux->lock);
@@ -1709,7 +1708,7 @@ static int kcm_release(struct socket *sock)
/* Cancel work. After this point there should be no outside references
* to the kcm socket.
*/
- cancel_work_sync(&kcm->tx_work);
+ disable_work_sync(&kcm->tx_work);
lock_sock(sk);
psock = kcm->tx_psock;
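
The kcm_splice_read() change above derives MSG_DONTWAIT from the file's O_NONBLOCK flag or SPLICE_F_NONBLOCK instead of passing the raw splice flags through to skb_recv_datagram(). A tiny userspace sketch of that flag translation, assuming ordinary Linux headers (build with _GNU_SOURCE for SPLICE_F_NONBLOCK):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/socket.h>

    /* Map the file/splice non-blocking hints to the recv-style MSG_DONTWAIT
     * flag and discard everything else, as the hunk above does. */
    static int splice_to_msg_flags(int f_flags, unsigned int splice_flags)
    {
        if ((f_flags & O_NONBLOCK) || (splice_flags & SPLICE_F_NONBLOCK))
            return MSG_DONTWAIT;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", splice_to_msg_flags(O_NONBLOCK, 0) == MSG_DONTWAIT);
        printf("%d\n", splice_to_msg_flags(0, SPLICE_F_NONBLOCK) == MSG_DONTWAIT);
        printf("%d\n", splice_to_msg_flags(0, 0) == 0);
        return 0;
    }
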
diff --git a/net/key/af_key.c b/net/key/af_key.c
index efc2a91f4c48..2ebde0352245 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1766,7 +1766,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m
if (proto == 0)
return -EINVAL;
- err = xfrm_state_flush(net, proto, true, false);
+ err = xfrm_state_flush(net, proto, true);
err2 = unicast_flush_resp(sk, hdr);
if (err || err2) {
if (err == -ESRCH) /* empty table - go quietly */
@@ -3788,7 +3788,7 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
refcount_read(&s->sk_refcnt),
sk_rmem_alloc_get(s),
sk_wmem_alloc_get(s),
- from_kuid_munged(seq_user_ns(f), sock_i_uid(s)),
+ from_kuid_munged(seq_user_ns(f), sk_uid(s)),
sock_i_ino(s)
);
return 0;
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index b98d13584c81..ea232f338dcb 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -545,7 +545,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_mark = READ_ONCE(sk->sk_mark);
- fl6.flowi6_uid = sk->sk_uid;
+ fl6.flowi6_uid = sk_uid(sk);
ipcm6_init_sk(&ipc6, sk);
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index fc5c2fd8f34c..5e12e7ce17d8 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -129,22 +129,12 @@ static const struct ppp_channel_ops pppol2tp_chan_ops = {
static const struct proto_ops pppol2tp_ops;
-/* Retrieves the pppol2tp socket associated to a session.
- * A reference is held on the returned socket, so this function must be paired
- * with sock_put().
- */
+/* Retrieves the pppol2tp socket associated to a session. */
static struct sock *pppol2tp_session_get_sock(struct l2tp_session *session)
{
struct pppol2tp_session *ps = l2tp_session_priv(session);
- struct sock *sk;
-
- rcu_read_lock();
- sk = rcu_dereference(ps->sk);
- if (sk)
- sock_hold(sk);
- rcu_read_unlock();
- return sk;
+ return rcu_dereference(ps->sk);
}
/* Helpers to obtain tunnel/session contexts from sockets.
@@ -206,14 +196,13 @@ end:
static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
{
- struct pppol2tp_session *ps = l2tp_session_priv(session);
- struct sock *sk = NULL;
+ struct sock *sk;
/* If the socket is bound, send it in to PPP's input queue. Otherwise
* queue it on the session socket.
*/
rcu_read_lock();
- sk = rcu_dereference(ps->sk);
+ sk = pppol2tp_session_get_sock(session);
if (!sk)
goto no_sock;
@@ -510,13 +499,14 @@ static void pppol2tp_show(struct seq_file *m, void *arg)
struct l2tp_session *session = arg;
struct sock *sk;
+ rcu_read_lock();
sk = pppol2tp_session_get_sock(session);
if (sk) {
struct pppox_sock *po = pppox_sk(sk);
seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
- sock_put(sk);
}
+ rcu_read_unlock();
}
static void pppol2tp_session_init(struct l2tp_session *session)
@@ -1530,6 +1520,7 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
port = ntohs(inet->inet_sport);
}
+ rcu_read_lock();
sk = pppol2tp_session_get_sock(session);
if (sk) {
state = sk->sk_state;
@@ -1565,8 +1556,8 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
struct pppox_sock *po = pppox_sk(sk);
seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
- sock_put(sk);
}
+ rcu_read_unlock();
}
static int pppol2tp_seq_show(struct seq_file *m, void *v)
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index cc77ec5769d8..5958a80fe14c 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -210,7 +210,7 @@ static int llc_ui_release(struct socket *sock)
dprintk("%s: closing local(%02X) remote(%02X)\n", __func__,
llc->laddr.lsap, llc->daddr.lsap);
if (!llc_send_disc(sk))
- llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
+ llc_ui_wait_for_disc(sk, READ_ONCE(sk->sk_rcvtimeo));
if (!sock_flag(sk, SOCK_ZAPPED)) {
struct llc_sap *sap = llc->sap;
@@ -455,7 +455,7 @@ static int llc_ui_shutdown(struct socket *sock, int how)
goto out;
rc = llc_send_disc(sk);
if (!rc)
- rc = llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
+ rc = llc_ui_wait_for_disc(sk, READ_ONCE(sk->sk_rcvtimeo));
/* Wake up anyone sleeping in poll */
sk->sk_state_change(sk);
out:
@@ -712,7 +712,7 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock,
goto out;
/* wait for a connection to arrive. */
if (skb_queue_empty(&sk->sk_receive_queue)) {
- rc = llc_wait_data(sk, sk->sk_rcvtimeo);
+ rc = llc_wait_data(sk, READ_ONCE(sk->sk_rcvtimeo));
if (rc)
goto out;
}
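
The af_llc.c hunks above wrap sk->sk_rcvtimeo in READ_ONCE() because the timeout can be rewritten concurrently via setsockopt(). As a rough illustration only, the core of READ_ONCE() is a load through a volatile-qualified pointer; the sketch below uses a simplified macro and is not the kernel's full definition, which also covers types the compiler cannot load in one access:

    #include <stdio.h>

    /* Simplified form of READ_ONCE(): force the compiler to perform exactly
     * one load and not cache or re-read the value across surrounding code. */
    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    static long rcv_timeout = 5000;     /* imagine another thread updating this */

    static void wait_for_event(void)
    {
        long timeout = READ_ONCE(rcv_timeout);  /* sample it exactly once */

        printf("waiting up to %ld ms\n", timeout);
    }

    int main(void)
    {
        wait_for_event();
        return 0;
    }
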
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index 07e9abb5978a..aa81c67b24a1 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -151,7 +151,7 @@ static int llc_seq_socket_show(struct seq_file *seq, void *v)
sk_wmem_alloc_get(sk),
sk_rmem_alloc_get(sk) - llc->copied_seq,
sk->sk_state,
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+ from_kuid_munged(seq_user_ns(seq), sk_uid(sk)),
llc->link);
out:
return 0;
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index ee534797c033..e38f46ffebfa 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -299,7 +299,8 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
if (!sta->sta.valid_links &&
!sta->sta.deflink.ht_cap.ht_supported &&
- !sta->sta.deflink.he_cap.has_he) {
+ !sta->sta.deflink.he_cap.has_he &&
+ !sta->sta.deflink.s1g_cap.s1g) {
ht_dbg(sta->sdata,
"STA %pM erroneously requests BA session on tid %d w/o HT\n",
sta->sta.addr, tid);
@@ -327,7 +328,8 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
/* XXX: check own ht delayed BA capability?? */
if (((ba_policy != 1) &&
(sta->sta.valid_links ||
- !(sta->sta.deflink.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) ||
+ !(sta->sta.deflink.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA) ||
+ !(sta->sta.deflink.s1g_cap.cap[3] & S1G_CAP3_HT_DELAYED_BA))) ||
(buf_size > max_buf_size)) {
status = WLAN_STATUS_INVALID_QOS_PARAM;
ht_dbg_ratelimited(sta->sdata,
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index dbd9ad5f3992..d981b0fc57bf 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -616,7 +616,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
!pubsta->deflink.ht_cap.ht_supported &&
!pubsta->deflink.vht_cap.vht_supported &&
!pubsta->deflink.he_cap.has_he &&
- !pubsta->deflink.eht_cap.has_eht)
+ !pubsta->deflink.eht_cap.has_eht &&
+ !pubsta->deflink.s1g_cap.s1g)
return -EINVAL;
if (WARN_ON_ONCE(!local->ops->ampdu_action))
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d9d88f2f2831..d9aca1c3c097 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -178,6 +178,7 @@ static int ieee80211_set_ap_mbssid_options(struct ieee80211_sub_if_data *sdata,
link_conf->nontransmitted = true;
link_conf->bssid_index = params->index;
+ link_conf->bssid_indicator = tx_bss_conf->bssid_indicator;
}
if (params->ema)
link_conf->ema_ap = true;
@@ -310,6 +311,96 @@ static void ieee80211_stop_p2p_device(struct wiphy *wiphy,
ieee80211_sdata_stop(IEEE80211_WDEV_TO_SUB_IF(wdev));
}
+static void ieee80211_nan_conf_free(struct cfg80211_nan_conf *conf)
+{
+ kfree(conf->cluster_id);
+ kfree(conf->extra_nan_attrs);
+ kfree(conf->vendor_elems);
+ memset(conf, 0, sizeof(*conf));
+}
+
+static void ieee80211_stop_nan(struct wiphy *wiphy,
+ struct wireless_dev *wdev)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+
+ if (!sdata->u.nan.started)
+ return;
+
+ drv_stop_nan(sdata->local, sdata);
+ sdata->u.nan.started = false;
+
+ ieee80211_nan_conf_free(&sdata->u.nan.conf);
+
+ ieee80211_sdata_stop(sdata);
+ ieee80211_recalc_idle(sdata->local);
+}
+
+static int ieee80211_nan_conf_copy(struct cfg80211_nan_conf *dst,
+ struct cfg80211_nan_conf *src,
+ u32 changes)
+{
+ if (changes & CFG80211_NAN_CONF_CHANGED_PREF)
+ dst->master_pref = src->master_pref;
+
+ if (changes & CFG80211_NAN_CONF_CHANGED_BANDS)
+ dst->bands = src->bands;
+
+ if (changes & CFG80211_NAN_CONF_CHANGED_CONFIG) {
+ dst->scan_period = src->scan_period;
+ dst->scan_dwell_time = src->scan_dwell_time;
+ dst->discovery_beacon_interval =
+ src->discovery_beacon_interval;
+ dst->enable_dw_notification = src->enable_dw_notification;
+ memcpy(&dst->band_cfgs, &src->band_cfgs,
+ sizeof(dst->band_cfgs));
+
+ kfree(dst->cluster_id);
+ dst->cluster_id = NULL;
+
+ kfree(dst->extra_nan_attrs);
+ dst->extra_nan_attrs = NULL;
+ dst->extra_nan_attrs_len = 0;
+
+ kfree(dst->vendor_elems);
+ dst->vendor_elems = NULL;
+ dst->vendor_elems_len = 0;
+
+ if (src->cluster_id) {
+ dst->cluster_id = kmemdup(src->cluster_id, ETH_ALEN,
+ GFP_KERNEL);
+ if (!dst->cluster_id)
+ goto no_mem;
+ }
+
+ if (src->extra_nan_attrs && src->extra_nan_attrs_len) {
+ dst->extra_nan_attrs = kmemdup(src->extra_nan_attrs,
+ src->extra_nan_attrs_len,
+ GFP_KERNEL);
+ if (!dst->extra_nan_attrs)
+ goto no_mem;
+
+ dst->extra_nan_attrs_len = src->extra_nan_attrs_len;
+ }
+
+ if (src->vendor_elems && src->vendor_elems_len) {
+ dst->vendor_elems = kmemdup(src->vendor_elems,
+ src->vendor_elems_len,
+ GFP_KERNEL);
+ if (!dst->vendor_elems)
+ goto no_mem;
+
+ dst->vendor_elems_len = src->vendor_elems_len;
+ }
+ }
+
+ return 0;
+
+no_mem:
+ ieee80211_nan_conf_free(dst);
+ return -ENOMEM;
+}
+
static int ieee80211_start_nan(struct wiphy *wiphy,
struct wireless_dev *wdev,
struct cfg80211_nan_conf *conf)
@@ -319,6 +410,9 @@ static int ieee80211_start_nan(struct wiphy *wiphy,
lockdep_assert_wiphy(sdata->local->hw.wiphy);
+ if (sdata->u.nan.started)
+ return -EALREADY;
+
ret = ieee80211_check_combinations(sdata, NULL, 0, 0, -1);
if (ret < 0)
return ret;
@@ -328,21 +422,21 @@ static int ieee80211_start_nan(struct wiphy *wiphy,
return ret;
ret = drv_start_nan(sdata->local, sdata, conf);
- if (ret)
+ if (ret) {
ieee80211_sdata_stop(sdata);
+ return ret;
+ }
- sdata->u.nan.conf = *conf;
-
- return ret;
-}
+ sdata->u.nan.started = true;
+ ieee80211_recalc_idle(sdata->local);
-static void ieee80211_stop_nan(struct wiphy *wiphy,
- struct wireless_dev *wdev)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ ret = ieee80211_nan_conf_copy(&sdata->u.nan.conf, conf, 0xFFFFFFFF);
+ if (ret) {
+ ieee80211_stop_nan(wiphy, wdev);
+ return ret;
+ }
- drv_stop_nan(sdata->local, sdata);
- ieee80211_sdata_stop(sdata);
+ return 0;
}
static int ieee80211_nan_change_conf(struct wiphy *wiphy,
@@ -351,7 +445,7 @@ static int ieee80211_nan_change_conf(struct wiphy *wiphy,
u32 changes)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
- struct cfg80211_nan_conf new_conf;
+ struct cfg80211_nan_conf new_conf = {};
int ret = 0;
if (sdata->vif.type != NL80211_IFTYPE_NAN)
@@ -360,17 +454,28 @@ static int ieee80211_nan_change_conf(struct wiphy *wiphy,
if (!ieee80211_sdata_running(sdata))
return -ENETDOWN;
- new_conf = sdata->u.nan.conf;
+ if (!changes)
+ return 0;
- if (changes & CFG80211_NAN_CONF_CHANGED_PREF)
- new_conf.master_pref = conf->master_pref;
+ /* First make a full copy of the previous configuration and then apply
+ * the changes. This might be a little wasteful, but it is simpler.
+ */
+ ret = ieee80211_nan_conf_copy(&new_conf, &sdata->u.nan.conf,
+ 0xFFFFFFFF);
+ if (ret < 0)
+ return ret;
- if (changes & CFG80211_NAN_CONF_CHANGED_BANDS)
- new_conf.bands = conf->bands;
+ ret = ieee80211_nan_conf_copy(&new_conf, conf, changes);
+ if (ret < 0)
+ return ret;
ret = drv_nan_change_conf(sdata->local, sdata, &new_conf, changes);
- if (!ret)
+ if (ret) {
+ ieee80211_nan_conf_free(&new_conf);
+ } else {
+ ieee80211_nan_conf_free(&sdata->u.nan.conf);
sdata->u.nan.conf = new_conf;
+ }
return ret;
}
@@ -885,6 +990,13 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
ret = 0;
memcpy(mac, sta->sta.addr, ETH_ALEN);
sta_set_sinfo(sta, sinfo, true);
+
+ /* Add accumulated removed link data to sinfo data for
+ * consistency for MLO
+ */
+ if (sinfo->valid_links)
+ sta_set_accumulated_removed_links_sinfo(sta, sinfo);
+
}
return ret;
@@ -912,6 +1024,12 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
if (sta) {
ret = 0;
sta_set_sinfo(sta, sinfo, true);
+
+ /* Add accumulated removed link data to sinfo data for
+ * consistency for MLO
+ */
+ if (sinfo->valid_links)
+ sta_set_accumulated_removed_links_sinfo(sta, sinfo);
}
return ret;
@@ -1057,6 +1175,47 @@ ieee80211_set_unsol_bcast_probe_resp(struct ieee80211_sub_if_data *sdata,
return 0;
}
+static int
+ieee80211_set_s1g_short_beacon(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_link_data *link,
+ struct cfg80211_s1g_short_beacon *params)
+{
+ struct s1g_short_beacon_data *new;
+ struct s1g_short_beacon_data *old =
+ sdata_dereference(link->u.ap.s1g_short_beacon, sdata);
+ size_t new_len =
+ sizeof(*new) + params->short_head_len + params->short_tail_len;
+
+ if (!params->update)
+ return 0;
+
+ if (!params->short_head)
+ return -EINVAL;
+
+ new = kzalloc(new_len, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ /* Memory layout: | struct | head | tail | */
+ new->short_head = (u8 *)new + sizeof(*new);
+ new->short_head_len = params->short_head_len;
+ memcpy(new->short_head, params->short_head, params->short_head_len);
+
+ if (params->short_tail) {
+ new->short_tail = new->short_head + params->short_head_len;
+ new->short_tail_len = params->short_tail_len;
+ memcpy(new->short_tail, params->short_tail,
+ params->short_tail_len);
+ }
+
+ rcu_assign_pointer(link->u.ap.s1g_short_beacon, new);
+
+ if (old)
+ kfree_rcu(old, rcu_head);
+
+ return 0;
+}
+
static int ieee80211_set_ftm_responder_params(
struct ieee80211_sub_if_data *sdata,
const u8 *lci, size_t lci_len,
@@ -1121,13 +1280,13 @@ ieee80211_copy_rnr_beacon(u8 *pos, struct cfg80211_rnr_elems *dst,
{
int i, offset = 0;
+ dst->cnt = src->cnt;
for (i = 0; i < src->cnt; i++) {
memcpy(pos + offset, src->elem[i].data, src->elem[i].len);
dst->elem[i].len = src->elem[i].len;
dst->elem[i].data = pos + offset;
offset += dst->elem[i].len;
}
- dst->cnt = src->cnt;
return offset;
}
@@ -1218,8 +1377,11 @@ ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
ieee80211_copy_rnr_beacon(pos, new->rnr_ies, rnr);
}
/* update bssid_indicator */
- link_conf->bssid_indicator =
- ilog2(__roundup_pow_of_two(mbssid->cnt + 1));
+ if (new->mbssid_ies->cnt && new->mbssid_ies->elem[0].len > 2)
+ link_conf->bssid_indicator =
+ *(new->mbssid_ies->elem[0].data + 2);
+ else
+ link_conf->bssid_indicator = 0;
}
if (csa) {
@@ -1324,6 +1486,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_link_data *link;
struct ieee80211_bss_conf *link_conf;
struct ieee80211_chan_req chanreq = { .oper = params->chandef };
+ u64 tsf;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -1476,8 +1639,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
link_conf->twt_responder = params->twt_responder;
link_conf->he_obss_pd = params->he_obss_pd;
link_conf->he_bss_color = params->beacon.he_bss_color;
- sdata->vif.cfg.s1g = params->chandef.chan->band ==
- NL80211_BAND_S1GHZ;
+ link_conf->s1g_long_beacon_period = params->s1g_long_beacon_period;
+ sdata->vif.cfg.s1g = params->chandef.chan->band == NL80211_BAND_S1GHZ;
sdata->vif.cfg.ssid_len = params->ssid_len;
if (params->ssid_len)
@@ -1524,6 +1687,13 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (err < 0)
goto error;
+ if (sdata->vif.cfg.s1g) {
+ err = ieee80211_set_s1g_short_beacon(sdata, link,
+ &params->s1g_short_beacon);
+ if (err < 0)
+ goto error;
+ }
+
err = drv_start_ap(sdata->local, sdata, link_conf);
if (err) {
old = sdata_dereference(link->u.ap.beacon, sdata);
@@ -1538,7 +1708,12 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
goto error;
}
- ieee80211_recalc_dtim(local, sdata);
+ tsf = drv_get_tsf(local, sdata);
+ ieee80211_recalc_dtim(sdata, tsf);
+
+ if (link->u.ap.s1g_short_beacon)
+ ieee80211_recalc_sb_count(sdata, tsf);
+
ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_SSID);
ieee80211_link_info_change_notify(sdata, link, changed);
@@ -1602,6 +1777,13 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
if (err < 0)
return err;
+ if (link->u.ap.s1g_short_beacon) {
+ err = ieee80211_set_s1g_short_beacon(sdata, link,
+ &params->s1g_short_beacon);
+ if (err < 0)
+ return err;
+ }
+
if (beacon->he_bss_color_valid &&
beacon->he_bss_color.enabled != link_conf->he_bss_color.enabled) {
link_conf->he_bss_color.enabled = beacon->he_bss_color.enabled;
@@ -1633,6 +1815,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
struct probe_resp *old_probe_resp;
struct fils_discovery_data *old_fils_discovery;
struct unsol_bcast_probe_resp_data *old_unsol_bcast_probe_resp;
+ struct s1g_short_beacon_data *old_s1g_short_beacon;
struct cfg80211_chan_def chandef;
struct ieee80211_link_data *link =
sdata_dereference(sdata->link[link_id], sdata);
@@ -1651,6 +1834,8 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
old_unsol_bcast_probe_resp =
sdata_dereference(link->u.ap.unsol_bcast_probe_resp,
sdata);
+ old_s1g_short_beacon =
+ sdata_dereference(link->u.ap.s1g_short_beacon, sdata);
/* abort any running channel switch or color change */
link_conf->csa_active = false;
@@ -1673,6 +1858,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
RCU_INIT_POINTER(link->u.ap.probe_resp, NULL);
RCU_INIT_POINTER(link->u.ap.fils_discovery, NULL);
RCU_INIT_POINTER(link->u.ap.unsol_bcast_probe_resp, NULL);
+ RCU_INIT_POINTER(link->u.ap.s1g_short_beacon, NULL);
kfree_rcu(old_beacon, rcu_head);
if (old_probe_resp)
kfree_rcu(old_probe_resp, rcu_head);
@@ -1680,6 +1866,8 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
kfree_rcu(old_fils_discovery, rcu_head);
if (old_unsol_bcast_probe_resp)
kfree_rcu(old_unsol_bcast_probe_resp, rcu_head);
+ if (old_s1g_short_beacon)
+ kfree_rcu(old_s1g_short_beacon, rcu_head);
kfree(link_conf->ftmr_params);
link_conf->ftmr_params = NULL;
@@ -1703,6 +1891,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
link_conf->enable_beacon = false;
sdata->beacon_rate_set = false;
sdata->vif.cfg.ssid_len = 0;
+ sdata->vif.cfg.s1g = false;
clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
ieee80211_link_info_change_notify(sdata, link,
BSS_CHANGED_BEACON_ENABLED);
@@ -1878,6 +2067,7 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
params->vht_capa ||
params->he_capa ||
params->eht_capa ||
+ params->s1g_capa ||
params->opmode_notif_used;
switch (mode) {
@@ -1956,9 +2146,27 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
params->eht_capa_len,
link_sta);
+ if (params->s1g_capa)
+ ieee80211_s1g_cap_to_sta_s1g_cap(sdata, params->s1g_capa,
+ link_sta);
+
ieee80211_sta_init_nss(link_sta);
if (params->opmode_notif_used) {
+ enum nl80211_chan_width width = link->conf->chanreq.oper.width;
+
+ switch (width) {
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_40:
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_160:
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_320: /* not VHT, allowed for HE/EHT */
+ break;
+ default:
+ return -EINVAL;
+ }
+
/* returned value is only needed for rc update, but the
* rc isn't initialized here yet, so ignore it
*/
@@ -2067,10 +2275,16 @@ static int sta_apply_parameters(struct ieee80211_local *local,
/*
* cfg80211 validates this (1-2007) and allows setting the AID
- * only when creating a new station entry
+ * only when creating a new station entry. For S1G APs, the current
+ * implementation supports a maximum of 1600 AIDs.
*/
- if (params->aid)
+ if (params->aid) {
+ if (sdata->vif.cfg.s1g &&
+ params->aid > IEEE80211_MAX_SUPPORTED_S1G_AID)
+ return -EINVAL;
+
sta->sta.aid = params->aid;
+ }
/*
* Some of the following updates would be racy if called on an
@@ -2897,6 +3111,9 @@ static int ieee80211_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *req)
{
struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_link_data *link;
+ struct ieee80211_channel *chan;
+ int radio_idx;
sdata = IEEE80211_WDEV_TO_SUB_IF(req->wdev);
@@ -2924,10 +3141,20 @@ static int ieee80211_scan(struct wiphy *wiphy,
* the frames sent while scanning on other channel will be
* lost)
*/
- if (ieee80211_num_beaconing_links(sdata) &&
- (!(wiphy->features & NL80211_FEATURE_AP_SCAN) ||
- !(req->flags & NL80211_SCAN_FLAG_AP)))
- return -EOPNOTSUPP;
+ for_each_link_data(sdata, link) {
+ /* if the link is not beaconing, ignore it */
+ if (!sdata_dereference(link->u.ap.beacon, sdata))
+ continue;
+
+ chan = link->conf->chanreq.oper.chan;
+ radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chan);
+
+ if (ieee80211_is_radio_idx_in_scan_req(wiphy, req,
+ radio_idx) &&
+ (!(wiphy->features & NL80211_FEATURE_AP_SCAN) ||
+ !(req->flags & NL80211_SCAN_FLAG_AP)))
+ return -EOPNOTSUPP;
+ }
break;
case NL80211_IFTYPE_NAN:
default:
@@ -3028,7 +3255,8 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
-static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+static int ieee80211_set_wiphy_params(struct wiphy *wiphy, int radio_idx,
+ u32 changed)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
int err;
@@ -3036,7 +3264,8 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
ieee80211_check_fast_xmit_all(local);
- err = drv_set_frag_threshold(local, wiphy->frag_threshold);
+ err = drv_set_frag_threshold(local, radio_idx,
+ wiphy->frag_threshold);
if (err) {
ieee80211_check_fast_xmit_all(local);
@@ -3050,14 +3279,23 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
coverage_class = changed & WIPHY_PARAM_COVERAGE_CLASS ?
wiphy->coverage_class : -1;
- err = drv_set_coverage_class(local, coverage_class);
+ err = drv_set_coverage_class(local, radio_idx,
+ coverage_class);
if (err)
return err;
}
if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
- err = drv_set_rts_threshold(local, wiphy->rts_threshold);
+ u32 rts_threshold;
+
+ if ((radio_idx == -1) || (radio_idx >= wiphy->n_radio))
+ rts_threshold = wiphy->rts_threshold;
+ else
+ rts_threshold =
+ wiphy->radio_cfg[radio_idx].rts_threshold;
+
+ err = drv_set_rts_threshold(local, radio_idx, rts_threshold);
if (err)
return err;
@@ -3075,18 +3313,19 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
}
if (changed &
(WIPHY_PARAM_RETRY_SHORT | WIPHY_PARAM_RETRY_LONG))
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_RETRY_LIMITS);
+ ieee80211_hw_config(local, radio_idx,
+ IEEE80211_CONF_CHANGE_RETRY_LIMITS);
if (changed & (WIPHY_PARAM_TXQ_LIMIT |
WIPHY_PARAM_TXQ_MEMORY_LIMIT |
WIPHY_PARAM_TXQ_QUANTUM))
- ieee80211_txq_set_params(local);
+ ieee80211_txq_set_params(local, radio_idx);
return 0;
}
static int ieee80211_set_tx_power(struct wiphy *wiphy,
- struct wireless_dev *wdev,
+ struct wireless_dev *wdev, int radio_idx,
enum nl80211_tx_power_setting type, int mbm)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
@@ -3214,6 +3453,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
static int ieee80211_get_tx_power(struct wiphy *wiphy,
struct wireless_dev *wdev,
+ int radio_idx,
unsigned int link_id,
int *dbm)
{
@@ -3392,7 +3632,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
}
if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS);
ieee80211_recalc_ps(local);
ieee80211_recalc_ps_vif(sdata);
@@ -3549,6 +3789,43 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
return 0;
}
+static bool ieee80211_is_scan_ongoing(struct wiphy *wiphy,
+ struct ieee80211_local *local,
+ struct cfg80211_chan_def *chandef)
+{
+ struct cfg80211_scan_request *scan_req;
+ int chan_radio_idx, req_radio_idx;
+ struct ieee80211_roc_work *roc;
+
+ if (list_empty(&local->roc_list) && !local->scanning)
+ return false;
+
+ req_radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chandef->chan);
+
+ if (local->scanning) {
+ scan_req = wiphy_dereference(wiphy, local->scan_req);
+ /*
+ * Scan is going on but info is not there. Should not happen
+ * but if it does, let's not take risk and assume we can't use
+ * the hw hence return true
+ */
+ if (WARN_ON_ONCE(!scan_req))
+ return true;
+
+ return ieee80211_is_radio_idx_in_scan_req(wiphy, scan_req,
+ req_radio_idx);
+ }
+
+ list_for_each_entry(roc, &local->roc_list, list) {
+ chan_radio_idx = cfg80211_get_radio_idx_by_chan(wiphy,
+ roc->chan);
+ if (chan_radio_idx == req_radio_idx)
+ return true;
+ }
+
+ return false;
+}
+
static int ieee80211_start_radar_detection(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_chan_def *chandef,
@@ -3562,7 +3839,7 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
lockdep_assert_wiphy(local->hw.wiphy);
- if (!list_empty(&local->roc_list) || local->scanning)
+ if (ieee80211_is_scan_ongoing(wiphy, local, chandef))
return -EBUSY;
link_data = sdata_dereference(sdata->link[link_id], sdata);
@@ -3742,7 +4019,7 @@ void ieee80211_csa_finish(struct ieee80211_vif *vif, unsigned int link_id)
*/
struct ieee80211_link_data *iter;
- for_each_sdata_link(local, iter) {
+ for_each_sdata_link_rcu(local, iter) {
if (iter->sdata == sdata ||
rcu_access_pointer(iter->conf->tx_bss_conf) != tx_bss_conf)
continue;
@@ -4054,7 +4331,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
lockdep_assert_wiphy(local->hw.wiphy);
- if (!list_empty(&local->roc_list) || local->scanning)
+ if (ieee80211_is_scan_ongoing(wiphy, local, &params->chandef))
return -EBUSY;
if (sdata->wdev.links[link_id].cac_started)
@@ -4088,6 +4365,12 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
goto out;
}
+ err = ieee80211_set_unsol_bcast_probe_resp(sdata,
+ &params->unsol_bcast_probe_resp,
+ link_data, link_conf, &changed);
+ if (err)
+ goto out;
+
chanctx = container_of(conf, struct ieee80211_chanctx, conf);
ch_switch.timestamp = 0;
@@ -4238,7 +4521,8 @@ ieee80211_update_mgmt_frame_registrations(struct wiphy *wiphy,
ieee80211_configure_filter(local);
}
-static int ieee80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
+static int ieee80211_set_antenna(struct wiphy *wiphy, int radio_idx,
+ u32 tx_ant, u32 rx_ant)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
int ret;
@@ -4254,11 +4538,12 @@ static int ieee80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
return 0;
}
-static int ieee80211_get_antenna(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant)
+static int ieee80211_get_antenna(struct wiphy *wiphy, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
- return drv_get_antenna(local, tx_ant, rx_ant);
+ return drv_get_antenna(local, radio_idx, tx_ant, rx_ant);
}
static int ieee80211_set_rekey_data(struct wiphy *wiphy,
@@ -4650,7 +4935,6 @@ static int ieee80211_get_txq_stats(struct wiphy *wiphy,
int ret = 0;
spin_lock_bh(&local->fq.lock);
- rcu_read_lock();
if (wdev) {
sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
@@ -4676,7 +4960,6 @@ static int ieee80211_get_txq_stats(struct wiphy *wiphy,
}
out:
- rcu_read_unlock();
spin_unlock_bh(&local->fq.lock);
return ret;
@@ -5027,6 +5310,12 @@ ieee80211_color_change(struct wiphy *wiphy, struct net_device *dev,
goto out;
}
+ err = ieee80211_set_unsol_bcast_probe_resp(sdata,
+ &params->unsol_bcast_probe_resp,
+ link, link_conf, &changed);
+ if (err)
+ goto out;
+
err = ieee80211_set_color_change_beacon(link, params, &changed);
if (err)
goto out;
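
The ieee80211_nan_conf_copy()/ieee80211_nan_conf_free() pair added above duplicates each optional buffer in the NAN configuration and, if any allocation fails, releases everything copied so far before returning. A self-contained sketch of the same ownership pattern, with invented field names standing in for cluster_id, extra_nan_attrs and vendor_elems:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Invented config with two optional, separately owned buffers. */
    struct conf {
        unsigned char *id;     size_t id_len;
        unsigned char *elems;  size_t elems_len;
    };

    static void conf_free(struct conf *c)
    {
        free(c->id);
        free(c->elems);
        memset(c, 0, sizeof(*c));
    }

    static void *memdup(const void *src, size_t len)
    {
        void *p = malloc(len);

        if (p)
            memcpy(p, src, len);
        return p;
    }

    /* Copy src into dst; on any failure release whatever was copied so far. */
    static int conf_copy(struct conf *dst, const struct conf *src)
    {
        if (src->id) {
            dst->id = memdup(src->id, src->id_len);
            if (!dst->id)
                goto no_mem;
            dst->id_len = src->id_len;
        }
        if (src->elems) {
            dst->elems = memdup(src->elems, src->elems_len);
            if (!dst->elems)
                goto no_mem;
            dst->elems_len = src->elems_len;
        }
        return 0;

    no_mem:
        conf_free(dst);
        return -1;
    }

    int main(void)
    {
        struct conf src = { (unsigned char *)"abcdef", 6, (unsigned char *)"xyz", 3 };
        struct conf dst = { 0 };

        if (conf_copy(&dst, &src))
            return 1;
        printf("copied %zu + %zu bytes\n", dst.id_len, dst.elems_len);
        conf_free(&dst);
        return 0;
    }

Freeing through the same helper on both the error path and the normal teardown path keeps ownership rules in one place, which is the point of pairing the copy and free functions in the patch.
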
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 3aaf5abf1acc..57065714cf8c 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -644,15 +644,28 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
return NULL;
}
-bool ieee80211_is_radar_required(struct ieee80211_local *local)
+bool ieee80211_is_radar_required(struct ieee80211_local *local,
+ struct cfg80211_scan_request *req)
{
+ struct wiphy *wiphy = local->hw.wiphy;
struct ieee80211_link_data *link;
+ struct ieee80211_channel *chan;
+ int radio_idx;
lockdep_assert_wiphy(local->hw.wiphy);
+ if (!req)
+ return false;
+
for_each_sdata_link(local, link) {
- if (link->radar_required)
- return true;
+ if (link->radar_required) {
+ chan = link->conf->chanreq.oper.chan;
+ radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chan);
+
+ if (ieee80211_is_radio_idx_in_scan_req(wiphy, req,
+ radio_idx))
+ return true;
+ }
}
return false;
@@ -720,7 +733,7 @@ static int ieee80211_add_chanctx(struct ieee80211_local *local,
/* turn idle off *before* setting channel -- some drivers need that */
changed = ieee80211_idle_off(local);
if (changed)
- ieee80211_hw_config(local, changed);
+ ieee80211_hw_config(local, -1, changed);
err = drv_add_chanctx(local, ctx);
if (err) {
@@ -886,7 +899,7 @@ static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link,
conf = rcu_dereference_protected(link->conf->chanctx_conf,
lockdep_is_held(&local->hw.wiphy->mtx));
- if (conf) {
+ if (conf && !local->in_reconfig) {
curr_ctx = container_of(conf, struct ieee80211_chanctx, conf);
drv_unassign_vif_chanctx(local, sdata, link->conf, curr_ctx);
@@ -906,8 +919,9 @@ static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link,
/* succeeded, so commit it to the data structures */
conf = &new_ctx->conf;
- list_add(&link->assigned_chanctx_list,
- &new_ctx->assigned_links);
+ if (!local->in_reconfig)
+ list_add(&link->assigned_chanctx_list,
+ &new_ctx->assigned_links);
}
} else {
ret = 0;
@@ -1084,7 +1098,7 @@ void ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link,
__ieee80211_link_copy_chanctx_to_vlans(link, clear);
}
-int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link)
+void ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_chanctx *ctx = link->reserved_chanctx;
@@ -1092,7 +1106,7 @@ int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link)
lockdep_assert_wiphy(sdata->local->hw.wiphy);
if (WARN_ON(!ctx))
- return -EINVAL;
+ return;
list_del(&link->reserved_chanctx_list);
link->reserved_chanctx = NULL;
@@ -1100,7 +1114,7 @@ int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link)
if (ieee80211_chanctx_refcount(sdata->local, ctx) == 0) {
if (ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER) {
if (WARN_ON(!ctx->replace_ctx))
- return -EINVAL;
+ return;
WARN_ON(ctx->replace_ctx->replace_state !=
IEEE80211_CHANCTX_WILL_BE_REPLACED);
@@ -1116,8 +1130,6 @@ int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link)
ieee80211_free_chanctx(sdata->local, ctx, false);
}
}
-
- return 0;
}
static struct ieee80211_chanctx *
@@ -1381,6 +1393,7 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
goto out;
}
+ link->radar_required = link->reserved_radar_required;
list_move(&link->assigned_chanctx_list, &new_ctx->assigned_links);
rcu_assign_pointer(link_conf->chanctx_conf, &new_ctx->conf);
@@ -1909,7 +1922,8 @@ int _ieee80211_link_use_channel(struct ieee80211_link_data *link,
if (ret < 0)
goto out;
- __ieee80211_link_release_channel(link, false);
+ if (!local->in_reconfig)
+ __ieee80211_link_release_channel(link, false);
ctx = ieee80211_find_chanctx(local, link, chanreq, mode);
/* Note: context is now reserved */
diff --git a/net/mac80211/debug.h b/net/mac80211/debug.h
index 5b81998cb0c9..ef7c1a68d88d 100644
--- a/net/mac80211/debug.h
+++ b/net/mac80211/debug.h
@@ -1,10 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Portions
- * Copyright (C) 2022 - 2024 Intel Corporation
+ * Copyright (C) 2022 - 2025 Intel Corporation
*/
#ifndef __MAC80211_DEBUG_H
#define __MAC80211_DEBUG_H
+#include <linux/once_lite.h>
#include <net/cfg80211.h>
#ifdef CONFIG_MAC80211_OCB_DEBUG
@@ -152,6 +153,8 @@ do { \
else \
_sdata_err((link)->sdata, fmt, ##__VA_ARGS__); \
} while (0)
+#define link_err_once(link, fmt, ...) \
+ DO_ONCE_LITE(link_err, link, fmt, ##__VA_ARGS__)
#define link_id_info(sdata, link_id, fmt, ...) \
do { \
if (ieee80211_vif_is_mld(&sdata->vif)) \
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 69e03630f64c..d02f07368c51 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -4,7 +4,7 @@
*
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright (C) 2018 - 2019, 2021-2024 Intel Corporation
+ * Copyright (C) 2018 - 2019, 2021-2025 Intel Corporation
*/
#include <linux/debugfs.h>
@@ -82,7 +82,6 @@ static ssize_t aqm_read(struct file *file,
int len = 0;
spin_lock_bh(&local->fq.lock);
- rcu_read_lock();
len = scnprintf(buf, sizeof(buf),
"access name value\n"
@@ -105,7 +104,6 @@ static ssize_t aqm_read(struct file *file,
fq->limit,
fq->quantum);
- rcu_read_unlock();
spin_unlock_bh(&local->fq.lock);
return simple_read_from_buffer(user_buf, count, ppos,
@@ -490,7 +488,6 @@ static const char *hw_flag_names[] = {
FLAG(DETECTS_COLOR_COLLISION),
FLAG(MLO_MCAST_MULTI_LINK_TX),
FLAG(DISALLOW_PUNCTURING),
- FLAG(DISALLOW_PUNCTURING_5GHZ),
FLAG(HANDLES_QUIET_CSA),
FLAG(STRICT),
#undef FLAG
@@ -718,7 +715,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_STATS_ADD(dot11ReceivedFragmentCount);
DEBUGFS_STATS_ADD(dot11MulticastReceivedFrameCount);
DEBUGFS_STATS_ADD(dot11TransmittedFrameCount);
- DEBUGFS_STATS_ADD(tx_handlers_drop);
DEBUGFS_STATS_ADD(tx_handlers_queued);
DEBUGFS_STATS_ADD(tx_handlers_drop_wep);
DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 54c479910d05..30a5a978a678 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -625,7 +625,6 @@ static ssize_t ieee80211_if_fmt_aqm(
txqi = to_txq_info(sdata->vif.txq);
spin_lock_bh(&local->fq.lock);
- rcu_read_lock();
len = scnprintf(buf,
buflen,
@@ -642,7 +641,6 @@ static ssize_t ieee80211_if_fmt_aqm(
txqi->tin.tx_bytes,
txqi->tin.tx_packets);
- rcu_read_unlock();
spin_unlock_bh(&local->fq.lock);
return len;
@@ -704,7 +702,7 @@ static ssize_t ieee80211_if_parse_tsf(
}
}
- ieee80211_recalc_dtim(local, sdata);
+ ieee80211_recalc_dtim(sdata, drv_get_tsf(local, sdata));
return buflen;
}
IEEE80211_IF_FILE_RW(tsf);
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 49061bd4151b..ef75255d47d5 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -148,7 +148,6 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
return -ENOMEM;
spin_lock_bh(&local->fq.lock);
- rcu_read_lock();
p += scnprintf(p,
bufsz + buf - p,
@@ -178,7 +177,6 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
test_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ? " DIRTY" : "");
}
- rcu_read_unlock();
spin_unlock_bh(&local->fq.lock);
rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index 35349a7f16cb..ba9fba165926 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015 Intel Deutschland GmbH
- * Copyright (C) 2022-2024 Intel Corporation
+ * Copyright (C) 2022-2025 Intel Corporation
*/
#include <net/mac80211.h>
#include "ieee80211_i.h"
@@ -515,6 +515,9 @@ int drv_set_key(struct ieee80211_local *local,
!(sdata->vif.active_links & BIT(key->link_id))))
return -ENOLINK;
+ if (fips_enabled)
+ return -EOPNOTSUPP;
+
trace_drv_set_key(local, cmd, sdata, sta, key);
ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key);
trace_drv_return_int(local, ret);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 307587c8a003..55105d238d6b 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -8,6 +8,7 @@
#ifndef __MAC80211_DRIVER_OPS
#define __MAC80211_DRIVER_OPS
+#include <linux/fips.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "trace.h"
@@ -143,15 +144,16 @@ int drv_change_interface(struct ieee80211_local *local,
void drv_remove_interface(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
-static inline int drv_config(struct ieee80211_local *local, u32 changed)
+static inline int drv_config(struct ieee80211_local *local, int radio_idx,
+ u32 changed)
{
int ret;
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
- trace_drv_config(local, changed);
- ret = local->ops->config(&local->hw, changed);
+ trace_drv_config(local, radio_idx, changed);
+ ret = local->ops->config(&local->hw, radio_idx, changed);
trace_drv_return_int(local, ret);
return ret;
}
@@ -387,45 +389,47 @@ static inline void drv_get_key_seq(struct ieee80211_local *local,
}
static inline int drv_set_frag_threshold(struct ieee80211_local *local,
- u32 value)
+ int radio_idx, u32 value)
{
int ret = 0;
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
- trace_drv_set_frag_threshold(local, value);
+ trace_drv_set_frag_threshold(local, radio_idx, value);
if (local->ops->set_frag_threshold)
- ret = local->ops->set_frag_threshold(&local->hw, value);
+ ret = local->ops->set_frag_threshold(&local->hw, radio_idx,
+ value);
trace_drv_return_int(local, ret);
return ret;
}
static inline int drv_set_rts_threshold(struct ieee80211_local *local,
- u32 value)
+ int radio_idx, u32 value)
{
int ret = 0;
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
- trace_drv_set_rts_threshold(local, value);
+ trace_drv_set_rts_threshold(local, radio_idx, value);
if (local->ops->set_rts_threshold)
- ret = local->ops->set_rts_threshold(&local->hw, value);
+ ret = local->ops->set_rts_threshold(&local->hw, radio_idx,
+ value);
trace_drv_return_int(local, ret);
return ret;
}
static inline int drv_set_coverage_class(struct ieee80211_local *local,
- s16 value)
+ int radio_idx, s16 value)
{
int ret = 0;
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
- trace_drv_set_coverage_class(local, value);
+ trace_drv_set_coverage_class(local, radio_idx, value);
if (local->ops->set_coverage_class)
- local->ops->set_coverage_class(&local->hw, value);
+ local->ops->set_coverage_class(&local->hw, radio_idx, value);
else
ret = -EOPNOTSUPP;
@@ -631,6 +635,25 @@ static inline void drv_sta_statistics(struct ieee80211_local *local,
trace_drv_return_void(local);
}
+static inline void drv_link_sta_statistics(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_link_sta *link_sta,
+ struct link_station_info *link_sinfo)
+{
+ might_sleep();
+ lockdep_assert_wiphy(local->hw.wiphy);
+
+ sdata = get_bss_sdata(sdata);
+ if (!check_sdata_in_driver(sdata))
+ return;
+
+ trace_drv_link_sta_statistics(local, sdata, link_sta);
+ if (local->ops->link_sta_statistics)
+ local->ops->link_sta_statistics(&local->hw, &sdata->vif,
+ link_sta, link_sinfo);
+ trace_drv_return_void(local);
+}
+
int drv_conf_tx(struct ieee80211_local *local,
struct ieee80211_link_data *link, u16 ac,
const struct ieee80211_tx_queue_params *params);
@@ -753,20 +776,21 @@ static inline int drv_set_antenna(struct ieee80211_local *local,
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
if (local->ops->set_antenna)
- ret = local->ops->set_antenna(&local->hw, tx_ant, rx_ant);
+ ret = local->ops->set_antenna(&local->hw, -1, tx_ant, rx_ant);
trace_drv_set_antenna(local, tx_ant, rx_ant, ret);
return ret;
}
-static inline int drv_get_antenna(struct ieee80211_local *local,
+static inline int drv_get_antenna(struct ieee80211_local *local, int radio_idx,
u32 *tx_ant, u32 *rx_ant)
{
int ret = -EOPNOTSUPP;
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
if (local->ops->get_antenna)
- ret = local->ops->get_antenna(&local->hw, tx_ant, rx_ant);
- trace_drv_get_antenna(local, *tx_ant, *rx_ant, ret);
+ ret = local->ops->get_antenna(&local->hw, radio_idx,
+ tx_ant, rx_ant);
+ trace_drv_get_antenna(local, radio_idx, *tx_ant, *rx_ant, ret);
return ret;
}
@@ -879,6 +903,9 @@ static inline void drv_set_rekey_data(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return;
+ if (fips_enabled)
+ return;
+
trace_drv_set_rekey_data(local, sdata, data);
if (local->ops->set_rekey_data)
local->ops->set_rekey_data(&local->hw, &sdata->vif, data);
@@ -1389,7 +1416,7 @@ drv_get_ftm_responder_stats(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct cfg80211_ftm_responder_stats *ftm_stats)
{
- u32 ret = -EOPNOTSUPP;
+ int ret = -EOPNOTSUPP;
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
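The drv_config()/drv_set_*()/drv_get_antenna() changes above thread a new radio_idx argument through to the driver; callers in this series pass -1 when the operation is not tied to one radio of a multi-radio device. A minimal sketch of how a driver callback might honour that convention (illustrative only; the fake_drv/fake_radio structures are made up and not part of this patch):

struct fake_radio {
	u32 rts_threshold;
};

struct fake_drv {
	struct fake_radio radio[2];
	int n_radios;
};

/* Apply to one radio when radio_idx >= 0, otherwise to all of them. */
static int fake_set_rts_threshold(struct fake_drv *drv, int radio_idx, u32 value)
{
	int i;

	if (radio_idx >= 0 && radio_idx < drv->n_radios) {
		drv->radio[radio_idx].rts_threshold = value;
		return 0;
	}

	for (i = 0; i < drv->n_radios; i++)
		drv->radio[i].rts_threshold = value;

	return 0;
}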
diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c
index 0397755a3bd1..3d365626faa4 100644
--- a/net/mac80211/ethtool.c
+++ b/net/mac80211/ethtool.c
@@ -48,8 +48,8 @@ static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = {
"rx_duplicates", "rx_fragments", "rx_dropped",
"tx_packets", "tx_bytes",
"tx_filtered", "tx_retry_failed", "tx_retries",
- "sta_state", "txrate", "rxrate", "signal",
- "channel", "noise", "ch_time", "ch_time_busy",
+ "tx_handlers_drop", "sta_state", "txrate", "rxrate",
+ "signal", "channel", "noise", "ch_time", "ch_time_busy",
"ch_time_ext_busy", "ch_time_rx", "ch_time_tx"
};
#define STA_STATS_LEN ARRAY_SIZE(ieee80211_gstrings_sta_stats)
@@ -120,6 +120,7 @@ static void ieee80211_get_stats(struct net_device *dev,
i = 0;
ADD_STA_STATS(&sta->deflink);
+ data[i++] = sdata->tx_handlers_drop;
data[i++] = sta->sta_state;
@@ -145,6 +146,7 @@ static void ieee80211_get_stats(struct net_device *dev,
sta_set_sinfo(sta, &sinfo, false);
i = 0;
ADD_STA_STATS(&sta->deflink);
+ data[i++] = sdata->tx_handlers_drop;
}
}
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 32390d8a9d75..1c82a28b03de 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -9,7 +9,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
* Copyright 2017 Intel Deutschland GmbH
- * Copyright(c) 2020-2024 Intel Corporation
+ * Copyright(c) 2020-2025 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -603,3 +603,41 @@ out:
}
/* this might change ... don't want non-open drivers using it */
EXPORT_SYMBOL_GPL(ieee80211_request_smps);
+
+void ieee80211_ht_handle_chanwidth_notif(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta,
+ struct link_sta_info *link_sta,
+ u8 chanwidth, enum nl80211_band band)
+{
+ enum ieee80211_sta_rx_bandwidth max_bw, new_bw;
+ struct ieee80211_supported_band *sband;
+ struct sta_opmode_info sta_opmode = {};
+
+ lockdep_assert_wiphy(local->hw.wiphy);
+
+ if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
+ max_bw = IEEE80211_STA_RX_BW_20;
+ else
+ max_bw = ieee80211_sta_cap_rx_bw(link_sta);
+
+ /* set cur_max_bandwidth and recalc sta bw */
+ link_sta->cur_max_bandwidth = max_bw;
+ new_bw = ieee80211_sta_cur_vht_bw(link_sta);
+
+ if (link_sta->pub->bandwidth == new_bw)
+ return;
+
+ link_sta->pub->bandwidth = new_bw;
+ sband = local->hw.wiphy->bands[band];
+ sta_opmode.bw =
+ ieee80211_sta_rx_bw_to_chan_width(link_sta);
+ sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED;
+
+ rate_control_rate_update(local, sband, link_sta,
+ IEEE80211_RC_BW_CHANGED);
+ cfg80211_sta_opmode_change_notify(sdata->dev,
+ sta->addr,
+ &sta_opmode,
+ GFP_KERNEL);
+}
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 9ed87d6f5019..6e36b09fe97f 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -635,7 +635,7 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list, list) {
- unsigned long last_active = ieee80211_sta_last_active(sta);
+ unsigned long last_active = ieee80211_sta_last_active(sta, -1);
if (sta->sdata == sdata &&
time_is_after_jiffies(last_active +
@@ -1228,7 +1228,7 @@ static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata)
lockdep_assert_wiphy(local->hw.wiphy);
list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
- unsigned long last_active = ieee80211_sta_last_active(sta);
+ unsigned long last_active = ieee80211_sta_last_active(sta, -1);
if (sdata != sta->sdata)
continue;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 30809f0b35f7..73fd86ec1bce 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -86,6 +86,14 @@ extern const u8 ieee80211_ac_to_qos_mask[IEEE80211_NUM_ACS];
#define IEEE80211_MAX_NAN_INSTANCE_ID 255
+/*
+ * Current mac80211 implementation supports a maximum of 1600 AIDS
+ * for S1G interfaces. With regards to an S1G TIM, this covers 25 blocks
+ * as each block is 64 AIDs.
+ */
+#define IEEE80211_MAX_SUPPORTED_S1G_AID 1600
+#define IEEE80211_MAX_SUPPORTED_S1G_TIM_BLOCKS 25
+
enum ieee80211_status_data {
IEEE80211_STATUS_TYPE_MASK = 0x00f,
IEEE80211_STATUS_TYPE_INVALID = 0,
@@ -296,6 +304,14 @@ struct unsol_bcast_probe_resp_data {
u8 data[];
};
+struct s1g_short_beacon_data {
+ struct rcu_head rcu_head;
+ u8 *short_head;
+ u8 *short_tail;
+ int short_head_len;
+ int short_tail_len;
+};
+
struct ps_data {
/* yes, this looks ugly, but guarantees that we can later use
* bitmap_empty :)
@@ -306,6 +322,7 @@ struct ps_data {
atomic_t num_sta_ps; /* number of stations in PS mode */
int dtim_count;
bool dtim_bc_mc;
+ int sb_count; /* num short beacons til next long beacon */
};
struct ieee80211_if_ap {
@@ -968,11 +985,13 @@ struct ieee80211_if_mntr {
* struct ieee80211_if_nan - NAN state
*
* @conf: current NAN configuration
+ * @started: true iff NAN is started
* @func_lock: lock for @func_inst_ids
* @function_inst_ids: a bitmap of available instance_id's
*/
struct ieee80211_if_nan {
struct cfg80211_nan_conf conf;
+ bool started;
/* protects function_inst_ids */
spinlock_t func_lock;
@@ -1042,6 +1061,7 @@ struct ieee80211_link_data_ap {
struct probe_resp __rcu *probe_resp;
struct fils_discovery_data __rcu *fils_discovery;
struct unsol_bcast_probe_resp_data __rcu *unsol_bcast_probe_resp;
+ struct s1g_short_beacon_data __rcu *s1g_short_beacon;
/* to be used after channel switch. */
struct cfg80211_beacon_data *next_beacon;
@@ -1200,6 +1220,7 @@ struct ieee80211_sub_if_data {
} debugfs;
#endif
+ u32 tx_handlers_drop;
/* must be last, dynamically sized area in this! */
struct ieee80211_vif vif;
};
@@ -1226,8 +1247,25 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
if ((_link = wiphy_dereference((_local)->hw.wiphy, \
___sdata->link[___link_id])))
+/*
+ * for_each_sdata_link_rcu() must be used under RCU read lock.
+ */
+#define for_each_sdata_link_rcu(_local, _link) \
+ /* outer loop just to define the variables ... */ \
+ for (struct ieee80211_sub_if_data *___sdata = NULL; \
+ !___sdata; \
+ ___sdata = (void *)~0 /* always stop */) \
+ list_for_each_entry_rcu(___sdata, &(_local)->interfaces, list) \
+ if (ieee80211_sdata_running(___sdata)) \
+ for (int ___link_id = 0; \
+ ___link_id < ARRAY_SIZE((___sdata)->link); \
+ ___link_id++) \
+ if ((_link = rcu_dereference((___sdata)->link[___link_id])))
+
#define for_each_link_data(sdata, __link) \
- struct ieee80211_sub_if_data *__sdata = sdata; \
+ /* outer loop just to define the variable ... */ \
+ for (struct ieee80211_sub_if_data *__sdata = (sdata); __sdata; \
+ __sdata = NULL /* always stop */) \
for (int __link_id = 0; \
__link_id < ARRAY_SIZE((__sdata)->link); __link_id++) \
if ((!(__sdata)->vif.valid_links || \
@@ -1235,6 +1273,19 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
((__link) = sdata_dereference((__sdata)->link[__link_id], \
(__sdata))))
+/*
+ * for_each_link_data_rcu should be used under RCU read lock.
+ */
+#define for_each_link_data_rcu(sdata, __link) \
+ /* outer loop just to define the variable ... */ \
+ for (struct ieee80211_sub_if_data *__sdata = (sdata); __sdata; \
+ __sdata = NULL /* always stop */) \
+ for (int __link_id = 0; \
+ __link_id < ARRAY_SIZE((__sdata)->link); __link_id++) \
+ if ((!(__sdata)->vif.valid_links || \
+ (__sdata)->vif.valid_links & BIT(__link_id)) && \
+ ((__link) = rcu_dereference((__sdata)->link[__link_id]))) \
+
static inline int
ieee80211_get_mbssid_beacon_len(struct cfg80211_mbssid_elems *elems,
struct cfg80211_rnr_elems *rnr_elems,
@@ -1403,8 +1454,6 @@ struct ieee80211_local {
bool rx_mcast_action_reg;
unsigned int filter_flags; /* FIF_* */
- bool wiphy_ciphers_allocated;
-
struct cfg80211_chan_def dflt_chandef;
bool emulate_chanctx;
@@ -1561,7 +1610,6 @@ struct ieee80211_local {
u32 dot11TransmittedFrameCount;
/* TX/RX handler statistics */
- unsigned int tx_handlers_drop;
unsigned int tx_handlers_queued;
unsigned int tx_handlers_drop_wep;
unsigned int tx_handlers_drop_not_assoc;
@@ -1627,8 +1675,6 @@ struct ieee80211_local {
struct idr ack_status_frames;
spinlock_t ack_status_lock;
- struct ieee80211_sub_if_data __rcu *p2p_sdata;
-
/* virtual monitor interface */
struct ieee80211_sub_if_data __rcu *monitor_sdata;
struct ieee80211_chan_req monitor_chanreq;
@@ -1872,7 +1918,8 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
struct ieee80211_rx_status *status,
unsigned int mpdu_len,
unsigned int mpdu_offset);
-int ieee80211_hw_config(struct ieee80211_local *local, u32 changed);
+int ieee80211_hw_config(struct ieee80211_local *local, int radio_idx,
+ u32 changed);
int ieee80211_hw_conf_chan(struct ieee80211_local *local);
void ieee80211_hw_conf_init(struct ieee80211_local *local);
void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx);
@@ -2205,6 +2252,12 @@ u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs);
enum nl80211_smps_mode
ieee80211_smps_mode_to_smps_mode(enum ieee80211_smps_mode smps);
+void ieee80211_ht_handle_chanwidth_notif(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta,
+ struct link_sta_info *link_sta,
+ u8 chanwidth, enum nl80211_band band);
+
/* VHT */
void
ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
@@ -2269,6 +2322,9 @@ void ieee80211_s1g_rx_twt_action(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
void ieee80211_s1g_status_twt_action(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
+void ieee80211_s1g_cap_to_sta_s1g_cap(struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_s1g_cap *s1g_cap_ie,
+ struct link_sta_info *link_sta);
/* Spectrum management */
void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
@@ -2542,7 +2598,7 @@ static inline bool ieee80211_can_run_worker(struct ieee80211_local *local)
}
int ieee80211_txq_setup_flows(struct ieee80211_local *local);
-void ieee80211_txq_set_params(struct ieee80211_local *local);
+void ieee80211_txq_set_params(struct ieee80211_local *local, int radio_idx);
void ieee80211_txq_teardown_flows(struct ieee80211_local *local);
void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
@@ -2638,6 +2694,8 @@ int ieee80211_put_eht_cap(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata,
const struct ieee80211_supported_band *sband,
const struct ieee80211_conn_settings *conn);
+int ieee80211_put_reg_conn(struct sk_buff *skb,
+ enum ieee80211_channel_flags flags);
/* channel management */
bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper,
@@ -2652,7 +2710,8 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_local *local,
const struct ieee80211_he_operation *he_oper,
const struct ieee80211_eht_operation *eht_oper,
struct cfg80211_chan_def *chandef);
-bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper,
+bool ieee80211_chandef_s1g_oper(struct ieee80211_local *local,
+ const struct ieee80211_s1g_oper_ie *oper,
struct cfg80211_chan_def *chandef);
void ieee80211_chandef_downgrade(struct cfg80211_chan_def *chandef,
struct ieee80211_conn_settings *conn);
@@ -2691,7 +2750,7 @@ ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
bool radar_required);
int __must_check
ieee80211_link_use_reserved_context(struct ieee80211_link_data *link);
-int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link);
+void ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link);
int __must_check
ieee80211_link_change_chanreq(struct ieee80211_link_data *link,
@@ -2712,7 +2771,11 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
struct ieee80211_link_data *rsvd_for,
bool check_reserved);
-bool ieee80211_is_radar_required(struct ieee80211_local *local);
+bool ieee80211_is_radar_required(struct ieee80211_local *local,
+ struct cfg80211_scan_request *req);
+bool ieee80211_is_radio_idx_in_scan_req(struct wiphy *wiphy,
+ struct cfg80211_scan_request *scan_req,
+ int radio_idx);
void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work);
void ieee80211_dfs_cac_cancel(struct ieee80211_local *local,
@@ -2721,9 +2784,8 @@ void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
struct wiphy_work *work);
int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
struct cfg80211_csa_settings *csa_settings);
-
-void ieee80211_recalc_dtim(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata);
+void ieee80211_recalc_sb_count(struct ieee80211_sub_if_data *sdata, u64 tsf);
+void ieee80211_recalc_dtim(struct ieee80211_sub_if_data *sdata, u64 tsf);
int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
const struct cfg80211_chan_def *chandef,
enum ieee80211_chanctx_mode chanmode,
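The new for_each_sdata_link_rcu() and for_each_link_data_rcu() iterators above only dereference links with rcu_dereference(), so the caller must hold the RCU read lock for the whole walk. A minimal usage sketch (illustrative only; count_links_rcu() is a hypothetical helper, not part of this patch):

static unsigned int count_links_rcu(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_link_data *link;
	unsigned int n = 0;

	rcu_read_lock();
	for_each_link_data_rcu(sdata, link)
		n++;	/* link points at each valid link in turn */
	rcu_read_unlock();

	return n;
}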
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 7c27f3cd841c..a7873832d4fa 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -107,6 +107,7 @@ static u32 __ieee80211_recalc_idle(struct ieee80211_local *local,
{
bool working, scanning, active;
unsigned int led_trig_start = 0, led_trig_stop = 0;
+ struct ieee80211_sub_if_data *iter;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -117,6 +118,14 @@ static u32 __ieee80211_recalc_idle(struct ieee80211_local *local,
working = !local->ops->remain_on_channel &&
!list_empty(&local->roc_list);
+ list_for_each_entry(iter, &local->interfaces, list) {
+ if (iter->vif.type == NL80211_IFTYPE_NAN &&
+ iter->u.nan.started) {
+ working = true;
+ break;
+ }
+ }
+
scanning = test_bit(SCAN_SW_SCANNING, &local->scanning) ||
test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning);
@@ -146,7 +155,7 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)
{
u32 change = __ieee80211_recalc_idle(local, false);
if (change)
- ieee80211_hw_config(local, change);
+ ieee80211_hw_config(local, -1, change);
}
static int ieee80211_verify_mac(struct ieee80211_sub_if_data *sdata, u8 *addr,
@@ -611,10 +620,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
spin_unlock_bh(&sdata->u.nan.func_lock);
break;
- case NL80211_IFTYPE_P2P_DEVICE:
- /* relies on synchronize_rcu() below */
- RCU_INIT_POINTER(local->p2p_sdata, NULL);
- fallthrough;
default:
wiphy_work_cancel(sdata->local->hw.wiphy, &sdata->work);
/*
@@ -726,7 +731,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
/* do after stop to avoid reconfiguring when we stop anyway */
ieee80211_configure_filter(local);
- ieee80211_hw_config(local, hw_reconf_flags);
+ ieee80211_hw_config(local, -1, hw_reconf_flags);
if (local->virt_monitors == local->open_count)
ieee80211_add_virtual_monitor(local);
@@ -1150,6 +1155,8 @@ static void ieee80211_sdata_init(struct ieee80211_local *local,
{
sdata->local = local;
+ INIT_LIST_HEAD(&sdata->key_list);
+
/*
* Initialize the default link, so we can use link_id 0 for non-MLD,
* and that continues to work for non-MLD-aware drivers that use just
@@ -1403,6 +1410,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
ieee80211_recalc_idle(local);
netif_carrier_on(dev);
+ list_add_tail_rcu(&sdata->u.mntr.list, &local->mon_list);
break;
default:
if (coming_up) {
@@ -1466,17 +1474,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
sdata->vif.type != NL80211_IFTYPE_STATION);
}
- switch (sdata->vif.type) {
- case NL80211_IFTYPE_P2P_DEVICE:
- rcu_assign_pointer(local->p2p_sdata, sdata);
- break;
- case NL80211_IFTYPE_MONITOR:
- list_add_tail_rcu(&sdata->u.mntr.list, &local->mon_list);
- break;
- default:
- break;
- }
-
/*
* set_multicast_list will be invoked by the networking core
* which will check whether any increments here were done in
@@ -1491,7 +1488,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
if (local->open_count == 1)
ieee80211_hw_conf_init(local);
else if (hw_reconf_flags)
- ieee80211_hw_config(local, hw_reconf_flags);
+ ieee80211_hw_config(local, -1, hw_reconf_flags);
ieee80211_recalc_ps(local);
@@ -1555,6 +1552,35 @@ static void ieee80211_iface_process_skb(struct ieee80211_local *local,
}
}
} else if (ieee80211_is_action(mgmt->frame_control) &&
+ mgmt->u.action.category == WLAN_CATEGORY_HT) {
+ switch (mgmt->u.action.u.ht_smps.action) {
+ case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
+ u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
+ struct ieee80211_rx_status *status;
+ struct link_sta_info *link_sta;
+ struct sta_info *sta;
+
+ sta = sta_info_get_bss(sdata, mgmt->sa);
+ if (!sta)
+ break;
+
+ status = IEEE80211_SKB_RXCB(skb);
+ if (!status->link_valid)
+ link_sta = &sta->deflink;
+ else
+ link_sta = rcu_dereference_protected(sta->link[status->link_id],
+ lockdep_is_held(&local->hw.wiphy->mtx));
+ if (link_sta)
+ ieee80211_ht_handle_chanwidth_notif(local, sdata, sta,
+ link_sta, chanwidth,
+ status->band);
+ break;
+ }
+ default:
+ WARN_ON(1);
+ break;
+ }
+ } else if (ieee80211_is_action(mgmt->frame_control) &&
mgmt->u.action.category == WLAN_CATEGORY_VHT) {
switch (mgmt->u.action.u.vht_group_notif.action_code) {
case WLAN_VHT_ACTION_OPMODE_NOTIF: {
@@ -2210,8 +2236,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
ieee80211_init_frag_cache(&sdata->frags);
- INIT_LIST_HEAD(&sdata->key_list);
-
wiphy_delayed_work_init(&sdata->dec_tailroom_needed_wk,
ieee80211_delayed_tailroom_dec);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index dcf8643a0baa..b14e9cd9713f 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -6,7 +6,7 @@
* Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright 2018-2020, 2022-2024 Intel Corporation
+ * Copyright 2018-2020, 2022-2025 Intel Corporation
*/
#include <crypto/utils.h>
@@ -510,7 +510,8 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
} else {
if (!new->local->wowlan)
ret = ieee80211_key_enable_hw_accel(new);
- else
+ else if (link_id < 0 || !sdata->vif.active_links ||
+ BIT(link_id) & sdata->vif.active_links)
new->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
}
@@ -1353,38 +1354,14 @@ void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
}
EXPORT_SYMBOL_GPL(ieee80211_set_key_rx_seq);
-void ieee80211_remove_key(struct ieee80211_key_conf *keyconf)
-{
- struct ieee80211_key *key;
-
- key = container_of(keyconf, struct ieee80211_key, conf);
-
- lockdep_assert_wiphy(key->local->hw.wiphy);
-
- /*
- * if key was uploaded, we assume the driver will/has remove(d)
- * it, so adjust bookkeeping accordingly
- */
- if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
- key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
-
- if (!(key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
- IEEE80211_KEY_FLAG_PUT_MIC_SPACE |
- IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
- increment_tailroom_need_count(key->sdata);
- }
-
- ieee80211_key_free(key, false);
-}
-EXPORT_SYMBOL_GPL(ieee80211_remove_key);
-
struct ieee80211_key_conf *
ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
+ u8 idx, u8 *key_data, u8 key_len,
int link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_key *prev_key;
struct ieee80211_key *key;
int err;
struct ieee80211_link_data *link_data =
@@ -1400,8 +1377,37 @@ ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
return ERR_PTR(-EINVAL);
- key = ieee80211_key_alloc(keyconf->cipher, keyconf->keyidx,
- keyconf->keylen, keyconf->key,
+ if (WARN_ON(idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
+ NUM_DEFAULT_BEACON_KEYS))
+ return ERR_PTR(-EINVAL);
+
+ prev_key = wiphy_dereference(local->hw.wiphy,
+ link_data->gtk[idx]);
+ if (!prev_key) {
+ if (idx < NUM_DEFAULT_KEYS) {
+ for (int i = 0; i < NUM_DEFAULT_KEYS; i++) {
+ if (i == idx)
+ continue;
+ prev_key = wiphy_dereference(local->hw.wiphy,
+ link_data->gtk[i]);
+ if (prev_key)
+ break;
+ }
+ } else {
+ /* For IGTK we have 4 and 5 and for BIGTK - 6 and 7 */
+ prev_key = wiphy_dereference(local->hw.wiphy,
+ link_data->gtk[idx ^ 1]);
+ }
+ }
+
+ if (WARN_ON(!prev_key))
+ return ERR_PTR(-EINVAL);
+
+ if (WARN_ON(key_len < prev_key->conf.keylen))
+ return ERR_PTR(-EINVAL);
+
+ key = ieee80211_key_alloc(prev_key->conf.cipher, idx,
+ prev_key->conf.keylen, key_data,
0, NULL);
if (IS_ERR(key))
return ERR_CAST(key);
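ieee80211_gtk_rekey_add() now receives only the key index and key material and recovers the cipher and key length from the key being replaced: for GTK indices 0..3 it scans the other GTK slots, while for management keys it relies on the IGTK pair (4, 5) and the BIGTK pair (6, 7) alternating during rekeying, so the previous key always sits in the sibling slot. A small sketch of that slot pairing (illustrative only, not code from this patch):

/* For idx in 4..7 the slot holding the previous IGTK/BIGTK is idx ^ 1:
 * 4 <-> 5 and 6 <-> 7.
 */
static u8 mgmt_key_sibling_idx(u8 idx)
{
	return idx ^ 1;
}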
diff --git a/net/mac80211/link.c b/net/mac80211/link.c
index d40c2bd3b50b..d71eabe5abf8 100644
--- a/net/mac80211/link.c
+++ b/net/mac80211/link.c
@@ -2,7 +2,7 @@
/*
* MLO link handling
*
- * Copyright (C) 2022-2024 Intel Corporation
+ * Copyright (C) 2022-2025 Intel Corporation
*/
#include <linux/slab.h>
#include <linux/kernel.h>
@@ -93,9 +93,6 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata,
if (link_id < 0)
link_id = 0;
- rcu_assign_pointer(sdata->vif.link_conf[link_id], link_conf);
- rcu_assign_pointer(sdata->link[link_id], link);
-
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
struct ieee80211_sub_if_data *ap_bss;
struct ieee80211_bss_conf *ap_bss_conf;
@@ -145,6 +142,9 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata,
ieee80211_link_debugfs_add(link);
}
+
+ rcu_assign_pointer(sdata->vif.link_conf[link_id], link_conf);
+ rcu_assign_pointer(sdata->link[link_id], link);
}
void ieee80211_link_stop(struct ieee80211_link_data *link)
@@ -368,6 +368,13 @@ static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata,
ieee80211_update_apvlan_links(sdata);
}
+ /*
+ * Ignore errors if we are only removing links as removal should
+ * always succeed
+ */
+ if (!new_links)
+ ret = 0;
+
if (ret) {
/* restore config */
memcpy(sdata->link, old_data, sizeof(old_data));
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 6b6de43d9420..eefa6f7e899b 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -5,7 +5,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <net/mac80211.h>
@@ -190,7 +190,8 @@ static u32 ieee80211_calc_hw_conf_chan(struct ieee80211_local *local,
return changed;
}
-int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
+int ieee80211_hw_config(struct ieee80211_local *local, int radio_idx,
+ u32 changed)
{
int ret = 0;
@@ -201,7 +202,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
IEEE80211_CONF_CHANGE_SMPS));
if (changed && local->open_count) {
- ret = drv_config(local, changed);
+ ret = drv_config(local, radio_idx, changed);
/*
* Goal:
* HW reconfiguration should never fail, the driver has told
@@ -235,7 +236,7 @@ static int _ieee80211_hw_conf_chan(struct ieee80211_local *local,
if (!changed)
return 0;
- return drv_config(local, changed);
+ return drv_config(local, -1, changed);
}
int ieee80211_hw_conf_chan(struct ieee80211_local *local)
@@ -269,7 +270,7 @@ void ieee80211_hw_conf_init(struct ieee80211_local *local)
ctx ? &ctx->conf : NULL);
}
- WARN_ON(drv_config(local, changed));
+ WARN_ON(drv_config(local, -1, changed));
}
int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw,
@@ -407,9 +408,20 @@ void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata,
WARN_ON_ONCE(changed & BSS_CHANGED_VIF_CFG_FLAGS);
- if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ if (!changed)
return;
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP_VLAN:
+ return;
+ case NL80211_IFTYPE_MONITOR:
+ if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ return;
+ break;
+ default:
+ break;
+ }
+
if (!check_sdata_in_driver(sdata))
return;
@@ -734,6 +746,11 @@ ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
BIT(IEEE80211_STYPE_AUTH >> 4),
},
+ [NL80211_IFTYPE_NAN] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4),
+ },
};
static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
@@ -850,6 +867,14 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
if (emulate_chanctx || ops->remain_on_channel)
wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ wiphy->bss_param_support = WIPHY_BSS_PARAM_CTS_PROT |
+ WIPHY_BSS_PARAM_SHORT_PREAMBLE |
+ WIPHY_BSS_PARAM_SHORT_SLOT_TIME |
+ WIPHY_BSS_PARAM_BASIC_RATES |
+ WIPHY_BSS_PARAM_AP_ISOLATE |
+ WIPHY_BSS_PARAM_HT_OPMODE |
+ WIPHY_BSS_PARAM_P2P_CTWINDOW |
+ WIPHY_BSS_PARAM_P2P_OPPPS;
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS |
NL80211_FEATURE_SAE |
NL80211_FEATURE_HT_IBSS |
@@ -1024,12 +1049,9 @@ EXPORT_SYMBOL(ieee80211_alloc_hw_nm);
static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
{
- bool have_wep = !fips_enabled; /* FIPS does not permit the use of RC4 */
bool have_mfp = ieee80211_hw_check(&local->hw, MFP_CAPABLE);
- int r = 0, w = 0;
- u32 *suites;
static const u32 cipher_suites[] = {
- /* keep WEP first, it may be removed below */
+ /* keep WEP and TKIP first, they may be removed below */
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
WLAN_CIPHER_SUITE_TKIP,
@@ -1045,34 +1067,17 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
WLAN_CIPHER_SUITE_BIP_GMAC_256,
};
- if (ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL) ||
- local->hw.wiphy->cipher_suites) {
- /* If the driver advertises, or doesn't support SW crypto,
- * we only need to remove WEP if necessary.
- */
- if (have_wep)
- return 0;
-
- /* well if it has _no_ ciphers ... fine */
- if (!local->hw.wiphy->n_cipher_suites)
- return 0;
-
- /* Driver provides cipher suites, but we need to exclude WEP */
- suites = kmemdup_array(local->hw.wiphy->cipher_suites,
- local->hw.wiphy->n_cipher_suites,
- sizeof(u32), GFP_KERNEL);
- if (!suites)
- return -ENOMEM;
-
- for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) {
- u32 suite = local->hw.wiphy->cipher_suites[r];
-
- if (suite == WLAN_CIPHER_SUITE_WEP40 ||
- suite == WLAN_CIPHER_SUITE_WEP104)
- continue;
- suites[w++] = suite;
- }
- } else {
+ if (ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL) && fips_enabled) {
+ dev_err(local->hw.wiphy->dev.parent,
+ "Drivers with SW_CRYPTO_CONTROL cannot work with FIPS\n");
+ return -EINVAL;
+ }
+
+ if (WARN_ON(ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL) &&
+ !local->hw.wiphy->cipher_suites))
+ return -EINVAL;
+
+ if (fips_enabled || !local->hw.wiphy->cipher_suites) {
/* assign the (software supported and perhaps offloaded)
* cipher suites
*/
@@ -1082,19 +1087,13 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
if (!have_mfp)
local->hw.wiphy->n_cipher_suites -= 4;
- if (!have_wep) {
- local->hw.wiphy->cipher_suites += 2;
- local->hw.wiphy->n_cipher_suites -= 2;
+ /* FIPS does not permit the use of RC4 */
+ if (fips_enabled) {
+ local->hw.wiphy->cipher_suites += 3;
+ local->hw.wiphy->n_cipher_suites -= 3;
}
-
- /* not dynamically allocated, so just return */
- return 0;
}
- local->hw.wiphy->cipher_suites = suites;
- local->hw.wiphy->n_cipher_suites = w;
- local->wiphy_ciphers_allocated = true;
-
return 0;
}
@@ -1125,7 +1124,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
int result, i;
enum nl80211_band band;
int channels, max_bitrates;
- bool supp_ht, supp_vht, supp_he, supp_eht;
+ bool supp_ht, supp_vht, supp_he, supp_eht, supp_s1g;
struct cfg80211_chan_def dflt_chandef = {};
if (ieee80211_hw_check(hw, QUEUE_CONTROL) &&
@@ -1178,9 +1177,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (WARN_ON(!ieee80211_hw_check(hw, MFP_CAPABLE)))
return -EINVAL;
- if (WARN_ON(!ieee80211_hw_check(hw, CONNECTION_MONITOR)))
- return -EINVAL;
-
if (WARN_ON(ieee80211_hw_check(hw, NEED_DTIM_BEFORE_ASSOC)))
return -EINVAL;
@@ -1241,6 +1237,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
supp_vht = false;
supp_he = false;
supp_eht = false;
+ supp_s1g = false;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
const struct ieee80211_sband_iftype_data *iftd;
struct ieee80211_supported_band *sband;
@@ -1252,11 +1249,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (!dflt_chandef.chan) {
/*
* Assign the first enabled channel to dflt_chandef
- * from the list of channels
+ * from the list of channels. For S1G interfaces
+ * ensure it can be used as a primary.
*/
for (i = 0; i < sband->n_channels; i++)
if (!(sband->channels[i].flags &
- IEEE80211_CHAN_DISABLED))
+ (IEEE80211_CHAN_DISABLED |
+ IEEE80211_CHAN_S1G_NO_PRIMARY)))
break;
/* if none found then use the first anyway */
if (i == sband->n_channels)
@@ -1288,6 +1287,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
max_bitrates = sband->n_bitrates;
supp_ht = supp_ht || sband->ht_cap.ht_supported;
supp_vht = supp_vht || sband->vht_cap.vht_supported;
+ supp_s1g = supp_s1g || sband->s1g_cap.s1g;
for_each_sband_iftype_data(sband, i, iftd) {
u8 he_40_mhz_cap;
@@ -1359,7 +1359,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
GFP_KERNEL);
if (!local->int_scan_req)
return -ENOMEM;
- local->int_scan_req->n_channels = channels;
eth_broadcast_addr(local->int_scan_req->bssid);
@@ -1421,6 +1420,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
local->scan_ies_len +=
2 + sizeof(struct ieee80211_vht_cap);
+ if (supp_s1g)
+ local->scan_ies_len += 2 + sizeof(struct ieee80211_s1g_cap);
+
/*
* HE cap element is variable in size - set len to allow max size */
if (supp_he) {
@@ -1650,10 +1652,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
ieee80211_led_exit(local);
destroy_workqueue(local->workqueue);
fail_workqueue:
- if (local->wiphy_ciphers_allocated) {
- kfree(local->hw.wiphy->cipher_suites);
- local->wiphy_ciphers_allocated = false;
- }
kfree(local->int_scan_req);
return result;
}
@@ -1724,11 +1722,6 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
mutex_destroy(&local->iflist_mtx);
- if (local->wiphy_ciphers_allocated) {
- kfree(local->hw.wiphy->cipher_suites);
- local->wiphy_ciphers_allocated = false;
- }
-
idr_for_each(&local->ack_status_frames,
ieee80211_free_ack_frame, NULL);
idr_destroy(&local->ack_status_frames);
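ieee80211_init_cipher_suites() no longer builds a filtered copy of the driver's suite list; the static fallback table is ordered so it can be trimmed in place instead: without MFP the count drops by four to cut the trailing BIP suites, and under FIPS the start pointer advances by three to skip WEP40, WEP104 and TKIP. A toy demonstration of the same trim-from-both-ends idiom (illustrative only, with a made-up table rather than the real cipher list):

static const u32 table[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };

static void trim_table(const u32 **start, int *n, bool skip_first_three,
		       bool drop_last_four)
{
	*start = table;
	*n = ARRAY_SIZE(table);

	if (drop_last_four)
		*n -= 4;	/* analogue of dropping the MFP-only suites */

	if (skip_first_three) {
		*start += 3;	/* analogue of skipping WEP40/WEP104/TKIP */
		*n -= 3;
	}
}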
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index d00d9d413c5c..f37068a533f4 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -624,6 +624,9 @@ int mesh_add_he_6ghz_cap_ie(struct ieee80211_sub_if_data *sdata,
if (!sband)
return -EINVAL;
+ if (sband->band != NL80211_BAND_6GHZ)
+ return 0;
+
iftd = ieee80211_get_sband_iftype_data(sband,
NL80211_IFTYPE_MESH_POINT);
/* The device doesn't support HE in mesh mode or at all */
@@ -1202,7 +1205,7 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
return -ENOMEM;
}
- ieee80211_recalc_dtim(local, sdata);
+ ieee80211_recalc_dtim(sdata, drv_get_tsf(local, sdata));
ieee80211_link_info_change_notify(sdata, &sdata->deflink, changed);
netif_carrier_on(sdata->dev);
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
index 20e022a03933..ebab1f0a0138 100644
--- a/net/mac80211/mesh_ps.c
+++ b/net/mac80211/mesh_ps.c
@@ -586,7 +586,7 @@ void ieee80211_mps_frame_release(struct sta_info *sta,
if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
has_buffered = ieee80211_check_tim(elems->tim, elems->tim_len,
- sta->mesh->aid);
+ sta->mesh->aid, false);
if (has_buffered)
mps_dbg(sta->sdata, "%pM indicates buffered frames\n",
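ieee80211_check_tim() now takes an extra s1g flag (false here for mesh, vif_cfg->s1g in the station code further down). Per the comment added to ieee80211_i.h above, S1G TIM encoding groups AIDs into blocks of 64, which is why the 1600 supported AIDs correspond to 25 blocks. A one-line sketch of that relationship (illustrative only, not a function from this patch):

static unsigned int s1g_aid_to_tim_block(unsigned int aid)
{
	return aid / 64;	/* e.g. AID 1599 falls in block 24, the 25th block */
}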
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 2d46d4af60d7..3b5827ea438e 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -180,10 +180,11 @@ ieee80211_determine_ap_chan(struct ieee80211_sub_if_data *sdata,
/* get special S1G case out of the way */
if (sband->band == NL80211_BAND_S1GHZ) {
- if (!ieee80211_chandef_s1g_oper(elems->s1g_oper, chandef)) {
- sdata_info(sdata,
- "Missing S1G Operation Element? Trying operating == primary\n");
- chandef->width = ieee80211_s1g_channel_width(channel);
+ if (!ieee80211_chandef_s1g_oper(sdata->local, elems->s1g_oper,
+ chandef)) {
+ /* Fallback to default 1MHz */
+ chandef->width = NL80211_CHAN_WIDTH_1;
+ chandef->s1g_primary_2mhz = false;
}
return IEEE80211_CONN_MODE_S1G;
@@ -776,10 +777,6 @@ static bool ieee80211_chandef_usable(struct ieee80211_sub_if_data *sdata,
ieee80211_hw_check(&sdata->local->hw, DISALLOW_PUNCTURING))
return false;
- if (chandef->punctured && chandef->chan->band == NL80211_BAND_5GHZ &&
- ieee80211_hw_check(&sdata->local->hw, DISALLOW_PUNCTURING_5GHZ))
- return false;
-
return true;
}
@@ -1050,6 +1047,14 @@ again:
ret = -EINVAL;
goto free;
}
+
+ chanreq->oper = *ap_chandef;
+ if (!cfg80211_chandef_usable(sdata->wdev.wiphy, &chanreq->oper,
+ IEEE80211_CHAN_DISABLED)) {
+ ret = -EINVAL;
+ goto free;
+ }
+
return elems;
case NL80211_BAND_6GHZ:
if (ap_mode < IEEE80211_CONN_MODE_HE) {
@@ -1193,6 +1198,14 @@ again:
"required MCSes not supported, disabling EHT\n");
}
+ if (conn->mode >= IEEE80211_CONN_MODE_EHT &&
+ channel->band != NL80211_BAND_2GHZ &&
+ conn->bw_limit == IEEE80211_CONN_BW_LIMIT_40) {
+ conn->mode = IEEE80211_CONN_MODE_HE;
+ link_id_info(sdata, link_id,
+ "required bandwidth not supported, disabling EHT\n");
+ }
+
/* the mode can only decrease, so this must terminate */
if (ap_mode != conn->mode) {
kfree(elems);
@@ -1218,18 +1231,36 @@ EXPORT_SYMBOL_IF_MAC80211_KUNIT(ieee80211_determine_chan_mode);
static int ieee80211_config_bw(struct ieee80211_link_data *link,
struct ieee802_11_elems *elems,
- bool update, u64 *changed,
- const char *frame)
+ bool update, u64 *changed, u16 stype)
{
struct ieee80211_channel *channel = link->conf->chanreq.oper.chan;
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_chan_req chanreq = {};
struct cfg80211_chan_def ap_chandef;
enum ieee80211_conn_mode ap_mode;
+ const char *frame;
u32 vht_cap_info = 0;
u16 ht_opmode;
int ret;
+ switch (stype) {
+ case IEEE80211_STYPE_BEACON:
+ frame = "beacon";
+ break;
+ case IEEE80211_STYPE_ASSOC_RESP:
+ frame = "assoc response";
+ break;
+ case IEEE80211_STYPE_REASSOC_RESP:
+ frame = "reassoc response";
+ break;
+ case IEEE80211_STYPE_ACTION:
+ /* the only action frame that gets here */
+ frame = "ML reconf response";
+ break;
+ default:
+ return -EINVAL;
+ }
+
/* don't track any bandwidth changes in legacy/S1G modes */
if (link->u.mgd.conn.mode == IEEE80211_CONN_MODE_LEGACY ||
link->u.mgd.conn.mode == IEEE80211_CONN_MODE_S1G)
@@ -1278,7 +1309,9 @@ static int ieee80211_config_bw(struct ieee80211_link_data *link,
ieee80211_min_bw_limit_from_chandef(&chanreq.oper))
ieee80211_chandef_downgrade(&chanreq.oper, NULL);
- if (ap_chandef.chan->band == NL80211_BAND_6GHZ &&
+ /* TPE element is not present in (re)assoc/ML reconfig response */
+ if (stype == IEEE80211_STYPE_BEACON &&
+ ap_chandef.chan->band == NL80211_BAND_6GHZ &&
link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE) {
ieee80211_rearrange_tpe(&elems->tpe, &ap_chandef,
&chanreq.oper);
@@ -1645,6 +1678,30 @@ static size_t ieee80211_add_before_he_elems(struct sk_buff *skb,
return noffset;
}
+static size_t ieee80211_add_before_reg_conn(struct sk_buff *skb,
+ const u8 *elems, size_t elems_len,
+ size_t offset)
+{
+ static const u8 before_reg_conn[] = {
+ /*
+ * no need to list the ones split off before HE
+ * or generated here
+ */
+ WLAN_EID_EXTENSION, WLAN_EID_EXT_DH_PARAMETER,
+ WLAN_EID_EXTENSION, WLAN_EID_EXT_KNOWN_STA_IDENTIFCATION,
+ };
+ size_t noffset;
+
+ if (!elems_len)
+ return offset;
+
+ noffset = ieee80211_ie_split(elems, elems_len, before_reg_conn,
+ ARRAY_SIZE(before_reg_conn), offset);
+ skb_put_data(skb, elems + offset, noffset - offset);
+
+ return noffset;
+}
+
#define PRESENT_ELEMS_MAX 8
#define PRESENT_ELEM_EXT_OFFS 0x100
@@ -1802,7 +1859,24 @@ ieee80211_add_link_elems(struct ieee80211_sub_if_data *sdata,
ieee80211_put_he_cap(skb, sdata, sband,
&assoc_data->link[link_id].conn);
ADD_PRESENT_EXT_ELEM(WLAN_EID_EXT_HE_CAPABILITY);
- ieee80211_put_he_6ghz_cap(skb, sdata, smps_mode);
+ if (sband->band == NL80211_BAND_6GHZ)
+ ieee80211_put_he_6ghz_cap(skb, sdata, smps_mode);
+ }
+
+ /*
+ * if present, add any custom IEs that go before regulatory
+ * connectivity element
+ */
+ offset = ieee80211_add_before_reg_conn(skb, extra_elems,
+ extra_elems_len, offset);
+
+ if (sband->band == NL80211_BAND_6GHZ) {
+ /*
+ * as per Section E.2.7 of IEEE 802.11 REVme D7.0, non-AP STA
+ * capable of operating on the 6 GHz band shall transmit
+ * regulatory connectivity element.
+ */
+ ieee80211_put_reg_conn(skb, chan->flags);
}
/*
@@ -1943,14 +2017,7 @@ ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata,
}
skb_put_data(skb, &mld_capa_ops, sizeof(mld_capa_ops));
- /* Many APs have broken parsing of the extended MLD capa/ops field,
- * dropping (re-)association request frames or replying with association
- * response with a failure status if it's present. Without a clear
- * indication as to whether the AP supports parsing this field or not do
- * not include it in the common information unless strict mode is set.
- */
- if (ieee80211_hw_check(&local->hw, STRICT) &&
- assoc_data->ext_mld_capa_ops) {
+ if (assoc_data->ext_mld_capa_ops) {
ml_elem->control |=
cpu_to_le16(IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP);
common->len += 2;
@@ -2055,8 +2122,11 @@ ieee80211_link_common_elems_size(struct ieee80211_sub_if_data *sdata,
sizeof(struct ieee80211_he_mcs_nss_supp) +
IEEE80211_HE_PPE_THRES_MAX_LEN;
- if (sband->band == NL80211_BAND_6GHZ)
+ if (sband->band == NL80211_BAND_6GHZ) {
size += 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa);
+ /* reg connection */
+ size += 4;
+ }
size += 2 + 1 + sizeof(struct ieee80211_eht_cap_elem) +
sizeof(struct ieee80211_eht_mcs_nss_supp) +
@@ -2130,11 +2200,7 @@ static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
2 + /* ext capa & op */
2; /* EML capa */
- /*
- * The capability elements were already considered above;
- * note this over-estimates a bit because there's no
- * STA profile for the assoc link.
- */
+ /* The capability elements were already considered above */
size += (n_links - 1) *
(1 + 1 + /* subelement ID/length */
2 + /* STA control */
@@ -2381,9 +2447,26 @@ static void ieee80211_csa_switch_work(struct wiphy *wiphy,
* update cfg80211 directly.
*/
if (!ieee80211_vif_link_active(&sdata->vif, link->link_id)) {
+ struct link_sta_info *link_sta;
+ struct sta_info *ap_sta;
+
link->conf->chanreq = link->csa.chanreq;
cfg80211_ch_switch_notify(sdata->dev, &link->csa.chanreq.oper,
link->link_id);
+ link->conf->csa_active = false;
+
+ ap_sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
+ if (WARN_ON(!ap_sta))
+ return;
+
+ link_sta = wiphy_dereference(wiphy,
+ ap_sta->link[link->link_id]);
+ if (WARN_ON(!link_sta))
+ return;
+
+ link_sta->pub->bandwidth =
+ _ieee80211_sta_cur_vht_bw(link_sta,
+ &link->csa.chanreq.oper);
return;
}
@@ -2439,6 +2522,21 @@ static void ieee80211_csa_switch_work(struct wiphy *wiphy,
}
}
+ /*
+ * It is not necessary to reset these timers if any link does not
+ * have an active CSA and that link still receives the beacons
+ * when other links have active CSA.
+ */
+ for_each_link_data(sdata, link) {
+ if (!link->conf->csa_active)
+ return;
+ }
+
+ /*
+ * Reset the beacon monitor and connection monitor timers when CSA
+ * is active for all links in MLO when channel switch occurs in all
+ * the links.
+ */
ieee80211_sta_reset_beacon_monitor(sdata);
ieee80211_sta_reset_conn_monitor(sdata);
}
@@ -2515,7 +2613,8 @@ ieee80211_sta_abort_chanswitch(struct ieee80211_link_data *link)
if (!local->ops->abort_channel_switch)
return;
- ieee80211_link_unreserve_chanctx(link);
+ if (rcu_access_pointer(link->conf->chanctx_conf))
+ ieee80211_link_unreserve_chanctx(link);
ieee80211_vif_unblock_queues_csa(sdata);
@@ -3181,7 +3280,7 @@ static void ieee80211_enable_ps(struct ieee80211_local *local,
return;
conf->flags |= IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS);
}
}
@@ -3193,7 +3292,7 @@ static void ieee80211_change_ps(struct ieee80211_local *local)
ieee80211_enable_ps(local, local->ps_sdata);
} else if (conf->flags & IEEE80211_CONF_PS) {
conf->flags &= ~IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS);
timer_delete_sync(&local->dynamic_ps_timer);
wiphy_work_cancel(local->hw.wiphy,
&local->dynamic_ps_enable_work);
@@ -3302,7 +3401,7 @@ void ieee80211_dynamic_ps_disable_work(struct wiphy *wiphy,
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
local->hw.conf.flags &= ~IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS);
}
ieee80211_wake_queues_by_reason(&local->hw,
@@ -3377,7 +3476,7 @@ void ieee80211_dynamic_ps_enable_work(struct wiphy *wiphy,
(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
local->hw.conf.flags |= IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS);
}
}
@@ -3934,6 +4033,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
lockdep_assert_wiphy(local->hw.wiphy);
+ if (frame_buf)
+ memset(frame_buf, 0, IEEE80211_DEAUTH_FRAME_LEN);
+
if (WARN_ON(!ap_sta))
return;
@@ -3986,7 +4088,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
*/
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
local->hw.conf.flags &= ~IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS);
}
local->ps_sdata = NULL;
@@ -4282,9 +4384,6 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
lockdep_assert_wiphy(sdata->local->hw.wiphy);
- if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif)))
- return;
-
/*
* Try sending broadcast probe requests for the last three
* probe requests after the first ones failed since some
@@ -4330,9 +4429,6 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
lockdep_assert_wiphy(sdata->local->hw.wiphy);
- if (WARN_ON_ONCE(ieee80211_vif_is_mld(&sdata->vif)))
- return;
-
if (!ieee80211_sdata_running(sdata))
return;
@@ -4734,6 +4830,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_prep_tx_info info = {
.subtype = IEEE80211_STYPE_AUTH,
};
+ bool sae_need_confirm = false;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
@@ -4779,6 +4876,8 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
jiffies + IEEE80211_AUTH_WAIT_SAE_RETRY;
ifmgd->auth_data->timeout_started = true;
run_again(sdata, ifmgd->auth_data->timeout);
+ if (auth_transaction == 1)
+ sae_need_confirm = true;
goto notify_driver;
}
@@ -4822,6 +4921,9 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_mark_sta_auth(sdata))
return; /* ignore frame -- wait for timeout */
} else if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE &&
+ auth_transaction == 1) {
+ sae_need_confirm = true;
+ } else if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE &&
auth_transaction == 2) {
sdata_info(sdata, "SAE peer confirmed\n");
ifmgd->auth_data->peer_confirmed = true;
@@ -4829,7 +4931,8 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
notify_driver:
- drv_mgd_complete_tx(sdata->local, sdata, &info);
+ if (!sae_need_confirm)
+ drv_mgd_complete_tx(sdata->local, sdata, &info);
}
#define case_WLAN(type) \
@@ -5291,7 +5394,9 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
/* check/update if AP changed anything in assoc response vs. scan */
if (ieee80211_config_bw(link, elems,
link_id == assoc_data->assoc_link_id,
- changed, "assoc response")) {
+ changed,
+ le16_to_cpu(mgmt->frame_control) &
+ IEEE80211_FCTL_STYPE)) {
ret = false;
goto out;
}
@@ -5399,6 +5504,12 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
bss_conf->epcs_support = false;
}
+ if (elems->s1g_oper &&
+ link->u.mgd.conn.mode == IEEE80211_CONN_MODE_S1G &&
+ elems->s1g_capab)
+ ieee80211_s1g_cap_to_sta_s1g_cap(sdata, elems->s1g_capab,
+ link_sta);
+
bss_conf->twt_broadcast =
ieee80211_twt_bcast_support(sdata, bss_conf, sband, link_sta);
@@ -5627,7 +5738,7 @@ static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link,
he_cap_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY,
ies->data, ies->len);
- if (!he_cap_elem || he_cap_elem->datalen < sizeof(*he_cap))
+ if (!he_cap_elem || he_cap_elem->datalen < sizeof(*he_cap) + 1)
return chains;
/* skip one byte ext_tag_id */
@@ -5919,6 +6030,7 @@ ieee80211_ap_power_type(u8 control)
return IEEE80211_REG_LPI_AP;
case IEEE80211_6GHZ_CTRL_REG_SP_AP:
case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP:
+ case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP_OLD:
return IEEE80211_REG_SP_AP;
case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
return IEEE80211_REG_VLP_AP;
@@ -6253,6 +6365,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
};
u8 ap_mld_addr[ETH_ALEN] __aligned(2);
unsigned int link_id;
+ u16 max_aid = IEEE80211_MAX_AID;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
@@ -6279,10 +6392,12 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
reassoc = ieee80211_is_reassoc_resp(mgmt->frame_control);
capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
- if (assoc_data->s1g)
+ if (assoc_data->s1g) {
elem_start = mgmt->u.s1g_assoc_resp.variable;
- else
+ max_aid = IEEE80211_MAX_SUPPORTED_S1G_AID;
+ } else {
elem_start = mgmt->u.assoc_resp.variable;
+ }
/*
* Note: this may not be perfect, AP might misbehave - if
@@ -6306,16 +6421,15 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
if (elems->aid_resp)
aid = le16_to_cpu(elems->aid_resp->aid);
- else if (assoc_data->s1g)
- aid = 0; /* TODO */
else
aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
/*
- * The 5 MSB of the AID field are reserved
- * (802.11-2016 9.4.1.8 AID field)
+ * The 5 MSB of the AID field are reserved for a non-S1G STA. For
+ * an S1G STA the 3 MSBs are reserved.
+ * (802.11-2016 9.4.1.8 AID field).
*/
- aid &= 0x7ff;
+ aid &= assoc_data->s1g ? 0x1fff : 0x7ff;
sdata_info(sdata,
"RX %sssocResp from %pM (capab=0x%x status=%d aid=%d)\n",
@@ -6352,7 +6466,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
event.u.mlme.reason = status_code;
drv_event_callback(sdata->local, sdata, &event);
} else {
- if (aid == 0 || aid > IEEE80211_MAX_AID) {
+ if (aid == 0 || aid > max_aid) {
sdata_info(sdata,
"invalid AID value %d (out of range), turn off PS\n",
aid);
@@ -6390,6 +6504,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
}
sdata->vif.cfg.aid = aid;
+ sdata->vif.cfg.s1g = assoc_data->s1g;
if (!ieee80211_assoc_success(sdata, mgmt, elems,
elem_start, elem_len)) {
@@ -7186,6 +7301,38 @@ static bool ieee80211_mgd_ssid_mismatch(struct ieee80211_sub_if_data *sdata,
return memcmp(elems->ssid, cfg->ssid, cfg->ssid_len);
}
+static bool
+ieee80211_rx_beacon_freq_valid(struct ieee80211_local *local,
+ struct ieee80211_mgmt *mgmt,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_chanctx_conf *chanctx)
+{
+ u32 pri_2mhz_khz;
+ struct ieee80211_channel *s1g_sibling_1mhz;
+ u32 pri_khz = ieee80211_channel_to_khz(chanctx->def.chan);
+ u32 rx_khz = ieee80211_rx_status_to_khz(rx_status);
+
+ if (rx_khz == pri_khz)
+ return true;
+
+ if (!chanctx->def.s1g_primary_2mhz)
+ return false;
+
+ /*
+ * If we have an S1G interface with a 2MHz primary, beacons are
+ * sent on the center frequency of the 2MHz primary. Find the sibling
+ * 1MHz channel and calculate the 2MHz primary center frequency.
+ */
+ s1g_sibling_1mhz = cfg80211_s1g_get_primary_sibling(local->hw.wiphy,
+ &chanctx->def);
+ if (!s1g_sibling_1mhz)
+ return false;
+
+ pri_2mhz_khz =
+ (pri_khz + ieee80211_channel_to_khz(s1g_sibling_1mhz)) / 2;
+ return rx_khz == pri_2mhz_khz;
+}
+
static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
struct ieee80211_hdr *hdr, size_t len,
struct ieee80211_rx_status *rx_status)
@@ -7195,6 +7342,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
struct ieee80211_bss_conf *bss_conf = link->conf;
struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg;
struct ieee80211_mgmt *mgmt = (void *) hdr;
+ struct ieee80211_ext *ext = NULL;
size_t baselen;
struct ieee802_11_elems *elems;
struct ieee80211_local *local = sdata->local;
@@ -7220,7 +7368,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
/* Process beacon from the current BSS */
bssid = ieee80211_get_bssid(hdr, len, sdata->vif.type);
if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
- struct ieee80211_ext *ext = (void *) mgmt;
+ ext = (void *)mgmt;
variable = ext->u.s1g_beacon.variable +
ieee80211_s1g_optional_len(ext->frame_control);
}
@@ -7239,8 +7387,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
return;
}
- if (ieee80211_rx_status_to_khz(rx_status) !=
- ieee80211_channel_to_khz(chanctx_conf->def.chan)) {
+ if (!ieee80211_rx_beacon_freq_valid(local, mgmt, rx_status,
+ chanctx_conf)) {
rcu_read_unlock();
return;
}
@@ -7336,11 +7484,12 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
ncrc = elems->crc;
if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) &&
- ieee80211_check_tim(elems->tim, elems->tim_len, vif_cfg->aid)) {
+ ieee80211_check_tim(elems->tim, elems->tim_len, vif_cfg->aid,
+ vif_cfg->s1g)) {
if (local->hw.conf.dynamic_ps_timeout > 0) {
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
local->hw.conf.flags &= ~IEEE80211_CONF_PS;
- ieee80211_hw_config(local,
+ ieee80211_hw_config(local, -1,
IEEE80211_CONF_CHANGE_PS);
}
ieee80211_send_nullfunc(local, sdata, false);
@@ -7407,7 +7556,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
}
if ((ncrc == link->u.mgd.beacon_crc && link->u.mgd.beacon_crc_valid) ||
- ieee80211_is_s1g_short_beacon(mgmt->frame_control))
+ (ext && ieee80211_is_s1g_short_beacon(ext->frame_control,
+ parse_params.start,
+ parse_params.len)))
goto free;
link->u.mgd.beacon_crc = ncrc;
link->u.mgd.beacon_crc_valid = true;
@@ -7476,7 +7627,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
changed |= ieee80211_recalc_twt_req(sdata, sband, link, link_sta, elems);
- if (ieee80211_config_bw(link, elems, true, &changed, "beacon")) {
+ if (ieee80211_config_bw(link, elems, true, &changed,
+ IEEE80211_STYPE_BEACON)) {
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DEAUTH_LEAVING,
true, deauth_buf);
@@ -8383,16 +8535,32 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
}
}
+static bool
+ieee80211_is_csa_in_progress(struct ieee80211_sub_if_data *sdata)
+{
+ /*
+ * In MLO, check the CSA flags 'active' and 'waiting_bcn' for all
+ * the links.
+ */
+ struct ieee80211_link_data *link;
+
+ guard(rcu)();
+
+ for_each_link_data_rcu(sdata, link) {
+ if (!(link->conf->csa_active &&
+ !link->u.mgd.csa.waiting_bcn))
+ return false;
+ }
+
+ return true;
+}
+
static void ieee80211_sta_bcn_mon_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
timer_container_of(sdata, t, u.mgd.bcn_mon_timer);
- if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif)))
- return;
-
- if (sdata->vif.bss_conf.csa_active &&
- !sdata->deflink.u.mgd.csa.waiting_bcn)
+ if (ieee80211_is_csa_in_progress(sdata))
return;
if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
@@ -8403,36 +8571,69 @@ static void ieee80211_sta_bcn_mon_timer(struct timer_list *t)
&sdata->u.mgd.beacon_connection_loss_work);
}
+static unsigned long
+ieee80211_latest_active_link_conn_timeout(struct ieee80211_sub_if_data *sdata)
+{
+ unsigned long latest_timeout = jiffies;
+ unsigned int link_id;
+ struct sta_info *sta;
+
+ guard(rcu)();
+
+ sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
+ if (!sta)
+ return 0;
+
+ for (link_id = 0; link_id < ARRAY_SIZE(sta->link);
+ link_id++) {
+ struct link_sta_info *link_sta;
+ unsigned long timeout;
+
+ link_sta = rcu_dereference(sta->link[link_id]);
+ if (!link_sta)
+ continue;
+
+ timeout = link_sta->status_stats.last_ack;
+ if (time_before(timeout, link_sta->rx_stats.last_rx))
+ timeout = link_sta->rx_stats.last_rx;
+
+ timeout += IEEE80211_CONNECTION_IDLE_TIME;
+
+ /*
+ * latest_timeout holds the timeout of the link
+ * that will expire last among all links in an
+ * non-AP MLD STA. This ensures that the connection
+ * monitor timer is only reset if at least one link
+ * is still active, and it is scheduled to fire at
+ * the latest possible timeout.
+ */
+ if (time_after(timeout, latest_timeout))
+ latest_timeout = timeout;
+ }
+
+ return latest_timeout;
+}
+
static void ieee80211_sta_conn_mon_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
timer_container_of(sdata, t, u.mgd.conn_mon_timer);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
- struct sta_info *sta;
- unsigned long timeout;
-
- if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif)))
- return;
+ unsigned long latest_timeout;
- if (sdata->vif.bss_conf.csa_active &&
- !sdata->deflink.u.mgd.csa.waiting_bcn)
+ if (ieee80211_is_csa_in_progress(sdata))
return;
- sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
- if (!sta)
- return;
+ latest_timeout = ieee80211_latest_active_link_conn_timeout(sdata);
- timeout = sta->deflink.status_stats.last_ack;
- if (time_before(sta->deflink.status_stats.last_ack, sta->deflink.rx_stats.last_rx))
- timeout = sta->deflink.rx_stats.last_rx;
- timeout += IEEE80211_CONNECTION_IDLE_TIME;
-
- /* If timeout is after now, then update timer to fire at
+ /*
+ * If latest timeout is after now, then update timer to fire at
* the later date, but do not actually probe at this time.
*/
- if (time_is_after_jiffies(timeout)) {
- mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(timeout));
+ if (time_is_after_jiffies(latest_timeout)) {
+ mod_timer(&ifmgd->conn_mon_timer,
+ round_jiffies_up(latest_timeout));
return;
}
@@ -8692,21 +8893,33 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
bool have_sta = false;
bool mlo;
int err;
+ u16 new_links;
if (link_id >= 0) {
mlo = true;
if (WARN_ON(!ap_mld_addr))
return -EINVAL;
- err = ieee80211_vif_set_links(sdata, BIT(link_id), 0);
+ new_links = BIT(link_id);
} else {
if (WARN_ON(ap_mld_addr))
return -EINVAL;
ap_mld_addr = cbss->bssid;
- err = ieee80211_vif_set_links(sdata, 0, 0);
+ new_links = 0;
link_id = 0;
mlo = false;
}
+ if (assoc) {
+ rcu_read_lock();
+ have_sta = sta_info_get(sdata, ap_mld_addr);
+ rcu_read_unlock();
+ }
+
+ if (mlo && !have_sta &&
+ WARN_ON(sdata->vif.valid_links || sdata->vif.active_links))
+ return -EINVAL;
+
+ err = ieee80211_vif_set_links(sdata, new_links, 0);
if (err)
return err;
@@ -8727,12 +8940,6 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
goto out_err;
}
- if (assoc) {
- rcu_read_lock();
- have_sta = sta_info_get(sdata, ap_mld_addr);
- rcu_read_unlock();
- }
-
if (!have_sta) {
if (mlo)
new_sta = sta_info_alloc_with_link(sdata, ap_mld_addr,
@@ -9332,6 +9539,39 @@ out_rcu:
return err;
}
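+/*
+ * Check whether the BSS selected for (MLO) association advertises the
+ * extended MLD capabilities/operations field in its Basic Multi-Link
+ * element.
+ */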
+static bool
+ieee80211_mgd_assoc_bss_has_mld_ext_capa_ops(struct cfg80211_assoc_request *req)
+{
+ const struct cfg80211_bss_ies *ies;
+ struct cfg80211_bss *bss;
+ const struct element *ml;
+
+ /* not an MLO connection if link_id < 0, so irrelevant */
+ if (req->link_id < 0)
+ return false;
+
+ bss = req->links[req->link_id].bss;
+
+ guard(rcu)();
+ ies = rcu_dereference(bss->ies);
+ for_each_element_extid(ml, WLAN_EID_EXT_EHT_MULTI_LINK,
+ ies->data, ies->len) {
+ const struct ieee80211_multi_link_elem *mle;
+
+ if (!ieee80211_mle_type_ok(ml->data + 1,
+ IEEE80211_ML_CONTROL_TYPE_BASIC,
+ ml->datalen - 1))
+ continue;
+
+ mle = (void *)(ml->data + 1);
+ if (mle->control & cpu_to_le16(IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP))
+ return true;
+ }
+
+	return false;
+}
+
int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
struct cfg80211_assoc_request *req)
{
@@ -9384,7 +9624,17 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
else
memcpy(assoc_data->ap_addr, cbss->bssid, ETH_ALEN);
- assoc_data->ext_mld_capa_ops = cpu_to_le16(req->ext_mld_capa_ops);
+ /*
+ * Many APs have broken parsing of the extended MLD capa/ops field,
+	 * dropping (re-)association request frames or replying with an association
+	 * response carrying a failure status if the field is present.
+ * Set our value from the userspace request only in strict mode or if
+ * the AP also had that field present.
+ */
+ if (ieee80211_hw_check(&local->hw, STRICT) ||
+ ieee80211_mgd_assoc_bss_has_mld_ext_capa_ops(req))
+ assoc_data->ext_mld_capa_ops =
+ cpu_to_le16(req->ext_mld_capa_ops);
if (ifmgd->associated) {
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
@@ -10027,7 +10277,6 @@ void ieee80211_process_ml_reconf_resp(struct ieee80211_sub_if_data *sdata,
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
if (!add_links_data->link[link_id].bss ||
!(sdata->u.mgd.reconf.added_links & BIT(link_id)))
-
continue;
valid_links |= BIT(link_id);
@@ -10699,8 +10948,8 @@ static void ieee80211_ml_epcs(struct ieee80211_sub_if_data *sdata,
*/
for_each_mle_subelement(sub, (const u8 *)elems->ml_epcs,
elems->ml_epcs_len) {
+ struct ieee802_11_elems *link_elems __free(kfree) = NULL;
struct ieee80211_link_data *link;
- struct ieee802_11_elems *link_elems __free(kfree);
u8 *pos = (void *)sub->data;
u16 control;
ssize_t len;
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 2b9abc27462e..ae82533e3c02 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -8,7 +8,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2019, 2022-2024 Intel Corporation
+ * Copyright (C) 2019, 2022-2025 Intel Corporation
*/
#include <linux/export.h>
#include <net/mac80211.h>
@@ -39,7 +39,7 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
offchannel_ps_enabled = true;
local->hw.conf.flags &= ~IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS);
}
if (!offchannel_ps_enabled ||
@@ -567,6 +567,7 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
{
struct ieee80211_roc_work *roc, *tmp;
bool queued = false, combine_started = true;
+ struct cfg80211_scan_request *req;
int ret;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -612,9 +613,11 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
roc->mgmt_tx_cookie = *cookie;
}
+ req = wiphy_dereference(local->hw.wiphy, local->scan_req);
+
/* if there's no need to queue, handle it immediately */
if (list_empty(&local->roc_list) &&
- !local->scanning && !ieee80211_is_radar_required(local)) {
+ !local->scanning && !ieee80211_is_radar_required(local, req)) {
/* if not HW assist, just queue & schedule work */
if (!local->ops->remain_on_channel) {
list_add_tail(&roc->list, &local->roc_list);
@@ -894,6 +897,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
need_offchan = true;
break;
case NL80211_IFTYPE_NAN:
+ break;
default:
return -EOPNOTSUPP;
}
@@ -907,6 +911,8 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
/* Check if the operating channel is the requested channel */
if (!params->chan && mlo_sta) {
need_offchan = false;
+ } else if (sdata->vif.type == NL80211_IFTYPE_NAN) {
+ /* Frames can be sent during NAN schedule */
} else if (!need_offchan) {
struct ieee80211_chanctx_conf *chanctx_conf = NULL;
int i;
diff --git a/net/mac80211/parse.c b/net/mac80211/parse.c
index 96584b39215e..c5e0f7f46004 100644
--- a/net/mac80211/parse.c
+++ b/net/mac80211/parse.c
@@ -758,7 +758,6 @@ static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len,
{
const struct element *elem, *sub;
size_t profile_len = 0;
- bool found = false;
if (!bss || !bss->transmitted_bss)
return profile_len;
@@ -809,15 +808,14 @@ static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len,
index[2],
new_bssid);
if (ether_addr_equal(new_bssid, bss->bssid)) {
- found = true;
elems->bssid_index_len = index[1];
elems->bssid_index = (void *)&index[2];
- break;
+ return profile_len;
}
}
}
- return found ? profile_len : 0;
+ return 0;
}
static void
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index a9cc832240a5..5a508d99e84f 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -108,7 +108,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
sdata->u.mgd.powersave &&
!(local->hw.conf.flags & IEEE80211_CONF_PS)) {
local->hw.conf.flags |= IEEE80211_CONF_PS;
- ieee80211_hw_config(local,
+ ieee80211_hw_config(local, -1,
IEEE80211_CONF_CHANGE_PS);
}
}
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 3cb2ad6d0b28..e441f8541603 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -4,7 +4,7 @@
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2017 Intel Deutschland GmbH
- * Copyright (C) 2019, 2022-2024 Intel Corporation
+ * Copyright (C) 2019, 2022-2025 Intel Corporation
*/
#include <linux/kernel.h>
@@ -98,6 +98,9 @@ void rate_control_tx_status(struct ieee80211_local *local,
if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
return;
+ if (st->info->band >= NUM_NL80211_BANDS)
+ return;
+
sband = local->hw.wiphy->bands[st->info->band];
spin_lock_bh(&sta->rate_ctrl_lock);
@@ -419,6 +422,9 @@ static bool rate_control_send_low(struct ieee80211_sta *pubsta,
int mcast_rate;
bool use_basicrate = false;
+ if (!sband)
+ return false;
+
if (!pubsta || rc_no_data_or_no_ack_use_min(txrc)) {
__rate_control_send_low(txrc->hw, sband, pubsta, info,
txrc->rate_idx_mask);
@@ -898,6 +904,9 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
return;
sdata = vif_to_sdata(vif);
+ if (info->band >= NUM_NL80211_BANDS)
+ return;
+
sband = sdata->local->hw.wiphy->bands[info->band];
if (ieee80211_is_tx_data(skb))
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 09beb65d6108..6af43dfefdd6 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -231,8 +231,19 @@ static void __ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata,
skb_queue_tail(&sdata->skb_queue, skb);
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
- if (sta)
- sta->deflink.rx_stats.packets++;
+ if (sta) {
+ struct link_sta_info *link_sta_info;
+
+ if (link_id >= 0) {
+ link_sta_info = rcu_dereference(sta->link[link_id]);
+ if (!link_sta_info)
+ return;
+ } else {
+ link_sta_info = &sta->deflink;
+ }
+
+ link_sta_info->rx_stats.packets++;
+ }
}
static void ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata,
@@ -1521,9 +1532,8 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
}
if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
- cfg80211_rx_spurious_frame(rx->sdata->dev,
- hdr->addr2,
- GFP_ATOMIC))
+ cfg80211_rx_spurious_frame(rx->sdata->dev, hdr->addr2,
+ rx->link_id, GFP_ATOMIC))
return RX_DROP_U_SPURIOUS;
return RX_DROP;
@@ -1861,7 +1871,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
cfg80211_rx_unexpected_4addr_frame(
rx->sdata->dev, sta->sta.addr,
- GFP_ATOMIC);
+ rx->link_id, GFP_ATOMIC);
return RX_DROP_U_UNEXPECTED_4ADDR_FRAME;
}
/*
@@ -3022,7 +3032,6 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
__le16 fc = hdr->frame_control;
struct sk_buff_head frame_list;
- ieee80211_rx_result res;
struct ethhdr ethhdr;
const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
@@ -3084,24 +3093,18 @@ __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
while (!skb_queue_empty(&frame_list)) {
rx->skb = __skb_dequeue(&frame_list);
- res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb);
- switch (res) {
+ switch (ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb)) {
case RX_QUEUED:
- continue;
- case RX_CONTINUE:
break;
+ case RX_CONTINUE:
+ if (ieee80211_frame_allowed(rx, fc)) {
+ ieee80211_deliver_skb(rx);
+ break;
+ }
+ fallthrough;
default:
- goto free;
+ dev_kfree_skb(rx->skb);
}
-
- if (!ieee80211_frame_allowed(rx, fc))
- goto free;
-
- ieee80211_deliver_skb(rx);
- continue;
-
-free:
- dev_kfree_skb(rx->skb);
}
return RX_QUEUED;
@@ -3187,7 +3190,8 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
if (rx->sta &&
!test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
cfg80211_rx_unexpected_4addr_frame(
- rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
+ rx->sdata->dev, rx->sta->sta.addr, rx->link_id,
+ GFP_ATOMIC);
return RX_DROP;
}
@@ -3576,41 +3580,18 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
goto handled;
}
case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
- struct ieee80211_supported_band *sband;
u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
- enum ieee80211_sta_rx_bandwidth max_bw, new_bw;
- struct sta_opmode_info sta_opmode = {};
+
+ if (chanwidth != IEEE80211_HT_CHANWIDTH_20MHZ &&
+ chanwidth != IEEE80211_HT_CHANWIDTH_ANY)
+ goto invalid;
/* If it doesn't support 40 MHz it can't change ... */
if (!(rx->link_sta->pub->ht_cap.cap &
- IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40))
goto handled;
- if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
- max_bw = IEEE80211_STA_RX_BW_20;
- else
- max_bw = ieee80211_sta_cap_rx_bw(rx->link_sta);
-
- /* set cur_max_bandwidth and recalc sta bw */
- rx->link_sta->cur_max_bandwidth = max_bw;
- new_bw = ieee80211_sta_cur_vht_bw(rx->link_sta);
-
- if (rx->link_sta->pub->bandwidth == new_bw)
- goto handled;
-
- rx->link_sta->pub->bandwidth = new_bw;
- sband = rx->local->hw.wiphy->bands[status->band];
- sta_opmode.bw =
- ieee80211_sta_rx_bw_to_chan_width(rx->link_sta);
- sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED;
-
- rate_control_rate_update(local, sband, rx->link_sta,
- IEEE80211_RC_BW_CHANGED);
- cfg80211_sta_opmode_change_notify(sdata->dev,
- rx->sta->addr,
- &sta_opmode,
- GFP_ATOMIC);
- goto handled;
+ goto queue;
}
default:
goto invalid;
@@ -4234,10 +4215,16 @@ static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx,
rx->link_sta = NULL;
}
- if (link_id < 0)
- rx->link = &rx->sdata->deflink;
- else if (!ieee80211_rx_data_set_link(rx, link_id))
+ if (link_id < 0) {
+ if (ieee80211_vif_is_mld(&rx->sdata->vif) &&
+ sta && !sta->sta.valid_links)
+ rx->link =
+ rcu_dereference(rx->sdata->link[sta->deflink.link_id]);
+ else
+ rx->link = &rx->sdata->deflink;
+ } else if (!ieee80211_rx_data_set_link(rx, link_id)) {
return false;
+ }
return true;
}
@@ -4432,6 +4419,10 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
if (!multicast &&
!ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
return false;
+ /* reject invalid/our STA address */
+ if (!is_valid_ether_addr(hdr->addr2) ||
+ ether_addr_equal(sdata->dev->dev_addr, hdr->addr2))
+ return false;
if (!rx->sta) {
int rate_idx;
if (status->encoding != RX_ENC_LEGACY)
@@ -4511,8 +4502,16 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
(ieee80211_is_auth(hdr->frame_control) &&
ether_addr_equal(sdata->vif.addr, hdr->addr1));
case NL80211_IFTYPE_NAN:
- /* Currently no frames on NAN interface are allowed */
- return false;
+ /* Accept only frames that are addressed to the NAN cluster
+ * (based on the Cluster ID). From these frames, accept only
+ * action frames or authentication frames that are addressed to
+ * the local NAN interface.
+ */
+ return memcmp(sdata->wdev.u.nan.cluster_id,
+ hdr->addr3, ETH_ALEN) == 0 &&
+ (ieee80211_is_public_action(hdr, skb->len) ||
+ (ieee80211_is_auth(hdr->frame_control) &&
+ ether_addr_equal(sdata->vif.addr, hdr->addr1)));
default:
break;
}
@@ -5115,8 +5114,24 @@ static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
sta = sta_info_get_bss(rx->sdata, hdr->addr2);
- if (status->link_valid)
+ if (status->link_valid) {
link_id = status->link_id;
+ } else if (ieee80211_vif_is_mld(&rx->sdata->vif) &&
+ status->freq) {
+ struct ieee80211_link_data *link;
+ struct ieee80211_chanctx_conf *conf;
+
+ for_each_link_data_rcu(rx->sdata, link) {
+ conf = rcu_dereference(link->conf->chanctx_conf);
+ if (!conf || !conf->def.chan)
+ continue;
+
+ if (status->freq == conf->def.chan->center_freq) {
+ link_id = link->link_id;
+ break;
+ }
+ }
+ }
}
if (!ieee80211_rx_data_set_sta(rx, sta, link_id))
@@ -5223,12 +5238,20 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
}
rx.sdata = prev_sta->sdata;
+ if (!status->link_valid && prev_sta->sta.mlo) {
+ struct link_sta_info *link_sta;
+
+ link_sta = link_sta_info_get_bss(rx.sdata,
+ hdr->addr2);
+ if (!link_sta)
+ continue;
+
+ link_id = link_sta->link_id;
+ }
+
if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id))
goto out;
- if (!status->link_valid && prev_sta->sta.mlo)
- continue;
-
ieee80211_prepare_and_rx_handle(&rx, skb, false);
prev_sta = sta;
@@ -5236,10 +5259,18 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
if (prev_sta) {
rx.sdata = prev_sta->sdata;
- if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id))
- goto out;
+ if (!status->link_valid && prev_sta->sta.mlo) {
+ struct link_sta_info *link_sta;
- if (!status->link_valid && prev_sta->sta.mlo)
+ link_sta = link_sta_info_get_bss(rx.sdata,
+ hdr->addr2);
+ if (!link_sta)
+ goto out;
+
+ link_id = link_sta->link_id;
+ }
+
+ if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id))
goto out;
if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
diff --git a/net/mac80211/s1g.c b/net/mac80211/s1g.c
index d4ed0c0a335c..1f68df6e8067 100644
--- a/net/mac80211/s1g.c
+++ b/net/mac80211/s1g.c
@@ -194,3 +194,29 @@ void ieee80211_s1g_status_twt_action(struct ieee80211_sub_if_data *sdata,
break;
}
}
+
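+/*
+ * Copy the S1G capabilities element into the station's public S1G
+ * capability info and derive the maximum A-MSDU length from the
+ * maximum MPDU length capability bit.
+ */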
+void ieee80211_s1g_cap_to_sta_s1g_cap(struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_s1g_cap *s1g_cap_ie,
+ struct link_sta_info *link_sta)
+{
+ struct ieee80211_sta_s1g_cap *s1g_cap = &link_sta->pub->s1g_cap;
+
+ memset(s1g_cap, 0, sizeof(*s1g_cap));
+
+ memcpy(s1g_cap->cap, s1g_cap_ie->capab_info, sizeof(s1g_cap->cap));
+ memcpy(s1g_cap->nss_mcs, s1g_cap_ie->supp_mcs_nss,
+ sizeof(s1g_cap->nss_mcs));
+
+ s1g_cap->s1g = true;
+
+	/* The maximum MPDU length capability is a single bit for S1G */
+ if (s1g_cap->cap[3] & S1G_CAP3_MAX_MPDU_LEN) {
+ link_sta->pub->agg.max_amsdu_len =
+ IEEE80211_MAX_MPDU_LEN_VHT_7991;
+ } else {
+ link_sta->pub->agg.max_amsdu_len =
+ IEEE80211_MAX_MPDU_LEN_VHT_3895;
+ }
+
+ ieee80211_sta_recalc_aggregates(&link_sta->sta->sta);
+}
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index cd8385ecafd9..bb9563f50e7b 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -9,7 +9,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2013-2015 Intel Mobile Communications GmbH
* Copyright 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/if_arp.h>
@@ -586,7 +586,8 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
return 0;
}
-static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata)
+static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_scan_request *req)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_sub_if_data *sdata_iter;
@@ -594,7 +595,7 @@ static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata)
lockdep_assert_wiphy(local->hw.wiphy);
- if (!ieee80211_is_radar_required(local))
+ if (!ieee80211_is_radar_required(local, req))
return true;
if (!regulatory_pre_cac_allowed(local->hw.wiphy))
@@ -610,9 +611,10 @@ static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata)
}
static bool ieee80211_can_scan(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata)
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_scan_request *req)
{
- if (!__ieee80211_can_leave_ch(sdata))
+ if (!__ieee80211_can_leave_ch(sdata, req))
return false;
if (!list_empty(&local->roc_list))
@@ -627,15 +629,19 @@ static bool ieee80211_can_scan(struct ieee80211_local *local,
void ieee80211_run_deferred_scan(struct ieee80211_local *local)
{
+ struct cfg80211_scan_request *req;
+
lockdep_assert_wiphy(local->hw.wiphy);
if (!local->scan_req || local->scanning)
return;
+ req = wiphy_dereference(local->hw.wiphy, local->scan_req);
if (!ieee80211_can_scan(local,
rcu_dereference_protected(
local->scan_sdata,
- lockdep_is_held(&local->hw.wiphy->mtx))))
+ lockdep_is_held(&local->hw.wiphy->mtx)),
+ req))
return;
wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
@@ -732,10 +738,10 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
!(sdata->vif.active_links & BIT(req->tsf_report_link_id)))
return -EINVAL;
- if (!__ieee80211_can_leave_ch(sdata))
+ if (!__ieee80211_can_leave_ch(sdata, req))
return -EBUSY;
- if (!ieee80211_can_scan(local, sdata)) {
+ if (!ieee80211_can_scan(local, sdata, req)) {
/* wait for the work to finish/time out */
rcu_assign_pointer(local->scan_req, req);
rcu_assign_pointer(local->scan_sdata, sdata);
@@ -794,6 +800,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
local->hw_scan_req->req.scan_6ghz_params =
req->scan_6ghz_params;
local->hw_scan_req->req.scan_6ghz = req->scan_6ghz;
+ local->hw_scan_req->req.first_part = req->first_part;
/*
* After allocating local->hw_scan_req, we must
@@ -989,15 +996,15 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
local->scan_chandef.freq1_offset = chan->freq_offset;
local->scan_chandef.center_freq2 = 0;
- /* For scanning on the S1G band, detect the channel width according to
- * the channel being scanned.
- */
+ /* For S1G, only scan the 1MHz primaries. */
if (chan->band == NL80211_BAND_S1GHZ) {
- local->scan_chandef.width = ieee80211_s1g_channel_width(chan);
+ local->scan_chandef.width = NL80211_CHAN_WIDTH_1;
+ local->scan_chandef.s1g_primary_2mhz = false;
goto set_channel;
}
- /* If scanning on oper channel, use whatever channel-type
+ /*
+ * If scanning on oper channel, use whatever channel-type
* is currently in use.
*/
if (chan == local->hw.conf.chandef.chan)
@@ -1206,7 +1213,8 @@ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!local->hw.wiphy->bands[band] ||
- band == NL80211_BAND_6GHZ)
+ band == NL80211_BAND_6GHZ ||
+ band == NL80211_BAND_S1GHZ)
continue;
max_n = local->hw.wiphy->bands[band]->n_channels;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 61583173629e..f4d3b67fda06 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -4,7 +4,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/module.h>
@@ -355,6 +355,50 @@ static void sta_info_free_link(struct link_sta_info *link_sta)
free_percpu(link_sta->pcpu_rx_stats);
}
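+/*
+ * Fold the counters of a link that is being removed into
+ * sta->rem_link_stats, so the accumulated totals reported to
+ * userspace stay consistent after the link is gone.
+ */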
+static void sta_accumulate_removed_link_stats(struct sta_info *sta, int link_id)
+{
+ struct link_sta_info *link_sta = wiphy_dereference(sta->local->hw.wiphy,
+ sta->link[link_id]);
+ struct ieee80211_link_data *link;
+ int ac, tid;
+ u32 thr;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ sta->rem_link_stats.tx_packets +=
+ link_sta->tx_stats.packets[ac];
+ sta->rem_link_stats.tx_bytes += link_sta->tx_stats.bytes[ac];
+ }
+
+ sta->rem_link_stats.rx_packets += link_sta->rx_stats.packets;
+ sta->rem_link_stats.rx_bytes += link_sta->rx_stats.bytes;
+ sta->rem_link_stats.tx_retries += link_sta->status_stats.retry_count;
+ sta->rem_link_stats.tx_failed += link_sta->status_stats.retry_failed;
+ sta->rem_link_stats.rx_dropped_misc += link_sta->rx_stats.dropped;
+
+ thr = sta_get_expected_throughput(sta);
+ if (thr != 0)
+ sta->rem_link_stats.expected_throughput += thr;
+
+ for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
+ sta->rem_link_stats.pertid_stats.rx_msdu +=
+ link_sta->rx_stats.msdu[tid];
+ sta->rem_link_stats.pertid_stats.tx_msdu +=
+ link_sta->tx_stats.msdu[tid];
+ sta->rem_link_stats.pertid_stats.tx_msdu_retries +=
+ link_sta->status_stats.msdu_retries[tid];
+ sta->rem_link_stats.pertid_stats.tx_msdu_failed +=
+ link_sta->status_stats.msdu_failed[tid];
+ }
+
+ if (sta->sdata->vif.type == NL80211_IFTYPE_STATION) {
+ link = wiphy_dereference(sta->sdata->local->hw.wiphy,
+ sta->sdata->link[link_id]);
+ if (link)
+ sta->rem_link_stats.beacon_loss_count +=
+ link->u.mgd.beacon_loss_count;
+ }
+}
+
static void sta_remove_link(struct sta_info *sta, unsigned int link_id,
bool unhash)
{
@@ -377,6 +421,10 @@ static void sta_remove_link(struct sta_info *sta, unsigned int link_id,
alloc = container_of(link_sta, typeof(*alloc), info);
sta->sta.valid_links &= ~BIT(link_id);
+
+ /* store removed link info for accumulated stats consistency */
+ sta_accumulate_removed_link_stats(sta, link_id);
+
RCU_INIT_POINTER(sta->link[link_id], NULL);
RCU_INIT_POINTER(sta->sta.link[link_id], NULL);
if (alloc) {
@@ -681,6 +729,7 @@ __sta_info_alloc(struct ieee80211_sub_if_data *sdata,
IEEE80211_RATE_MANDATORY_G;
break;
case NL80211_BAND_5GHZ:
+ case NL80211_BAND_6GHZ:
mandatory = IEEE80211_RATE_MANDATORY_A;
break;
case NL80211_BAND_60GHZ:
@@ -1651,7 +1700,7 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
lockdep_assert_wiphy(local->hw.wiphy);
list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
- unsigned long last_active = ieee80211_sta_last_active(sta);
+ unsigned long last_active = ieee80211_sta_last_active(sta, -1);
if (sdata != sta->sdata)
continue;
@@ -2420,18 +2469,27 @@ void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
}
static struct ieee80211_sta_rx_stats *
-sta_get_last_rx_stats(struct sta_info *sta)
+sta_get_last_rx_stats(struct sta_info *sta, int link_id)
{
- struct ieee80211_sta_rx_stats *stats = &sta->deflink.rx_stats;
+ struct ieee80211_sta_rx_stats *stats;
+ struct link_sta_info *link_sta_info;
int cpu;
- if (!sta->deflink.pcpu_rx_stats)
+ if (link_id < 0)
+ link_sta_info = &sta->deflink;
+ else
+ link_sta_info = wiphy_dereference(sta->local->hw.wiphy,
+ sta->link[link_id]);
+
+ stats = &link_sta_info->rx_stats;
+
+ if (!link_sta_info->pcpu_rx_stats)
return stats;
for_each_possible_cpu(cpu) {
struct ieee80211_sta_rx_stats *cpustats;
- cpustats = per_cpu_ptr(sta->deflink.pcpu_rx_stats, cpu);
+ cpustats = per_cpu_ptr(link_sta_info->pcpu_rx_stats, cpu);
if (time_after(cpustats->last_rx, stats->last_rx))
stats = cpustats;
@@ -2499,9 +2557,10 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
}
}
-static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
+static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo,
+ int link_id)
{
- u32 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate);
+ u32 rate = READ_ONCE(sta_get_last_rx_stats(sta, link_id)->last_rate);
if (rate == STA_STATS_RATE_INVALID)
return -EINVAL;
@@ -2526,20 +2585,28 @@ static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats,
static void sta_set_tidstats(struct sta_info *sta,
struct cfg80211_tid_stats *tidstats,
- int tid)
+ int tid, int link_id)
{
struct ieee80211_local *local = sta->local;
+ struct link_sta_info *link_sta_info;
int cpu;
+ if (link_id < 0)
+ link_sta_info = &sta->deflink;
+ else
+ link_sta_info = wiphy_dereference(sta->local->hw.wiphy,
+ sta->link[link_id]);
+
if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
- tidstats->rx_msdu += sta_get_tidstats_msdu(&sta->deflink.rx_stats,
- tid);
+ tidstats->rx_msdu +=
+ sta_get_tidstats_msdu(&link_sta_info->rx_stats,
+ tid);
- if (sta->deflink.pcpu_rx_stats) {
+ if (link_sta_info->pcpu_rx_stats) {
for_each_possible_cpu(cpu) {
struct ieee80211_sta_rx_stats *cpurxs;
- cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats,
+ cpurxs = per_cpu_ptr(link_sta_info->pcpu_rx_stats,
cpu);
tidstats->rx_msdu +=
sta_get_tidstats_msdu(cpurxs, tid);
@@ -2551,30 +2618,30 @@ static void sta_set_tidstats(struct sta_info *sta,
if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) {
tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU);
- tidstats->tx_msdu = sta->deflink.tx_stats.msdu[tid];
+ tidstats->tx_msdu = link_sta_info->tx_stats.msdu[tid];
}
if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) &&
ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES);
- tidstats->tx_msdu_retries = sta->deflink.status_stats.msdu_retries[tid];
+ tidstats->tx_msdu_retries =
+ link_sta_info->status_stats.msdu_retries[tid];
}
if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) &&
ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED);
- tidstats->tx_msdu_failed = sta->deflink.status_stats.msdu_failed[tid];
+ tidstats->tx_msdu_failed =
+ link_sta_info->status_stats.msdu_failed[tid];
}
- if (tid < IEEE80211_NUM_TIDS) {
+ if (link_id < 0 && tid < IEEE80211_NUM_TIDS) {
spin_lock_bh(&local->fq.lock);
- rcu_read_lock();
tidstats->filled |= BIT(NL80211_TID_STATS_TXQ_STATS);
ieee80211_fill_txq_stats(&tidstats->txq_stats,
to_txq_info(sta->sta.txq[tid]));
- rcu_read_unlock();
spin_unlock_bh(&local->fq.lock);
}
}
@@ -2625,6 +2692,268 @@ static void sta_set_mesh_sinfo(struct sta_info *sta,
}
#endif
+void sta_set_accumulated_removed_links_sinfo(struct sta_info *sta,
+ struct station_info *sinfo)
+{
+	/* Report the accumulated statistics of the removed links. */
+ sinfo->tx_packets = sta->rem_link_stats.tx_packets;
+ sinfo->rx_packets = sta->rem_link_stats.rx_packets;
+ sinfo->tx_bytes = sta->rem_link_stats.tx_bytes;
+ sinfo->rx_bytes = sta->rem_link_stats.rx_bytes;
+ sinfo->tx_retries = sta->rem_link_stats.tx_retries;
+ sinfo->tx_failed = sta->rem_link_stats.tx_failed;
+ sinfo->rx_dropped_misc = sta->rem_link_stats.rx_dropped_misc;
+ sinfo->beacon_loss_count = sta->rem_link_stats.beacon_loss_count;
+ sinfo->expected_throughput = sta->rem_link_stats.expected_throughput;
+
+ if (sinfo->pertid) {
+ sinfo->pertid->rx_msdu =
+ sta->rem_link_stats.pertid_stats.rx_msdu;
+ sinfo->pertid->tx_msdu =
+ sta->rem_link_stats.pertid_stats.tx_msdu;
+ sinfo->pertid->tx_msdu_retries =
+ sta->rem_link_stats.pertid_stats.tx_msdu_retries;
+ sinfo->pertid->tx_msdu_failed =
+ sta->rem_link_stats.pertid_stats.tx_msdu_failed;
+ }
+}
+
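+/*
+ * Fill the per-link statistics (struct link_station_info) for one
+ * valid link, mirroring what sta_set_sinfo() reports at the MLD level.
+ */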
+static void sta_set_link_sinfo(struct sta_info *sta,
+ struct link_station_info *link_sinfo,
+ struct ieee80211_link_data *link,
+ bool tidstats)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_sta_rx_stats *last_rxstats;
+ int i, ac, cpu, link_id = link->link_id;
+ struct link_sta_info *link_sta_info;
+ u32 thr = 0;
+
+ last_rxstats = sta_get_last_rx_stats(sta, link_id);
+
+ link_sta_info = wiphy_dereference(sta->local->hw.wiphy,
+ sta->link[link_id]);
+
+ /* do before driver, so beacon filtering drivers have a
+ * chance to e.g. just add the number of filtered beacons
+ * (or just modify the value entirely, of course)
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_STATION)
+ link_sinfo->rx_beacon = link->u.mgd.count_beacon_signal;
+
+ ether_addr_copy(link_sinfo->addr, link_sta_info->addr);
+
+ drv_link_sta_statistics(sta->local, sdata,
+ link_sta_info->pub,
+ link_sinfo);
+
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) |
+ BIT_ULL(NL80211_STA_INFO_BSS_PARAM) |
+ BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC);
+
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ link_sinfo->beacon_loss_count =
+ link->u.mgd.beacon_loss_count;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS);
+ }
+
+ link_sinfo->inactive_time =
+ jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta, link_id));
+
+ if (!(link_sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) |
+ BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) {
+ link_sinfo->tx_bytes = 0;
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ link_sinfo->tx_bytes +=
+ link_sta_info->tx_stats.bytes[ac];
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) {
+ link_sinfo->tx_packets = 0;
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ link_sinfo->tx_packets +=
+ link_sta_info->tx_stats.packets[ac];
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
+ }
+
+ if (!(link_sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) |
+ BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) {
+ link_sinfo->rx_bytes +=
+ sta_get_stats_bytes(&link_sta_info->rx_stats);
+
+ if (link_sta_info->pcpu_rx_stats) {
+ for_each_possible_cpu(cpu) {
+ struct ieee80211_sta_rx_stats *cpurxs;
+
+ cpurxs = per_cpu_ptr(link_sta_info->pcpu_rx_stats,
+ cpu);
+ link_sinfo->rx_bytes +=
+ sta_get_stats_bytes(cpurxs);
+ }
+ }
+
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) {
+ link_sinfo->rx_packets = link_sta_info->rx_stats.packets;
+ if (link_sta_info->pcpu_rx_stats) {
+ for_each_possible_cpu(cpu) {
+ struct ieee80211_sta_rx_stats *cpurxs;
+
+ cpurxs = per_cpu_ptr(link_sta_info->pcpu_rx_stats,
+ cpu);
+ link_sinfo->rx_packets += cpurxs->packets;
+ }
+ }
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) {
+ link_sinfo->tx_retries =
+ link_sta_info->status_stats.retry_count;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) {
+ link_sinfo->tx_failed =
+ link_sta_info->status_stats.retry_failed;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_DURATION))) {
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ link_sinfo->rx_duration += sta->airtime[ac].rx_airtime;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_DURATION))) {
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ link_sinfo->tx_duration += sta->airtime[ac].tx_airtime;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) {
+ link_sinfo->airtime_weight = sta->airtime_weight;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT);
+ }
+
+ link_sinfo->rx_dropped_misc = link_sta_info->rx_stats.dropped;
+ if (link_sta_info->pcpu_rx_stats) {
+ for_each_possible_cpu(cpu) {
+ struct ieee80211_sta_rx_stats *cpurxs;
+
+ cpurxs = per_cpu_ptr(link_sta_info->pcpu_rx_stats,
+ cpu);
+ link_sinfo->rx_dropped_misc += cpurxs->dropped;
+ }
+ }
+
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) {
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) |
+ BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
+ link_sinfo->rx_beacon_signal_avg =
+ ieee80211_ave_rssi(&sdata->vif, -1);
+ }
+
+ if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) ||
+ ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) {
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL))) {
+ link_sinfo->signal = (s8)last_rxstats->last_signal;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
+ }
+
+ if (!link_sta_info->pcpu_rx_stats &&
+ !(link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) {
+ link_sinfo->signal_avg =
+ -ewma_signal_read(&link_sta_info->rx_stats_avg.signal);
+ link_sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+ }
+ }
+
+ /* for the average - if pcpu_rx_stats isn't set - rxstats must point to
+ * the sta->rx_stats struct, so the check here is fine with and without
+ * pcpu statistics
+ */
+ if (last_rxstats->chains &&
+ !(link_sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) |
+ BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) {
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
+ if (!link_sta_info->pcpu_rx_stats)
+ link_sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
+
+ link_sinfo->chains = last_rxstats->chains;
+
+ for (i = 0; i < ARRAY_SIZE(link_sinfo->chain_signal); i++) {
+ link_sinfo->chain_signal[i] =
+ last_rxstats->chain_signal_last[i];
+ link_sinfo->chain_signal_avg[i] =
+ -ewma_signal_read(
+ &link_sta_info->rx_stats_avg.chain_signal[i]);
+ }
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) &&
+ ieee80211_rate_valid(&link_sta_info->tx_stats.last_rate)) {
+ sta_set_rate_info_tx(sta, &link_sta_info->tx_stats.last_rate,
+ &link_sinfo->txrate);
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE))) {
+ if (sta_set_rate_info_rx(sta, &link_sinfo->rxrate,
+ link_id) == 0)
+ link_sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
+ }
+
+ if (tidstats && !cfg80211_link_sinfo_alloc_tid_stats(link_sinfo,
+ GFP_KERNEL)) {
+ for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++)
+ sta_set_tidstats(sta, &link_sinfo->pertid[i], i,
+ link_id);
+ }
+
+ link_sinfo->bss_param.flags = 0;
+ if (sdata->vif.bss_conf.use_cts_prot)
+ link_sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT;
+ if (sdata->vif.bss_conf.use_short_preamble)
+ link_sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
+ if (sdata->vif.bss_conf.use_short_slot)
+ link_sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
+ link_sinfo->bss_param.dtim_period = link->conf->dtim_period;
+ link_sinfo->bss_param.beacon_interval = link->conf->beacon_int;
+
+ thr = sta_get_expected_throughput(sta);
+
+ if (thr != 0) {
+ link_sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT);
+ link_sinfo->expected_throughput = thr;
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL)) &&
+ link_sta_info->status_stats.ack_signal_filled) {
+ link_sinfo->ack_signal =
+ link_sta_info->status_stats.last_ack_signal;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG)) &&
+ link_sta_info->status_stats.ack_signal_filled) {
+ link_sinfo->avg_ack_signal =
+ -(s8)ewma_avg_signal_read(
+ &link_sta_info->status_stats.avg_ack_signal);
+ link_sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
+ }
+}
+
void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
bool tidstats)
{
@@ -2634,7 +2963,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
int i, ac, cpu;
struct ieee80211_sta_rx_stats *last_rxstats;
- last_rxstats = sta_get_last_rx_stats(sta);
+ last_rxstats = sta_get_last_rx_stats(sta, -1);
sinfo->generation = sdata->local->sta_generation;
@@ -2662,7 +2991,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
sinfo->connected_time = ktime_get_seconds() - sta->last_connected;
sinfo->assoc_at = sta->assoc_at;
sinfo->inactive_time =
- jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta));
+ jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta, -1));
if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) |
BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) {
@@ -2751,7 +3080,8 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
!(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) {
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) |
BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
- sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif);
+ sinfo->rx_beacon_signal_avg =
+ ieee80211_ave_rssi(&sdata->vif, -1);
}
if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) ||
@@ -2800,13 +3130,13 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) &&
!sta->sta.valid_links) {
- if (sta_set_rate_info_rx(sta, &sinfo->rxrate) == 0)
+ if (sta_set_rate_info_rx(sta, &sinfo->rxrate, -1) == 0)
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
}
if (tidstats && !cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL)) {
for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++)
- sta_set_tidstats(sta, &sinfo->pertid[i], i);
+ sta_set_tidstats(sta, &sinfo->pertid[i], i, -1);
}
#ifdef CONFIG_MAC80211_MESH
@@ -2868,6 +3198,31 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
sinfo->filled |=
BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
}
+
+ if (sta->sta.valid_links) {
+ struct ieee80211_link_data *link;
+ struct link_sta_info *link_sta;
+ int link_id;
+
+ ether_addr_copy(sinfo->mld_addr, sta->addr);
+
+ /* assign valid links first for iteration */
+ sinfo->valid_links = sta->sta.valid_links;
+
+ for_each_valid_link(sinfo, link_id) {
+ link_sta = wiphy_dereference(sta->local->hw.wiphy,
+ sta->link[link_id]);
+ link = wiphy_dereference(sdata->local->hw.wiphy,
+ sdata->link[link_id]);
+
+ if (!link_sta || !sinfo->links[link_id] || !link) {
+ sinfo->valid_links &= ~BIT(link_id);
+ continue;
+ }
+ sta_set_link_sinfo(sta, sinfo->links[link_id],
+ link, tidstats);
+ }
+ }
}
u32 sta_get_expected_throughput(struct sta_info *sta)
@@ -2889,14 +3244,24 @@ u32 sta_get_expected_throughput(struct sta_info *sta)
return thr;
}
-unsigned long ieee80211_sta_last_active(struct sta_info *sta)
+unsigned long ieee80211_sta_last_active(struct sta_info *sta, int link_id)
{
- struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta);
+ struct ieee80211_sta_rx_stats *stats;
+ struct link_sta_info *link_sta_info;
- if (!sta->deflink.status_stats.last_ack ||
- time_after(stats->last_rx, sta->deflink.status_stats.last_ack))
+ stats = sta_get_last_rx_stats(sta, link_id);
+
+ if (link_id < 0)
+ link_sta_info = &sta->deflink;
+ else
+ link_sta_info = wiphy_dereference(sta->local->hw.wiphy,
+ sta->link[link_id]);
+
+ if (!link_sta_info->status_stats.last_ack ||
+ time_after(stats->last_rx, link_sta_info->status_stats.last_ack))
return stats->last_rx;
- return sta->deflink.status_stats.last_ack;
+
+ return link_sta_info->status_stats.last_ack;
}
int ieee80211_sta_allocate_link(struct sta_info *sta, unsigned int link_id)
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 7a95d8d34fca..5288d5286651 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -569,6 +569,58 @@ struct link_sta_info {
};
/**
+ * struct ieee80211_sta_removed_link_stats - Removed link sta data
+ *
+ * Accumulated statistics kept for links that have been removed from the
+ * station, so they are not lost from the reported totals.
+ *
+ * @rx_packets: accumulated packets (MSDUs & MMPDUs) received from
+ * this station for removed links
+ * @tx_packets: accumulated packets (MSDUs & MMPDUs) transmitted to
+ * this station for removed links
+ * @rx_bytes: accumulated bytes (size of MPDUs) received from this
+ * station for removed links
+ * @tx_bytes: accumulated bytes (size of MPDUs) transmitted to this
+ * station for removed links
+ * @tx_retries: cumulative retry counts (MPDUs) for removed links
+ * @tx_failed: accumulated number of failed transmissions (MPDUs)
+ * (retries exceeded, no ACK) for removed links
+ * @rx_dropped_misc: accumulated dropped packets for un-specified reason
+ * from this station for removed links
+ * @beacon_loss_count: number of times the beacon loss event has triggered
+ * from this station for removed links.
+ * @expected_throughput: expected throughput in kbps (including 802.11
+ * headers) towards this station for removed links
+ * @pertid_stats: accumulated per-TID statistics for removed link of
+ * station
+ * @pertid_stats.rx_msdu: accumulated number of received MSDUs towards
+ * this station for removed links.
+ * @pertid_stats.tx_msdu: accumulated number of (attempted) transmitted
+ * MSDUs towards this station for removed links
+ * @pertid_stats.tx_msdu_retries: accumulated number of retries (not
+ * counting the first) for transmitted MSDUs towards this station
+ * for removed links
+ * @pertid_stats.tx_msdu_failed: accumulated number of failed transmitted
+ * MSDUs towards this station for removed links
+ */
+struct ieee80211_sta_removed_link_stats {
+ u32 rx_packets;
+ u32 tx_packets;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ u32 tx_retries;
+ u32 tx_failed;
+ u32 rx_dropped_misc;
+ u32 beacon_loss_count;
+ u32 expected_throughput;
+ struct {
+ u64 rx_msdu;
+ u64 tx_msdu;
+ u64 tx_msdu_retries;
+ u64 tx_msdu_failed;
+ } pertid_stats;
+};
+
+/**
* struct sta_info - STA information
*
* This structure collects information about a station that
@@ -644,6 +696,7 @@ struct link_sta_info {
* @deflink address and remaining would be allocated and the address
* would be assigned to link[link_id] where link_id is the id assigned
* by the AP.
+ * @rem_link_stats: accumulated removed link stats
*/
struct sta_info {
/* General information, mostly static */
@@ -718,6 +771,7 @@ struct sta_info {
struct ieee80211_sta_aggregates cur;
struct link_sta_info deflink;
struct link_sta_info __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct ieee80211_sta_removed_link_stats rem_link_stats;
/* keep last! */
struct ieee80211_sta sta;
@@ -922,6 +976,9 @@ void sta_set_rate_info_tx(struct sta_info *sta,
void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
bool tidstats);
+void sta_set_accumulated_removed_links_sinfo(struct sta_info *sta,
+ struct station_info *sinfo);
+
u32 sta_get_expected_throughput(struct sta_info *sta);
void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
@@ -936,7 +993,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta);
void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta);
void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta);
-unsigned long ieee80211_sta_last_active(struct sta_info *sta);
+unsigned long ieee80211_sta_last_active(struct sta_info *sta, int link_id);
void ieee80211_sta_set_max_amsdu_subframes(struct sta_info *sta,
const u8 *ext_capab,
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index a362254b310c..4b38aa0e902a 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -5,7 +5,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2008-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright 2021-2024 Intel Corporation
+ * Copyright 2021-2025 Intel Corporation
*/
#include <linux/export.h>
@@ -572,6 +572,7 @@ static struct ieee80211_sub_if_data *
ieee80211_sdata_from_skb(struct ieee80211_local *local, struct sk_buff *skb)
{
struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
if (skb->dev) {
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
@@ -585,7 +586,23 @@ ieee80211_sdata_from_skb(struct ieee80211_local *local, struct sk_buff *skb)
return NULL;
}
- return rcu_dereference(local->p2p_sdata);
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_P2P_DEVICE:
+ break;
+ case NL80211_IFTYPE_NAN:
+ if (sdata->u.nan.started)
+ break;
+ fallthrough;
+ default:
+ continue;
+ }
+
+ if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
+ return sdata;
+ }
+
+ return NULL;
}
static void ieee80211_report_ack_skb(struct ieee80211_local *local,
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 94714f8ffd22..ba5fbacbeeda 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -1422,7 +1422,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
return -EOPNOTSUPP;
- if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ if (sdata->vif.type != NL80211_IFTYPE_STATION || !sdata->vif.cfg.assoc)
return -EINVAL;
switch (oper) {
diff --git a/net/mac80211/tests/Makefile b/net/mac80211/tests/Makefile
index 3b0c08356fc5..3c7f874e5c41 100644
--- a/net/mac80211/tests/Makefile
+++ b/net/mac80211/tests/Makefile
@@ -1,3 +1,3 @@
-mac80211-tests-y += module.o util.o elems.o mfp.o tpe.o chan-mode.o
+mac80211-tests-y += module.o util.o elems.o mfp.o tpe.o chan-mode.o s1g_tim.o
obj-$(CONFIG_MAC80211_KUNIT_TEST) += mac80211-tests.o
diff --git a/net/mac80211/tests/chan-mode.c b/net/mac80211/tests/chan-mode.c
index 96c7b3ab2744..adc069065e73 100644
--- a/net/mac80211/tests/chan-mode.c
+++ b/net/mac80211/tests/chan-mode.c
@@ -2,7 +2,7 @@
/*
* KUnit tests for channel mode functions
*
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2024-2025 Intel Corporation
*/
#include <net/cfg80211.h>
#include <kunit/test.h>
@@ -28,6 +28,10 @@ static const struct determine_chan_mode_case {
u8 vht_basic_mcs_1_4, vht_basic_mcs_5_8;
u8 he_basic_mcs_1_4, he_basic_mcs_5_8;
u8 eht_mcs7_min_nss;
+ u16 eht_disabled_subchannels;
+ u8 eht_bw;
+ enum ieee80211_conn_bw_limit conn_bw_limit;
+ enum ieee80211_conn_bw_limit expected_bw_limit;
int error;
} determine_chan_mode_cases[] = {
{
@@ -128,6 +132,14 @@ static const struct determine_chan_mode_case {
.conn_mode = IEEE80211_CONN_MODE_EHT,
.eht_mcs7_min_nss = 0x15,
.error = EINVAL,
+ }, {
+ .desc = "80 MHz EHT is downgraded to 40 MHz HE due to puncturing",
+ .conn_mode = IEEE80211_CONN_MODE_EHT,
+ .expected_mode = IEEE80211_CONN_MODE_HE,
+ .conn_bw_limit = IEEE80211_CONN_BW_LIMIT_80,
+ .expected_bw_limit = IEEE80211_CONN_BW_LIMIT_40,
+ .eht_disabled_subchannels = 0x08,
+ .eht_bw = IEEE80211_EHT_OPER_CHAN_WIDTH_80MHZ,
}
};
KUNIT_ARRAY_PARAM_DESC(determine_chan_mode, determine_chan_mode_cases, desc)
@@ -138,7 +150,7 @@ static void test_determine_chan_mode(struct kunit *test)
struct t_sdata *t_sdata = T_SDATA(test);
struct ieee80211_conn_settings conn = {
.mode = params->conn_mode,
- .bw_limit = IEEE80211_CONN_BW_LIMIT_20,
+ .bw_limit = params->conn_bw_limit,
};
struct cfg80211_bss cbss = {
.channel = &t_sdata->band_5ghz.channels[0],
@@ -191,14 +203,21 @@ static void test_determine_chan_mode(struct kunit *test)
0x7f, 0x01, 0x00, 0x88, 0x88, 0x88, 0x00, 0x00,
0x00,
/* EHT Operation */
- WLAN_EID_EXTENSION, 0x09, WLAN_EID_EXT_EHT_OPERATION,
- 0x01, params->eht_mcs7_min_nss ? params->eht_mcs7_min_nss : 0x11,
- 0x00, 0x00, 0x00, 0x00, 0x24, 0x00,
+ WLAN_EID_EXTENSION, 0x0b, WLAN_EID_EXT_EHT_OPERATION,
+ 0x03, params->eht_mcs7_min_nss ? params->eht_mcs7_min_nss : 0x11,
+ 0x00, 0x00, 0x00, params->eht_bw,
+ params->eht_bw == IEEE80211_EHT_OPER_CHAN_WIDTH_80MHZ ? 42 : 36,
+ 0x00,
+ u16_get_bits(params->eht_disabled_subchannels, 0xff),
+ u16_get_bits(params->eht_disabled_subchannels, 0xff00),
};
struct ieee80211_chan_req chanreq = {};
struct cfg80211_chan_def ap_chandef = {};
struct ieee802_11_elems *elems;
+	/* Disallow puncturing so the punctured 80 MHz EHT case is downgraded to 40 MHz HE */
+ set_bit(IEEE80211_HW_DISALLOW_PUNCTURING, t_sdata->local.hw.flags);
+
if (params->strict)
set_bit(IEEE80211_HW_STRICT, t_sdata->local.hw.flags);
else
@@ -237,6 +256,7 @@ static void test_determine_chan_mode(struct kunit *test)
} else {
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elems);
KUNIT_ASSERT_EQ(test, conn.mode, params->expected_mode);
+ KUNIT_ASSERT_EQ(test, conn.bw_limit, params->expected_bw_limit);
}
}
diff --git a/net/mac80211/tests/s1g_tim.c b/net/mac80211/tests/s1g_tim.c
new file mode 100644
index 000000000000..642fa4ece89f
--- /dev/null
+++ b/net/mac80211/tests/s1g_tim.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for S1G TIM PVB decoding. This test suite covers
+ * IEEE 802.11-2024 Annex L figures 8, 9, 10, 12, 13, 14. ADE mode
+ * is not covered as it is an optional encoding format and is not
+ * currently supported by mac80211.
+ *
+ * Copyright (C) 2025 Morse Micro
+ */
+#include <linux/ieee80211.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+
+#define MAX_AID 128
+
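+/*
+ * Build a PVB Block Control octet: bits 0-1 encoding mode, bit 2
+ * inverse flag, bits 3-7 block offset.
+ */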
+#define BC(enc_mode, inverse, blk_off) \
+ ((((blk_off) & 0x1f) << 3) | ((inverse) ? BIT(2) : 0) | \
+ ((enc_mode) & 0x3))
+
+static void byte_to_bitstr(u8 v, char *out)
+{
+ for (int b = 7; b >= 0; b--)
+ *out++ = (v & BIT(b)) ? '1' : '0';
+ *out = '\0';
+}
+
+static void dump_tim_bits(struct kunit *test,
+ const struct ieee80211_tim_ie *tim, u8 tim_len)
+{
+ const u8 *ptr = tim->virtual_map;
+ const u8 *end = (const u8 *)tim + tim_len;
+ unsigned int oct = 1;
+ unsigned int blk = 0;
+ char bits[9];
+
+ while (ptr < end) {
+ u8 ctrl = *ptr++;
+ u8 mode = ctrl & 0x03;
+ bool inverse = ctrl & BIT(2);
+ u8 blk_off = ctrl >> 3;
+
+ kunit_info(
+ test, "Block %u (ENC=%s, blk_off=%u, inverse=%u)", blk,
+ (mode == IEEE80211_S1G_TIM_ENC_MODE_BLOCK) ? "BLOCK" :
+ (mode == IEEE80211_S1G_TIM_ENC_MODE_SINGLE) ? "SINGLE" :
+ "OLB",
+ blk_off, inverse);
+
+ byte_to_bitstr(ctrl, bits);
+ kunit_info(test, " octet %2u (ctrl) : %s (0x%02x)", oct,
+ bits, ctrl);
+ ++oct;
+
+ switch (mode) {
+ case IEEE80211_S1G_TIM_ENC_MODE_BLOCK: {
+ u8 blkmap = *ptr++;
+
+ byte_to_bitstr(blkmap, bits);
+ kunit_info(test, " octet %2u (blk-map) : %s (0x%02x)",
+ oct, bits, blkmap);
+ ++oct;
+
+ for (u8 sb = 0; sb < 8; sb++) {
+ if (!(blkmap & BIT(sb)))
+ continue;
+ u8 sub = *ptr++;
+
+ byte_to_bitstr(sub, bits);
+ kunit_info(
+ test,
+ " octet %2u (SB %2u) : %s (0x%02x)",
+ oct, sb, bits, sub);
+ ++oct;
+ }
+ break;
+ }
+ case IEEE80211_S1G_TIM_ENC_MODE_SINGLE: {
+ u8 single = *ptr++;
+
+ byte_to_bitstr(single, bits);
+ kunit_info(test, " octet %2u (single) : %s (0x%02x)",
+ oct, bits, single);
+ ++oct;
+ break;
+ }
+ case IEEE80211_S1G_TIM_ENC_MODE_OLB: {
+ u8 len = *ptr++;
+
+ byte_to_bitstr(len, bits);
+ kunit_info(test, " octet %2u (len=%2u) : %s (0x%02x)",
+ oct, len, bits, len);
+ ++oct;
+
+ for (u8 i = 0; i < len && ptr < end; i++) {
+ u8 sub = *ptr++;
+
+ byte_to_bitstr(sub, bits);
+ kunit_info(
+ test,
+ " octet %2u (SB %2u) : %s (0x%02x)",
+ oct, i, bits, sub);
+ ++oct;
+ }
+ break;
+ }
+ default:
+ kunit_info(test, " ** unknown encoding 0x%x **", mode);
+ return;
+ }
+ blk++;
+ }
+}
+
+static void tim_push(u8 **p, u8 v)
+{
+ *(*p)++ = v;
+}
+
+static void tim_begin(struct ieee80211_tim_ie *tim, u8 **p)
+{
+ tim->dtim_count = 0;
+ tim->dtim_period = 1;
+ tim->bitmap_ctrl = 0;
+ *p = tim->virtual_map;
+}
+
+static u8 tim_end(struct ieee80211_tim_ie *tim, u8 *tail)
+{
+ return tail - (u8 *)tim;
+}
+
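+/*
+ * Append a Block Bitmap encoded block: control octet, block bitmap
+ * octet, then one subblock octet per bit set in the block bitmap.
+ */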
+static void pvb_add_block_bitmap(u8 **p, u8 blk_off, bool inverse, u8 blk_bmap,
+ const u8 *subblocks)
+{
+ u8 enc = IEEE80211_S1G_TIM_ENC_MODE_BLOCK;
+ u8 n = hweight8(blk_bmap);
+
+ tim_push(p, BC(enc, inverse, blk_off));
+ tim_push(p, blk_bmap);
+
+ for (u8 i = 0; i < n; i++)
+ tim_push(p, subblocks[i]);
+}
+
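+/* Append a Single AID encoded block: control octet plus the 6-bit AID. */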
+static void pvb_add_single_aid(u8 **p, u8 blk_off, bool inverse, u8 single6)
+{
+ u8 enc = IEEE80211_S1G_TIM_ENC_MODE_SINGLE;
+
+ tim_push(p, BC(enc, inverse, blk_off));
+ tim_push(p, single6 & GENMASK(5, 0));
+}
+
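+/*
+ * Append an OLB encoded block: control octet, subblock count octet,
+ * then the subblock octets themselves.
+ */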
+static void pvb_add_olb(u8 **p, u8 blk_off, bool inverse, const u8 *subblocks,
+ u8 len)
+{
+ u8 enc = IEEE80211_S1G_TIM_ENC_MODE_OLB;
+
+ tim_push(p, BC(enc, inverse, blk_off));
+ tim_push(p, len);
+ for (u8 i = 0; i < len; i++)
+ tim_push(p, subblocks[i]);
+}
+
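+/*
+ * Compare ieee80211_s1g_check_tim() against the expected AID bitmap
+ * for every AID from 1 to MAX_AID.
+ */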
+static void check_all_aids(struct kunit *test,
+ const struct ieee80211_tim_ie *tim, u8 tim_len,
+ const unsigned long *expected)
+{
+ for (u16 aid = 1; aid <= MAX_AID; aid++) {
+ bool want = test_bit(aid, expected);
+ bool got = ieee80211_s1g_check_tim(tim, tim_len, aid);
+
+ KUNIT_ASSERT_EQ_MSG(test, got, want,
+ "AID %u mismatch (got=%d want=%d)", aid,
+ got, want);
+ }
+}
+
+static void fill_bitmap(unsigned long *bm, const u16 *list, size_t n)
+{
+ size_t i;
+
+ bitmap_zero(bm, MAX_AID + 1);
+ for (i = 0; i < n; i++)
+ __set_bit(list[i], bm);
+}
+
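+/*
+ * Expected bitmap for inverse encodings: every AID from 1 to max_aid
+ * is set except those listed in @except.
+ */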
+static void fill_bitmap_inverse(unsigned long *bm, u16 max_aid,
+ const u16 *except, size_t n_except)
+{
+ bitmap_zero(bm, MAX_AID + 1);
+ for (u16 aid = 1; aid <= max_aid; aid++)
+ __set_bit(aid, bm);
+
+ for (size_t i = 0; i < n_except; i++)
+ if (except[i] <= max_aid)
+ __clear_bit(except[i], bm);
+}
+
+static void s1g_tim_block_test(struct kunit *test)
+{
+ u8 buf[256] = {};
+ struct ieee80211_tim_ie *tim = (void *)buf;
+ u8 *p, tim_len;
+ static const u8 subblocks[] = {
+ 0x42, /* SB m=0: AIDs 1,6 */
+ 0xA0, /* SB m=2: AIDs 21,23 */
+ };
+ u8 blk_bmap = 0x05; /* bits 0 and 2 set */
+ bool inverse = false;
+ static const u16 set_list[] = { 1, 6, 21, 23 };
+ DECLARE_BITMAP(exp, MAX_AID + 1);
+
+ tim_begin(tim, &p);
+ pvb_add_block_bitmap(&p, 0, inverse, blk_bmap, subblocks);
+ tim_len = tim_end(tim, p);
+
+ fill_bitmap(exp, set_list, ARRAY_SIZE(set_list));
+
+ dump_tim_bits(test, tim, tim_len);
+ check_all_aids(test, tim, tim_len, exp);
+}
+
+static void s1g_tim_single_test(struct kunit *test)
+{
+ u8 buf[256] = {};
+ struct ieee80211_tim_ie *tim = (void *)buf;
+ u8 *p, tim_len;
+ bool inverse = false;
+ u8 blk_off = 0;
+ u8 single6 = 0x1f; /* 31 */
+ static const u16 set_list[] = { 31 };
+ DECLARE_BITMAP(exp, MAX_AID + 1);
+
+ tim_begin(tim, &p);
+ pvb_add_single_aid(&p, blk_off, inverse, single6);
+ tim_len = tim_end(tim, p);
+
+ fill_bitmap(exp, set_list, ARRAY_SIZE(set_list));
+
+ dump_tim_bits(test, tim, tim_len);
+ check_all_aids(test, tim, tim_len, exp);
+}
+
+static void s1g_tim_olb_test(struct kunit *test)
+{
+ u8 buf[256] = {};
+ struct ieee80211_tim_ie *tim = (void *)buf;
+ u8 *p, tim_len;
+ bool inverse = false;
+ u8 blk_off = 0;
+ static const u16 set_list[] = { 1, 6, 13, 15, 17, 22, 29, 31, 33,
+ 38, 45, 47, 49, 54, 61, 63, 65, 70 };
+ static const u8 subblocks[] = { 0x42, 0xA0, 0x42, 0xA0, 0x42,
+ 0xA0, 0x42, 0xA0, 0x42 };
+ u8 len = ARRAY_SIZE(subblocks);
+ DECLARE_BITMAP(exp, MAX_AID + 1);
+
+ tim_begin(tim, &p);
+ pvb_add_olb(&p, blk_off, inverse, subblocks, len);
+ tim_len = tim_end(tim, p);
+
+ fill_bitmap(exp, set_list, ARRAY_SIZE(set_list));
+
+ dump_tim_bits(test, tim, tim_len);
+ check_all_aids(test, tim, tim_len, exp);
+}
+
+static void s1g_tim_inverse_block_test(struct kunit *test)
+{
+ u8 buf[256] = {};
+ struct ieee80211_tim_ie *tim = (void *)buf;
+ u8 *p, tim_len;
+ /* Same sub-block content as Figure L-8, but inverse = true */
+ static const u8 subblocks[] = {
+ 0x42, /* SB m=0: AIDs 1,6 */
+ 0xA0, /* SB m=2: AIDs 21,23 */
+ };
+ u8 blk_bmap = 0x05;
+ bool inverse = true;
+ /* All AIDs except 1,6,21,23 are set */
+ static const u16 except[] = { 1, 6, 21, 23 };
+ DECLARE_BITMAP(exp, MAX_AID + 1);
+
+ tim_begin(tim, &p);
+ pvb_add_block_bitmap(&p, 0, inverse, blk_bmap, subblocks);
+ tim_len = tim_end(tim, p);
+
+ fill_bitmap_inverse(exp, 63, except, ARRAY_SIZE(except));
+
+ dump_tim_bits(test, tim, tim_len);
+ check_all_aids(test, tim, tim_len, exp);
+}
+
+static void s1g_tim_inverse_single_test(struct kunit *test)
+{
+ u8 buf[256] = {};
+ struct ieee80211_tim_ie *tim = (void *)buf;
+ u8 *p, tim_len;
+ bool inverse = true;
+ u8 blk_off = 0;
+ u8 single6 = 0x1f; /* 31 */
+ /* All AIDs except 31 are set */
+ static const u16 except[] = { 31 };
+ DECLARE_BITMAP(exp, MAX_AID + 1);
+
+ tim_begin(tim, &p);
+ pvb_add_single_aid(&p, blk_off, inverse, single6);
+ tim_len = tim_end(tim, p);
+
+ fill_bitmap_inverse(exp, 63, except, ARRAY_SIZE(except));
+
+ dump_tim_bits(test, tim, tim_len);
+ check_all_aids(test, tim, tim_len, exp);
+}
+
+static void s1g_tim_inverse_olb_test(struct kunit *test)
+{
+ u8 buf[256] = {};
+ struct ieee80211_tim_ie *tim = (void *)buf;
+ u8 *p, tim_len;
+ bool inverse = true;
+ u8 blk_off = 0, len;
+ /* All AIDs except the list below are set */
+ static const u16 except[] = { 1, 6, 13, 15, 17, 22, 29, 31, 33,
+ 38, 45, 47, 49, 54, 61, 63, 65, 70 };
+ static const u8 subblocks[] = { 0x42, 0xA0, 0x42, 0xA0, 0x42,
+ 0xA0, 0x42, 0xA0, 0x42 };
+ len = ARRAY_SIZE(subblocks);
+ DECLARE_BITMAP(exp, MAX_AID + 1);
+
+ tim_begin(tim, &p);
+ pvb_add_olb(&p, blk_off, inverse, subblocks, len);
+ tim_len = tim_end(tim, p);
+
+ fill_bitmap_inverse(exp, 127, except, ARRAY_SIZE(except));
+
+ dump_tim_bits(test, tim, tim_len);
+ check_all_aids(test, tim, tim_len, exp);
+}
+
+static struct kunit_case s1g_tim_test_cases[] = {
+ KUNIT_CASE(s1g_tim_block_test),
+ KUNIT_CASE(s1g_tim_single_test),
+ KUNIT_CASE(s1g_tim_olb_test),
+ KUNIT_CASE(s1g_tim_inverse_block_test),
+ KUNIT_CASE(s1g_tim_inverse_single_test),
+ KUNIT_CASE(s1g_tim_inverse_olb_test),
+ {}
+};
+
+static struct kunit_suite s1g_tim = {
+ .name = "mac80211-s1g-tim",
+ .test_cases = s1g_tim_test_cases,
+};
+
+kunit_test_suite(s1g_tim);
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 72fad8ea8bb9..0bfbce157486 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -384,12 +384,14 @@ DEFINE_EVENT(local_sdata_addr_evt, drv_remove_interface,
TRACE_EVENT(drv_config,
TP_PROTO(struct ieee80211_local *local,
+ int radio_idx,
u32 changed),
- TP_ARGS(local, changed),
+ TP_ARGS(local, radio_idx, changed),
TP_STRUCT__entry(
LOCAL_ENTRY
+ __field(int, radio_idx)
__field(u32, changed)
__field(u32, flags)
__field(int, power_level)
@@ -403,6 +405,7 @@ TRACE_EVENT(drv_config,
TP_fast_assign(
LOCAL_ASSIGN;
+ __entry->radio_idx = radio_idx;
__entry->changed = changed;
__entry->flags = local->hw.conf.flags;
__entry->power_level = local->hw.conf.power_level;
@@ -417,8 +420,8 @@ TRACE_EVENT(drv_config,
),
TP_printk(
- LOCAL_PR_FMT " ch:%#x" CHANDEF_PR_FMT,
- LOCAL_PR_ARG, __entry->changed, CHANDEF_PR_ARG
+ LOCAL_PR_FMT " radio_idx:%d ch:%#x" CHANDEF_PR_FMT,
+ LOCAL_PR_ARG, __entry->radio_idx, __entry->changed, CHANDEF_PR_ARG
)
);
@@ -818,34 +821,71 @@ TRACE_EVENT(drv_get_key_seq,
)
);
-DEFINE_EVENT(local_u32_evt, drv_set_frag_threshold,
- TP_PROTO(struct ieee80211_local *local, u32 value),
- TP_ARGS(local, value)
+TRACE_EVENT(drv_set_frag_threshold,
+ TP_PROTO(struct ieee80211_local *local, int radio_idx, u32 value),
+
+ TP_ARGS(local, radio_idx, value),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(int, radio_idx)
+ __field(u32, value)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->radio_idx = radio_idx;
+ __entry->value = value;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " radio_id:%d value:%u",
+ LOCAL_PR_ARG, __entry->radio_idx, __entry->value
+ )
);
-DEFINE_EVENT(local_u32_evt, drv_set_rts_threshold,
- TP_PROTO(struct ieee80211_local *local, u32 value),
- TP_ARGS(local, value)
+TRACE_EVENT(drv_set_rts_threshold,
+ TP_PROTO(struct ieee80211_local *local, int radio_idx, u32 value),
+
+ TP_ARGS(local, radio_idx, value),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(int, radio_idx)
+ __field(u32, value)
+ ),
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->radio_idx = radio_idx;
+ __entry->value = value;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " radio_id:%d value:%u",
+ LOCAL_PR_ARG, __entry->radio_idx, __entry->value
+ )
);
TRACE_EVENT(drv_set_coverage_class,
- TP_PROTO(struct ieee80211_local *local, s16 value),
+ TP_PROTO(struct ieee80211_local *local, int radio_idx, s16 value),
- TP_ARGS(local, value),
+ TP_ARGS(local, radio_idx, value),
TP_STRUCT__entry(
LOCAL_ENTRY
+ __field(int, radio_idx)
__field(s16, value)
),
TP_fast_assign(
LOCAL_ASSIGN;
+ __entry->radio_idx = radio_idx;
__entry->value = value;
),
TP_printk(
- LOCAL_PR_FMT " value:%d",
- LOCAL_PR_ARG, __entry->value
+ LOCAL_PR_FMT " radio_id:%d value:%d",
+ LOCAL_PR_ARG, __entry->radio_idx, __entry->value
)
);
@@ -1002,6 +1042,33 @@ DEFINE_EVENT(sta_event, drv_sta_statistics,
TP_ARGS(local, sdata, sta)
);
+TRACE_EVENT(drv_link_sta_statistics,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_link_sta *link_sta),
+
+ TP_ARGS(local, sdata, link_sta),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ STA_ENTRY
+ __field(u32, link_id)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ STA_NAMED_ASSIGN(link_sta->sta);
+ __entry->link_id = link_sta->link_id;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " (link %d)",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->link_id
+ )
+);
+
DEFINE_EVENT(sta_event, drv_sta_add,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
@@ -1291,12 +1358,14 @@ TRACE_EVENT(drv_set_antenna,
);
TRACE_EVENT(drv_get_antenna,
- TP_PROTO(struct ieee80211_local *local, u32 tx_ant, u32 rx_ant, int ret),
+ TP_PROTO(struct ieee80211_local *local, int radio_idx, u32 tx_ant,
+ u32 rx_ant, int ret),
- TP_ARGS(local, tx_ant, rx_ant, ret),
+ TP_ARGS(local, radio_idx, tx_ant, rx_ant, ret),
TP_STRUCT__entry(
LOCAL_ENTRY
+ __field(int, radio_idx)
__field(u32, tx_ant)
__field(u32, rx_ant)
__field(int, ret)
@@ -1304,14 +1373,16 @@ TRACE_EVENT(drv_get_antenna,
TP_fast_assign(
LOCAL_ASSIGN;
+ __entry->radio_idx = radio_idx;
__entry->tx_ant = tx_ant;
__entry->rx_ant = rx_ant;
__entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT " tx_ant:%d rx_ant:%d ret:%d",
- LOCAL_PR_ARG, __entry->tx_ant, __entry->rx_ant, __entry->ret
+ LOCAL_PR_FMT " radio_idx:%d tx_ant:%d rx_ant:%d ret:%d",
+ LOCAL_PR_ARG, __entry->radio_idx, __entry->tx_ant,
+ __entry->rx_ant, __entry->ret
)
);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index d8d4f3d7d7f2..e7b141c55f7a 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -5,7 +5,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*
* Transmit and frame generation functions.
*/
@@ -59,6 +59,9 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
if (WARN_ON_ONCE(tx->rate.idx < 0))
return 0;
+ if (info->band >= NUM_NL80211_BANDS)
+ return 0;
+
sband = local->hw.wiphy->bands[info->band];
txrate = &sband->bitrates[tx->rate.idx];
@@ -612,6 +615,12 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
else
tx->key = NULL;
+ if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
+ if (tx->key && tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
+ info->control.hw_key = &tx->key->conf;
+ return TX_CONTINUE;
+ }
+
if (tx->key) {
bool skip_hw = false;
@@ -677,7 +686,10 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
memset(&txrc, 0, sizeof(txrc));
- sband = tx->local->hw.wiphy->bands[info->band];
+ if (info->band < NUM_NL80211_BANDS)
+ sband = tx->local->hw.wiphy->bands[info->band];
+ else
+ return TX_CONTINUE;
len = min_t(u32, tx->skb->len + FCS_LEN,
tx->local->hw.wiphy->frag_threshold);
@@ -1173,7 +1185,8 @@ void ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata,
return;
if (!sta ||
- (!sta->sta.valid_links && !sta->sta.deflink.ht_cap.ht_supported) ||
+ (!sta->sta.valid_links && !sta->sta.deflink.ht_cap.ht_supported &&
+ !sta->sta.deflink.s1g_cap.s1g) ||
!sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO ||
skb->protocol == sdata->control_port_protocol)
return;
@@ -1428,7 +1441,7 @@ static void ieee80211_txq_enqueue(struct ieee80211_local *local,
{
struct fq *fq = &local->fq;
struct fq_tin *tin = &txqi->tin;
- u32 flow_idx = fq_flow_idx(fq, skb);
+ u32 flow_idx;
ieee80211_set_skb_enqueue_time(skb);
@@ -1444,6 +1457,7 @@ static void ieee80211_txq_enqueue(struct ieee80211_local *local,
IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
__skb_queue_tail(&txqi->frags, skb);
} else {
+ flow_idx = fq_flow_idx(fq, skb);
fq_tin_enqueue(fq, tin, flow_idx, skb,
fq_skb_free_func);
}
@@ -1541,7 +1555,7 @@ void ieee80211_txq_purge(struct ieee80211_local *local,
spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]);
}
-void ieee80211_txq_set_params(struct ieee80211_local *local)
+void ieee80211_txq_set_params(struct ieee80211_local *local, int radio_idx)
{
if (local->hw.wiphy->txq_limit)
local->fq.limit = local->hw.wiphy->txq_limit;
@@ -1605,7 +1619,7 @@ int ieee80211_txq_setup_flows(struct ieee80211_local *local)
for (i = 0; i < fq->flows_cnt; i++)
codel_vars_init(&local->cvars[i]);
- ieee80211_txq_set_params(local);
+ ieee80211_txq_set_params(local, -1);
return 0;
}
@@ -1806,7 +1820,7 @@ static int invoke_tx_handlers_early(struct ieee80211_tx_data *tx)
txh_done:
if (unlikely(res == TX_DROP)) {
- I802_DEBUG_INC(tx->local->tx_handlers_drop);
+ tx->sdata->tx_handlers_drop++;
if (tx->skb)
ieee80211_free_txskb(&tx->local->hw, tx->skb);
else
@@ -1850,7 +1864,7 @@ static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx)
txh_done:
if (unlikely(res == TX_DROP)) {
- I802_DEBUG_INC(tx->local->tx_handlers_drop);
+ tx->sdata->tx_handlers_drop++;
if (tx->skb)
ieee80211_free_txskb(&tx->local->hw, tx->skb);
else
@@ -1934,6 +1948,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
if (unlikely(res_prepare == TX_DROP)) {
ieee80211_free_txskb(&local->hw, skb);
+ tx.sdata->tx_handlers_drop++;
return true;
} else if (unlikely(res_prepare == TX_QUEUED)) {
return true;
@@ -3720,8 +3735,10 @@ void __ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
r = ieee80211_xmit_fast_finish(sdata, sta, fast_tx->pn_offs,
fast_tx->key, &tx);
tx.skb = NULL;
- if (r == TX_DROP)
+ if (r == TX_DROP) {
+ tx.sdata->tx_handlers_drop++;
goto free;
+ }
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
@@ -3876,6 +3893,7 @@ begin:
* The key can be removed while the packet was queued, so need to call
* this here to get the current key.
*/
+ info->control.hw_key = NULL;
r = ieee80211_tx_h_select_key(&tx);
if (r != TX_CONTINUE) {
ieee80211_free_txskb(&local->hw, skb);
@@ -4098,7 +4116,9 @@ void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
spin_lock_bh(&local->active_txq_lock[txq->ac]);
- has_queue = force || txq_has_queue(txq);
+ has_queue = force ||
+ (!test_bit(IEEE80211_TXQ_STOP, &txqi->flags) &&
+ txq_has_queue(txq));
if (list_empty(&txqi->schedule_order) &&
(has_queue || ieee80211_txq_keep_active(txqi))) {
/* If airtime accounting is active, always enqueue STAs at the
@@ -4871,15 +4891,114 @@ void ieee80211_tx_pending(struct tasklet_struct *t)
/* functions for drivers to get certain frames */
+static void ieee80211_beacon_add_tim_pvb(struct ps_data *ps,
+ struct sk_buff *skb,
+ bool mcast_traffic)
+{
+ int i, n1 = 0, n2;
+
+ /*
+ * Find largest even number N1 so that bits numbered 1 through
+ * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits
+ * (N2 + 1) x 8 through 2007 are 0.
+ */
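+ /*
+ * Illustrative worked example (editor's note, not part of this
+ * change): if the buffered AIDs are 21 and 30, tim[2] and tim[3]
+ * are the only non-zero octets, so N1 = 2 & 0xfe = 2 and N2 = 3,
+ * giving a two-octet partial virtual bitmap starting at octet 2.
+ */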
+ for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) {
+ if (ps->tim[i]) {
+ n1 = i & 0xfe;
+ break;
+ }
+ }
+ n2 = n1;
+ for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) {
+ if (ps->tim[i]) {
+ n2 = i;
+ break;
+ }
+ }
+
+ /* Bitmap control */
+ skb_put_u8(skb, n1 | mcast_traffic);
+ /* Part Virt Bitmap */
+ skb_put_data(skb, ps->tim + n1, n2 - n1 + 1);
+}
+
+/*
+ * mac80211 currently supports encoding using block bitmap mode,
+ * non-inverse. The current implementation supports up to 1600 AIDs.
+ *
+ * Block bitmap encoding breaks down the AID bitmap into blocks of 64
+ * AIDs. Each block contains between 0 and 8 subblocks. Each subblock
+ * describes 8 AIDs and the presence of a subblock is determined by
+ * the block bitmap.
+ */
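+/*
+ * Illustrative worked example (editor's note, not part of this change):
+ * buffered AIDs 1, 6, 21 and 23 all fall in block 0; AIDs 1 and 6 live
+ * in sub-block 0 (bits 1 and 6 -> 0x42) and AIDs 21 and 23 in sub-block
+ * 2 (bits 5 and 7 -> 0xa0), so the block is encoded as block control
+ * 0x00, block bitmap 0x05 (sub-blocks 0 and 2 present), then the
+ * sub-block octets 0x42 and 0xa0 - the same layout exercised by the
+ * s1g-tim kunit block test earlier in this series.
+ */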
+static void ieee80211_s1g_beacon_add_tim_pvb(struct ps_data *ps,
+ struct sk_buff *skb,
+ bool mcast_traffic)
+{
+ int blk;
+
+ /*
+ * Emit a Bitmap Control octet with a page slice number of 31 and a
+ * page index of 0 which, as per IEEE 802.11-2024 9.4.2.5.1,
+ * indicates that the entire page (2048 bits) identified by the page
+ * index is encoded in the partial virtual bitmap.
+ */
+ skb_put_u8(skb, mcast_traffic | (31 << 1));
+
+ /* Emit an encoded block for each non-zero sub-block */
+ for (blk = 0; blk < IEEE80211_MAX_SUPPORTED_S1G_TIM_BLOCKS; blk++) {
+ u8 blk_bmap = 0;
+ int sblk;
+
+ for (sblk = 0; sblk < 8; sblk++) {
+ int sblk_idx = blk * 8 + sblk;
+
+ /*
+ * If the current subblock is non-zero, mark it as
+ * present in the block bitmap so it is emitted for
+ * this block.
+ */
+ if (ps->tim[sblk_idx])
+ blk_bmap |= BIT(sblk);
+ }
+
+ /* If the current block contains no non-zero subblocks, skip it */
+ if (!blk_bmap)
+ continue;
+
+ /*
+ * Emit a block control byte for the current encoded block
+ * with an encoding mode of block bitmap (0x0), not inverse
+ * (0x0) and the current block offset (5 bits)
+ */
+ skb_put_u8(skb, blk << 3);
+
+ /*
+ * Emit the block bitmap for the current encoded block, which
+ * indicates the sub-blocks that are present.
+ */
+ skb_put_u8(skb, blk_bmap);
+
+ /* Emit the present subblocks */
+ for (sblk = 0; sblk < 8; sblk++) {
+ int sblk_idx = blk * 8 + sblk;
+
+ if (!(blk_bmap & BIT(sblk)))
+ continue;
+
+ skb_put_u8(skb, ps->tim[sblk_idx]);
+ }
+ }
+}
+
static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
struct ieee80211_link_data *link,
struct ps_data *ps, struct sk_buff *skb,
bool is_template)
{
- u8 *pos, *tim;
- int aid0 = 0;
- int i, have_bits = 0, n1, n2;
+ struct element *tim;
+ bool mcast_traffic = false, have_bits = false;
struct ieee80211_bss_conf *link_conf = link->conf;
+ bool s1g = ieee80211_get_link_sband(link)->band == NL80211_BAND_S1GHZ;
/* Generate bitmap for TIM only if there are any STAs in power save
* mode. */
@@ -4887,7 +5006,8 @@ static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
/* in the hope that this is faster than
* checking byte-for-byte */
have_bits = !bitmap_empty((unsigned long *)ps->tim,
- IEEE80211_MAX_AID+1);
+ IEEE80211_MAX_AID + 1);
+
if (!is_template) {
if (ps->dtim_count == 0)
ps->dtim_count = link_conf->dtim_period - 1;
@@ -4895,51 +5015,39 @@ static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
ps->dtim_count--;
}
- tim = pos = skb_put(skb, 5);
- *pos++ = WLAN_EID_TIM;
- *pos++ = 3;
- *pos++ = ps->dtim_count;
- *pos++ = link_conf->dtim_period;
+ /* Length is set after parsing the AID bitmap */
+ tim = skb_put(skb, sizeof(struct element));
+ tim->id = WLAN_EID_TIM;
+ skb_put_u8(skb, ps->dtim_count);
+ skb_put_u8(skb, link_conf->dtim_period);
if (ps->dtim_count == 0 && !skb_queue_empty(&ps->bc_buf))
- aid0 = 1;
+ mcast_traffic = true;
- ps->dtim_bc_mc = aid0 == 1;
+ ps->dtim_bc_mc = mcast_traffic;
if (have_bits) {
- /* Find largest even number N1 so that bits numbered 1 through
- * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits
- * (N2 + 1) x 8 through 2007 are 0. */
- n1 = 0;
- for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) {
- if (ps->tim[i]) {
- n1 = i & 0xfe;
- break;
- }
- }
- n2 = n1;
- for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) {
- if (ps->tim[i]) {
- n2 = i;
- break;
- }
- }
-
- /* Bitmap control */
- *pos++ = n1 | aid0;
- /* Part Virt Bitmap */
- skb_put_data(skb, ps->tim + n1, n2 - n1 + 1);
-
- tim[1] = n2 - n1 + 4;
+ if (s1g)
+ ieee80211_s1g_beacon_add_tim_pvb(ps, skb,
+ mcast_traffic);
+ else
+ ieee80211_beacon_add_tim_pvb(ps, skb, mcast_traffic);
} else {
- *pos++ = aid0; /* Bitmap control */
-
- if (ieee80211_get_link_sband(link)->band != NL80211_BAND_S1GHZ) {
- tim[1] = 4;
+ /*
+ * If there is no buffered unicast traffic for an S1G
+ * interface, we can exclude the bitmap control. This is in
+ * contrast to other phy types as they do include the bitmap
+ * control and pvb even when there is no buffered traffic.
+ */
+ if (!s1g) {
+ /* Bitmap control */
+ skb_put_u8(skb, mcast_traffic);
/* Part Virt Bitmap */
skb_put_u8(skb, 0);
}
}
+
+ tim->datalen = skb_tail_pointer(skb) - tim->data;
}
static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
@@ -5016,12 +5124,25 @@ static void ieee80211_set_beacon_cntdwn(struct ieee80211_sub_if_data *sdata,
}
}
-static u8 __ieee80211_beacon_update_cntdwn(struct beacon_data *beacon)
+static u8 __ieee80211_beacon_update_cntdwn(struct ieee80211_link_data *link,
+ struct beacon_data *beacon)
{
- beacon->cntdwn_current_counter--;
+ if (beacon->cntdwn_current_counter == 1) {
+ /*
+ * Channel switch handling is done by a worker thread while
+ * beacons get pulled from hardware timers. It's therefore
+ * possible that software threads are slow enough to not be
+ * able to complete CSA handling in a single beacon interval,
+ * in which case we get here. There isn't much to do about
+ * it, other than letting the user know that the AP isn't
+ * behaving correctly.
+ */
+ link_err_once(link,
+ "beacon TX faster than countdown (channel/color switch) completion\n");
+ return 0;
+ }
- /* the counter should never reach 0 */
- WARN_ON_ONCE(!beacon->cntdwn_current_counter);
+ beacon->cntdwn_current_counter--;
return beacon->cntdwn_current_counter;
}
@@ -5052,7 +5173,7 @@ u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif, unsigned int link_i
if (!beacon)
goto unlock;
- count = __ieee80211_beacon_update_cntdwn(beacon);
+ count = __ieee80211_beacon_update_cntdwn(link, beacon);
unlock:
rcu_read_unlock();
@@ -5276,14 +5397,14 @@ ieee80211_beacon_add_mbssid(struct sk_buff *skb, struct beacon_data *beacon,
}
static struct sk_buff *
-ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_link_data *link,
- struct ieee80211_mutable_offsets *offs,
- bool is_template,
- struct beacon_data *beacon,
- struct ieee80211_chanctx_conf *chanctx_conf,
- u8 ema_index)
+__ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_data *link,
+ struct ieee80211_mutable_offsets *offs,
+ bool is_template,
+ struct beacon_data *beacon,
+ struct ieee80211_chanctx_conf *chanctx_conf,
+ u8 ema_index)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
@@ -5344,6 +5465,71 @@ ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
return skb;
}
+static bool ieee80211_s1g_need_long_beacon(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_link_data *link)
+{
+ struct ps_data *ps = &sdata->u.ap.ps;
+
+ if (ps->sb_count == 0)
+ ps->sb_count = link->conf->s1g_long_beacon_period - 1;
+ else
+ ps->sb_count--;
+
+ return ps->sb_count == 0;
+}
+
+static struct sk_buff *
+ieee80211_s1g_short_beacon_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_data *link,
+ struct ieee80211_chanctx_conf *chanctx_conf,
+ struct s1g_short_beacon_data *sb,
+ bool is_template)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_if_ap *ap = &sdata->u.ap;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(local->tx_headroom + sb->short_head_len +
+ sb->short_tail_len + 256 +
+ local->hw.extra_beacon_tailroom);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, local->tx_headroom);
+ skb_put_data(skb, sb->short_head, sb->short_head_len);
+
+ ieee80211_beacon_add_tim(sdata, link, &ap->ps, skb, is_template);
+
+ if (sb->short_tail)
+ skb_put_data(skb, sb->short_tail, sb->short_tail_len);
+
+ ieee80211_beacon_get_finish(hw, vif, link, NULL, NULL, skb,
+ chanctx_conf, 0);
+ return skb;
+}
+
+static struct sk_buff *
+ieee80211_beacon_get_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_link_data *link,
+ struct ieee80211_mutable_offsets *offs,
+ bool is_template, struct beacon_data *beacon,
+ struct ieee80211_chanctx_conf *chanctx_conf,
+ u8 ema_index, struct s1g_short_beacon_data *s1g_sb)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ if (!sdata->vif.cfg.s1g || !s1g_sb ||
+ ieee80211_s1g_need_long_beacon(sdata, link))
+ return __ieee80211_beacon_get_ap(hw, vif, link, offs,
+ is_template, beacon,
+ chanctx_conf, ema_index);
+
+ return ieee80211_s1g_short_beacon_get(hw, vif, link, chanctx_conf,
+ s1g_sb, is_template);
+}
+
static struct ieee80211_ema_beacons *
ieee80211_beacon_get_ap_ema_list(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -5367,7 +5553,7 @@ ieee80211_beacon_get_ap_ema_list(struct ieee80211_hw *hw,
ieee80211_beacon_get_ap(hw, vif, link,
&ema->bcn[ema->cnt].offs,
is_template, beacon,
- chanctx_conf, ema->cnt);
+ chanctx_conf, ema->cnt, NULL);
if (!ema->bcn[ema->cnt].skb)
break;
}
@@ -5396,6 +5582,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
struct ieee80211_sub_if_data *sdata = NULL;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_link_data *link;
+ struct s1g_short_beacon_data *s1g_short_bcn = NULL;
rcu_read_lock();
@@ -5417,6 +5604,13 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
if (!beacon)
goto out;
+ if (vif->cfg.s1g && link->u.ap.s1g_short_beacon) {
+ s1g_short_bcn =
+ rcu_dereference(link->u.ap.s1g_short_beacon);
+ if (!s1g_short_bcn)
+ goto out;
+ }
+
if (ema_beacons) {
*ema_beacons =
ieee80211_beacon_get_ap_ema_list(hw, vif, link,
@@ -5437,8 +5631,8 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
skb = ieee80211_beacon_get_ap(hw, vif, link, offs,
is_template, beacon,
- chanctx_conf,
- ema_index);
+ chanctx_conf, ema_index,
+ s1g_short_bcn);
}
} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
@@ -5450,7 +5644,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
if (beacon->cntdwn_counter_offsets[0]) {
if (!is_template)
- __ieee80211_beacon_update_cntdwn(beacon);
+ __ieee80211_beacon_update_cntdwn(link, beacon);
ieee80211_set_beacon_cntdwn(sdata, beacon, link);
}
@@ -5482,7 +5676,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
* for now we leave it consistent with overall
* mac80211's behavior.
*/
- __ieee80211_beacon_update_cntdwn(beacon);
+ __ieee80211_beacon_update_cntdwn(link, beacon);
ieee80211_set_beacon_cntdwn(sdata, beacon, link);
}
@@ -6098,7 +6292,9 @@ void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
enum nl80211_band band;
rcu_read_lock();
- if (!ieee80211_vif_is_mld(&sdata->vif)) {
+ if (sdata->vif.type == NL80211_IFTYPE_NAN) {
+ band = NUM_NL80211_BANDS;
+ } else if (!ieee80211_vif_is_mld(&sdata->vif)) {
WARN_ON(link_id >= 0);
chanctx_conf =
rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 27d414efa3fd..c9931537f9d2 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1826,13 +1826,22 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
/* setup fragmentation threshold */
- drv_set_frag_threshold(local, hw->wiphy->frag_threshold);
+ drv_set_frag_threshold(local, -1, hw->wiphy->frag_threshold);
/* setup RTS threshold */
- drv_set_rts_threshold(local, hw->wiphy->rts_threshold);
+ if (hw->wiphy->n_radio > 0) {
+ for (i = 0; i < hw->wiphy->n_radio; i++) {
+ u32 rts_threshold =
+ hw->wiphy->radio_cfg[i].rts_threshold;
+
+ drv_set_rts_threshold(local, i, rts_threshold);
+ }
+ } else {
+ drv_set_rts_threshold(local, -1, hw->wiphy->rts_threshold);
+ }
/* reset coverage class */
- drv_set_coverage_class(local, hw->wiphy->coverage_class);
+ drv_set_coverage_class(local, -1, hw->wiphy->coverage_class);
ieee80211_led_radio(local, true);
ieee80211_mod_tpt_led_trig(local,
@@ -1890,11 +1899,11 @@ int ieee80211_reconfig(struct ieee80211_local *local)
ieee80211_assign_chanctx(local, sdata, &sdata->deflink);
/* reconfigure hardware */
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_LISTEN_INTERVAL |
- IEEE80211_CONF_CHANGE_MONITOR |
- IEEE80211_CONF_CHANGE_PS |
- IEEE80211_CONF_CHANGE_RETRY_LIMITS |
- IEEE80211_CONF_CHANGE_IDLE);
+ ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_LISTEN_INTERVAL |
+ IEEE80211_CONF_CHANGE_MONITOR |
+ IEEE80211_CONF_CHANGE_PS |
+ IEEE80211_CONF_CHANGE_RETRY_LIMITS |
+ IEEE80211_CONF_CHANGE_IDLE);
ieee80211_configure_filter(local);
@@ -2144,11 +2153,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
cfg80211_sched_scan_stopped_locked(local->hw.wiphy, 0);
wake_up:
-
- if (local->virt_monitors > 0 &&
- local->virt_monitors == local->open_count)
- ieee80211_add_virtual_monitor(local);
-
/*
* Clear the WLAN_STA_BLOCK_BA flag so new aggregation
* sessions can be established after a resume.
@@ -2202,6 +2206,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
}
+ if (local->virt_monitors > 0 &&
+ local->virt_monitors == local->open_count)
+ ieee80211_add_virtual_monitor(local);
+
if (!suspended)
return 0;
@@ -2548,6 +2556,23 @@ end:
return 0;
}
+int ieee80211_put_reg_conn(struct sk_buff *skb,
+ enum ieee80211_channel_flags flags)
+{
+ u8 reg_conn = IEEE80211_REG_CONN_LPI_VALID |
+ IEEE80211_REG_CONN_LPI_VALUE |
+ IEEE80211_REG_CONN_SP_VALID;
+
+ if (!(flags & IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT))
+ reg_conn |= IEEE80211_REG_CONN_SP_VALUE;
+
+ skb_put_u8(skb, WLAN_EID_EXTENSION);
+ skb_put_u8(skb, 1 + sizeof(reg_conn));
+ skb_put_u8(skb, WLAN_EID_EXT_NON_AP_STA_REG_CON);
+ skb_put_u8(skb, reg_conn);
+ return 0;
+}
+
int ieee80211_put_he_6ghz_cap(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata,
enum ieee80211_smps_mode smps_mode)
@@ -3174,10 +3199,11 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_local *local,
return true;
}
-bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper,
+bool ieee80211_chandef_s1g_oper(struct ieee80211_local *local,
+ const struct ieee80211_s1g_oper_ie *oper,
struct cfg80211_chan_def *chandef)
{
- u32 oper_freq;
+ u32 oper_khz, pri_1mhz_khz, pri_2mhz_khz;
if (!oper)
return false;
@@ -3202,12 +3228,36 @@ bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper,
return false;
}
- oper_freq = ieee80211_channel_to_freq_khz(oper->oper_ch,
- NL80211_BAND_S1GHZ);
- chandef->center_freq1 = KHZ_TO_MHZ(oper_freq);
- chandef->freq1_offset = oper_freq % 1000;
+ chandef->s1g_primary_2mhz = false;
- return true;
+ switch (u8_get_bits(oper->ch_width, S1G_OPER_CH_WIDTH_PRIMARY)) {
+ case IEEE80211_S1G_PRI_CHANWIDTH_1MHZ:
+ pri_1mhz_khz = ieee80211_channel_to_freq_khz(
+ oper->primary_ch, NL80211_BAND_S1GHZ);
+ break;
+ case IEEE80211_S1G_PRI_CHANWIDTH_2MHZ:
+ chandef->s1g_primary_2mhz = true;
+ pri_2mhz_khz = ieee80211_channel_to_freq_khz(
+ oper->primary_ch, NL80211_BAND_S1GHZ);
+
+ if (u8_get_bits(oper->ch_width, S1G_OPER_CH_PRIMARY_LOCATION) ==
+ S1G_2M_PRIMARY_LOCATION_LOWER)
+ pri_1mhz_khz = pri_2mhz_khz - 500;
+ else
+ pri_1mhz_khz = pri_2mhz_khz + 500;
+ break;
+ default:
+ return false;
+ }
+
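+ /*
+ * Illustrative example (editor's note, not part of this change): a
+ * 2 MHz primary centred at 903.0 MHz with the 1 MHz primary in the
+ * lower half gives pri_1mhz_khz = 903000 - 500 = 902500, i.e. a
+ * 1 MHz primary centred at 902.5 MHz.
+ */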
+ oper_khz = ieee80211_channel_to_freq_khz(oper->oper_ch,
+ NL80211_BAND_S1GHZ);
+ chandef->center_freq1 = KHZ_TO_MHZ(oper_khz);
+ chandef->freq1_offset = oper_khz % 1000;
+ chandef->chan =
+ ieee80211_get_channel_khz(local->hw.wiphy, pri_1mhz_khz);
+
+ return chandef->chan;
}
int ieee80211_put_srates_elem(struct sk_buff *skb,
@@ -3265,14 +3315,24 @@ int ieee80211_put_srates_elem(struct sk_buff *skb,
return 0;
}
-int ieee80211_ave_rssi(struct ieee80211_vif *vif)
+int ieee80211_ave_rssi(struct ieee80211_vif *vif, int link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_link_data *link_data;
if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION))
return 0;
- return -ewma_beacon_signal_read(&sdata->deflink.u.mgd.ave_beacon_signal);
+ if (link_id < 0)
+ link_data = &sdata->deflink;
+ else
+ link_data = wiphy_dereference(sdata->local->hw.wiphy,
+ sdata->link[link_id]);
+
+ if (WARN_ON_ONCE(!link_data))
+ return -99;
+
+ return -ewma_beacon_signal_read(&link_data->u.mgd.ave_beacon_signal);
}
EXPORT_SYMBOL_GPL(ieee80211_ave_rssi);
@@ -3879,12 +3939,10 @@ int ieee80211_parse_p2p_noa(const struct ieee80211_p2p_noa_attr *attr,
}
EXPORT_SYMBOL(ieee80211_parse_p2p_noa);
-void ieee80211_recalc_dtim(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata)
+void ieee80211_recalc_dtim(struct ieee80211_sub_if_data *sdata, u64 tsf)
{
- u64 tsf = drv_get_tsf(local, sdata);
u64 dtim_count = 0;
- u16 beacon_int = sdata->vif.bss_conf.beacon_int * 1024;
+ u32 beacon_int = sdata->vif.bss_conf.beacon_int * 1024;
u8 dtim_period = sdata->vif.bss_conf.dtim_period;
struct ps_data *ps;
u8 bcns_from_dtim;
@@ -3920,6 +3978,33 @@ void ieee80211_recalc_dtim(struct ieee80211_local *local,
ps->dtim_count = dtim_count;
}
+/*
+ * Given a long beacon period, calculate the current index into
+ * that period to determine the number of TSBTTs until the next TBTT.
+ * It is completely valid to have a short beacon period that differs
+ * from the DTIM period (i.e. a TBTT that is not a DTIM).
+ */
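+/*
+ * Illustrative worked example (editor's note, not part of this change):
+ * with beacon_int = 100 TU (102400 us) and s1g_long_beacon_period = 10,
+ * a TSF of 2457600 us sits at TSBTT index 24, so sb_idx = 24 % 10 = 4
+ * and sb_count = 10 - 4 = 6 TSBTTs remain until the next TBTT.
+ */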
+void ieee80211_recalc_sb_count(struct ieee80211_sub_if_data *sdata, u64 tsf)
+{
+ u32 sb_idx;
+ struct ps_data *ps = &sdata->bss->ps;
+ u8 lb_period = sdata->vif.bss_conf.s1g_long_beacon_period;
+ u32 beacon_int = sdata->vif.bss_conf.beacon_int * 1024;
+
+ /* No mesh / IBSS support for short beaconing */
+ if (tsf == -1ULL || !lb_period ||
+ (sdata->vif.type != NL80211_IFTYPE_AP &&
+ sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
+ return;
+
+ /* find the current TSBTT index in our lb_period */
+ do_div(tsf, beacon_int);
+ sb_idx = do_div(tsf, lb_period);
+
+ /* num TSBTTs until the next TBTT */
+ ps->sb_count = sb_idx ? lb_period - sb_idx : 0;
+}
+
static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx)
{
@@ -3953,6 +4038,30 @@ static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local,
return radar_detect;
}
+bool ieee80211_is_radio_idx_in_scan_req(struct wiphy *wiphy,
+ struct cfg80211_scan_request *scan_req,
+ int radio_idx)
+{
+ struct ieee80211_channel *chan;
+ int i, chan_radio_idx;
+
+ for (i = 0; i < scan_req->n_channels; i++) {
+ chan = scan_req->channels[i];
+ chan_radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chan);
+
+ /* The radio index either matched successfully, or an error
+ * occurred. For example, if radio-level information is
+ * missing, the same error value is returned. This
+ * typically implies a single-radio setup, in which case
+ * the operation should not be allowed.
+ */
+ if (chan_radio_idx == radio_idx)
+ return true;
+ }
+
+ return false;
+}
+
static u32
__ieee80211_get_radio_mask(struct ieee80211_sub_if_data *sdata)
{
@@ -4428,3 +4537,11 @@ void ieee80211_clear_tpe(struct ieee80211_parsed_tpe *tpe)
sizeof(tpe->psd_reg_client[i].power));
}
}
+
+bool ieee80211_vif_nan_started(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ return vif->type == NL80211_IFTYPE_NAN && sdata->u.nan.started;
+}
+EXPORT_SYMBOL_GPL(ieee80211_vif_nan_started);
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index c5c5d16ed6c8..b099d79e8fbb 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -672,8 +672,9 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED;
}
} else {
- pr_warn_ratelimited("Ignoring NSS change in VHT Operating Mode Notification from %pM with invalid nss %d",
- link_sta->pub->addr, nss);
+ sdata_dbg(sdata,
+ "Ignore NSS change to invalid %d in VHT opmode notif from %pM",
+ nss, link_sta->pub->addr);
}
}
diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
index 9b12ca97f412..b99ba14f39d2 100644
--- a/net/mctp/af_mctp.c
+++ b/net/mctp/af_mctp.c
@@ -53,6 +53,7 @@ static int mctp_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
{
struct sock *sk = sock->sk;
struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
+ struct net *net = sock_net(&msk->sk);
struct sockaddr_mctp *smctp;
int rc;
@@ -73,14 +74,48 @@ static int mctp_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
lock_sock(sk);
- /* TODO: allow rebind */
if (sk_hashed(sk)) {
rc = -EADDRINUSE;
goto out_release;
}
- msk->bind_net = smctp->smctp_network;
- msk->bind_addr = smctp->smctp_addr.s_addr;
- msk->bind_type = smctp->smctp_type & 0x7f; /* ignore the IC bit */
+
+ msk->bind_local_addr = smctp->smctp_addr.s_addr;
+
+ /* MCTP_NET_ANY with a specific EID is resolved to the default net
+ * at bind() time.
+ * For bind_addr=MCTP_ADDR_ANY it is handled specially at route
+ * lookup time.
+ */
+ if (smctp->smctp_network == MCTP_NET_ANY &&
+ msk->bind_local_addr != MCTP_ADDR_ANY) {
+ msk->bind_net = mctp_default_net(net);
+ } else {
+ msk->bind_net = smctp->smctp_network;
+ }
+
+ /* ignore the IC bit */
+ smctp->smctp_type &= 0x7f;
+
+ if (msk->bind_peer_set) {
+ if (msk->bind_type != smctp->smctp_type) {
+ /* Prior connect() had a different type */
+ rc = -EINVAL;
+ goto out_release;
+ }
+
+ if (msk->bind_net == MCTP_NET_ANY) {
+ /* Restrict to the network passed to connect() */
+ msk->bind_net = msk->bind_peer_net;
+ }
+
+ if (msk->bind_net != msk->bind_peer_net) {
+ /* connect() had a different net to bind() */
+ rc = -EINVAL;
+ goto out_release;
+ }
+ } else {
+ msk->bind_type = smctp->smctp_type;
+ }
rc = sk->sk_prot->hash(sk);
@@ -90,6 +125,67 @@ out_release:
return rc;
}
+/* Used to set a specific peer prior to bind. Not used for outbound
+ * connections (Tag Owner set) since MCTP is a datagram protocol.
+ */
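+/*
+ * Illustrative usage sketch (editor's note, not part of this change,
+ * describing a hypothetical responder socket): to receive only type-1
+ * messages from peer EID 9, userspace would connect() with
+ * smctp_addr = 9, smctp_type = 1 and smctp_tag = 0, then bind() with
+ * the local address (or MCTP_ADDR_ANY) and the same smctp_type.
+ */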
+static int mctp_connect(struct socket *sock, struct sockaddr *addr,
+ int addrlen, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
+ struct net *net = sock_net(&msk->sk);
+ struct sockaddr_mctp *smctp;
+ int rc;
+
+ if (addrlen != sizeof(*smctp))
+ return -EINVAL;
+
+ if (addr->sa_family != AF_MCTP)
+ return -EAFNOSUPPORT;
+
+ /* It's a valid sockaddr for MCTP, cast and do protocol checks */
+ smctp = (struct sockaddr_mctp *)addr;
+
+ if (!mctp_sockaddr_is_ok(smctp))
+ return -EINVAL;
+
+ /* Can't bind by tag */
+ if (smctp->smctp_tag)
+ return -EINVAL;
+
+ /* IC bit must be unset */
+ if (smctp->smctp_type & 0x80)
+ return -EINVAL;
+
+ lock_sock(sk);
+
+ if (sk_hashed(sk)) {
+ /* bind() already */
+ rc = -EADDRINUSE;
+ goto out_release;
+ }
+
+ if (msk->bind_peer_set) {
+ /* connect() already */
+ rc = -EADDRINUSE;
+ goto out_release;
+ }
+
+ msk->bind_peer_set = true;
+ msk->bind_peer_addr = smctp->smctp_addr.s_addr;
+ msk->bind_type = smctp->smctp_type;
+ if (smctp->smctp_network == MCTP_NET_ANY)
+ msk->bind_peer_net = mctp_default_net(net);
+ else
+ msk->bind_peer_net = smctp->smctp_network;
+
+ rc = 0;
+
+out_release:
+ release_sock(sk);
+ return rc;
+}
+
static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
DECLARE_SOCKADDR(struct sockaddr_mctp *, addr, msg->msg_name);
@@ -97,8 +193,8 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
struct sock *sk = sock->sk;
struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
struct mctp_skb_cb *cb;
- struct mctp_route *rt;
struct sk_buff *skb = NULL;
+ struct mctp_dst dst;
int hlen;
if (addr) {
@@ -133,38 +229,34 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
if (msk->addr_ext && addrlen >= sizeof(struct sockaddr_mctp_ext)) {
DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
extaddr, msg->msg_name);
- struct net_device *dev;
-
- rc = -EINVAL;
- rcu_read_lock();
- dev = dev_get_by_index_rcu(sock_net(sk), extaddr->smctp_ifindex);
- /* check for correct halen */
- if (dev && extaddr->smctp_halen == dev->addr_len) {
- hlen = LL_RESERVED_SPACE(dev) + sizeof(struct mctp_hdr);
- rc = 0;
- }
- rcu_read_unlock();
+
+ if (!mctp_sockaddr_ext_is_ok(extaddr))
+ return -EINVAL;
+
+ rc = mctp_dst_from_extaddr(&dst, sock_net(sk),
+ extaddr->smctp_ifindex,
+ extaddr->smctp_halen,
+ extaddr->smctp_haddr);
if (rc)
- goto err_free;
- rt = NULL;
+ return rc;
+
} else {
- rt = mctp_route_lookup(sock_net(sk), addr->smctp_network,
- addr->smctp_addr.s_addr);
- if (!rt) {
- rc = -EHOSTUNREACH;
- goto err_free;
- }
- hlen = LL_RESERVED_SPACE(rt->dev->dev) + sizeof(struct mctp_hdr);
+ rc = mctp_route_lookup(sock_net(sk), addr->smctp_network,
+ addr->smctp_addr.s_addr, &dst);
+ if (rc)
+ return rc;
}
+ hlen = LL_RESERVED_SPACE(dst.dev->dev) + sizeof(struct mctp_hdr);
+
skb = sock_alloc_send_skb(sk, hlen + 1 + len,
msg->msg_flags & MSG_DONTWAIT, &rc);
if (!skb)
- return rc;
+ goto err_release_dst;
skb_reserve(skb, hlen);
- /* set type as fist byte in payload */
+ /* set type as first byte in payload */
*(u8 *)skb_put(skb, 1) = addr->smctp_type;
rc = memcpy_from_msg((void *)skb_put(skb, len), msg, len);
@@ -175,30 +267,16 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
cb = __mctp_cb(skb);
cb->net = addr->smctp_network;
- if (!rt) {
- /* fill extended address in cb */
- DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
- extaddr, msg->msg_name);
-
- if (!mctp_sockaddr_ext_is_ok(extaddr) ||
- extaddr->smctp_halen > sizeof(cb->haddr)) {
- rc = -EINVAL;
- goto err_free;
- }
-
- cb->ifindex = extaddr->smctp_ifindex;
- /* smctp_halen is checked above */
- cb->halen = extaddr->smctp_halen;
- memcpy(cb->haddr, extaddr->smctp_haddr, cb->halen);
- }
-
- rc = mctp_local_output(sk, rt, skb, addr->smctp_addr.s_addr,
+ rc = mctp_local_output(sk, &dst, skb, addr->smctp_addr.s_addr,
addr->smctp_tag);
+ mctp_dst_release(&dst);
return rc ? : len;
err_free:
kfree_skb(skb);
+err_release_dst:
+ mctp_dst_release(&dst);
return rc;
}
@@ -347,7 +425,7 @@ static int mctp_getsockopt(struct socket *sock, int level, int optname,
return 0;
}
- return -EINVAL;
+ return -ENOPROTOOPT;
}
/* helpers for reading/writing the tag ioc, handling compatibility across the
@@ -551,7 +629,7 @@ static const struct proto_ops mctp_dgram_ops = {
.family = PF_MCTP,
.release = mctp_release,
.bind = mctp_bind,
- .connect = sock_no_connect,
+ .connect = mctp_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
@@ -618,6 +696,7 @@ static int mctp_sk_init(struct sock *sk)
INIT_HLIST_HEAD(&msk->keys);
timer_setup(&msk->key_expiry, mctp_sk_expire_keys, 0);
+ msk->bind_peer_set = false;
return 0;
}
@@ -629,15 +708,48 @@ static void mctp_sk_close(struct sock *sk, long timeout)
static int mctp_sk_hash(struct sock *sk)
{
struct net *net = sock_net(sk);
+ struct sock *existing;
+ struct mctp_sock *msk;
+ mctp_eid_t remote;
+ u32 hash;
+ int rc;
+
+ msk = container_of(sk, struct mctp_sock, sk);
+
+ if (msk->bind_peer_set)
+ remote = msk->bind_peer_addr;
+ else
+ remote = MCTP_ADDR_ANY;
+ hash = mctp_bind_hash(msk->bind_type, msk->bind_local_addr, remote);
+
+ mutex_lock(&net->mctp.bind_lock);
+
+ /* Prevent duplicate binds. */
+ sk_for_each(existing, &net->mctp.binds[hash]) {
+ struct mctp_sock *mex =
+ container_of(existing, struct mctp_sock, sk);
+
+ bool same_peer = (mex->bind_peer_set && msk->bind_peer_set &&
+ mex->bind_peer_addr == msk->bind_peer_addr) ||
+ (!mex->bind_peer_set && !msk->bind_peer_set);
+
+ if (mex->bind_type == msk->bind_type &&
+ mex->bind_local_addr == msk->bind_local_addr && same_peer &&
+ mex->bind_net == msk->bind_net) {
+ rc = -EADDRINUSE;
+ goto out;
+ }
+ }
/* Bind lookup runs under RCU, remain live during that. */
sock_set_flag(sk, SOCK_RCU_FREE);
- mutex_lock(&net->mctp.bind_lock);
- sk_add_node_rcu(sk, &net->mctp.binds);
- mutex_unlock(&net->mctp.bind_lock);
+ sk_add_node_rcu(sk, &net->mctp.binds[hash]);
+ rc = 0;
- return 0;
+out:
+ mutex_unlock(&net->mctp.bind_lock);
+ return rc;
}
static void mctp_sk_unhash(struct sock *sk)
@@ -793,3 +905,7 @@ MODULE_DESCRIPTION("MCTP core");
MODULE_AUTHOR("Jeremy Kerr <jk@codeconstruct.com.au>");
MODULE_ALIAS_NETPROTO(PF_MCTP);
+
+#if IS_ENABLED(CONFIG_MCTP_TEST)
+#include "test/sock-test.c"
+#endif
diff --git a/net/mctp/route.c b/net/mctp/route.c
index d9c8e5a5f9ce..4d314e062ba9 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -17,6 +17,8 @@
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
+#include <kunit/static_stub.h>
+
#include <uapi/linux/if_arp.h>
#include <net/mctp.h>
@@ -32,39 +34,42 @@ static const unsigned long mctp_key_lifetime = 6 * CONFIG_HZ;
static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev);
/* route output callbacks */
-static int mctp_route_discard(struct mctp_route *route, struct sk_buff *skb)
+static int mctp_dst_discard(struct mctp_dst *dst, struct sk_buff *skb)
{
kfree_skb(skb);
return 0;
}
-static struct mctp_sock *mctp_lookup_bind(struct net *net, struct sk_buff *skb)
+static struct mctp_sock *mctp_lookup_bind_details(struct net *net,
+ struct sk_buff *skb,
+ u8 type, u8 dest,
+ u8 src, bool allow_net_any)
{
struct mctp_skb_cb *cb = mctp_cb(skb);
- struct mctp_hdr *mh;
struct sock *sk;
- u8 type;
+ u8 hash;
- WARN_ON(!rcu_read_lock_held());
+ WARN_ON_ONCE(!rcu_read_lock_held());
- /* TODO: look up in skb->cb? */
- mh = mctp_hdr(skb);
-
- if (!skb_headlen(skb))
- return NULL;
+ hash = mctp_bind_hash(type, dest, src);
- type = (*(u8 *)skb->data) & 0x7f;
-
- sk_for_each_rcu(sk, &net->mctp.binds) {
+ sk_for_each_rcu(sk, &net->mctp.binds[hash]) {
struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
+ if (!allow_net_any && msk->bind_net == MCTP_NET_ANY)
+ continue;
+
if (msk->bind_net != MCTP_NET_ANY && msk->bind_net != cb->net)
continue;
if (msk->bind_type != type)
continue;
- if (!mctp_address_matches(msk->bind_addr, mh->dest))
+ if (msk->bind_peer_set &&
+ !mctp_address_matches(msk->bind_peer_addr, src))
+ continue;
+
+ if (!mctp_address_matches(msk->bind_local_addr, dest))
continue;
return msk;
@@ -73,6 +78,54 @@ static struct mctp_sock *mctp_lookup_bind(struct net *net, struct sk_buff *skb)
return NULL;
}
+static struct mctp_sock *mctp_lookup_bind(struct net *net, struct sk_buff *skb)
+{
+ struct mctp_sock *msk;
+ struct mctp_hdr *mh;
+ u8 type;
+
+ /* TODO: look up in skb->cb? */
+ mh = mctp_hdr(skb);
+
+ if (!skb_headlen(skb))
+ return NULL;
+
+ type = (*(u8 *)skb->data) & 0x7f;
+
+ /* Look for binds in order of widening scope. A given destination or
+ * source address also implies matching on a particular network.
+ *
+ * - Matching destination and source
+ * - Matching destination
+ * - Matching source
+ * - Matching network, any address
+ * - Any network or address
+ */
+
+ msk = mctp_lookup_bind_details(net, skb, type, mh->dest, mh->src,
+ false);
+ if (msk)
+ return msk;
+ msk = mctp_lookup_bind_details(net, skb, type, MCTP_ADDR_ANY, mh->src,
+ false);
+ if (msk)
+ return msk;
+ msk = mctp_lookup_bind_details(net, skb, type, mh->dest, MCTP_ADDR_ANY,
+ false);
+ if (msk)
+ return msk;
+ msk = mctp_lookup_bind_details(net, skb, type, MCTP_ADDR_ANY,
+ MCTP_ADDR_ANY, false);
+ if (msk)
+ return msk;
+ msk = mctp_lookup_bind_details(net, skb, type, MCTP_ADDR_ANY,
+ MCTP_ADDR_ANY, true);
+ if (msk)
+ return msk;
+
+ return NULL;
+}
+
/* A note on the key allocations.
*
* struct net->mctp.keys contains our set of currently-allocated keys for
@@ -325,6 +378,7 @@ static void mctp_skb_set_flow(struct sk_buff *skb, struct mctp_sk_key *key) {}
static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev) {}
#endif
+/* takes ownership of skb, both in success and failure cases */
static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
{
struct mctp_hdr *hdr = mctp_hdr(skb);
@@ -334,8 +388,10 @@ static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
& MCTP_HDR_SEQ_MASK;
if (!key->reasm_head) {
- /* Since we're manipulating the shared frag_list, ensure it isn't
- * shared with any other SKBs.
+ /* Since we're manipulating the shared frag_list, ensure it
+ * isn't shared with any other SKBs. In the cloned case,
+ * this will free the skb; callers can no longer access it
+ * safely.
*/
key->reasm_head = skb_unshare(skb, GFP_ATOMIC);
if (!key->reasm_head)
@@ -349,10 +405,10 @@ static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
exp_seq = (key->last_seq + 1) & MCTP_HDR_SEQ_MASK;
if (this_seq != exp_seq)
- return -EINVAL;
+ goto err_free;
if (key->reasm_head->len + skb->len > mctp_message_maxlen)
- return -EINVAL;
+ goto err_free;
skb->next = NULL;
skb->sk = NULL;
@@ -366,9 +422,13 @@ static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb)
key->reasm_head->truesize += skb->truesize;
return 0;
+
+err_free:
+ kfree_skb(skb);
+ return -EINVAL;
}
-static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+static int mctp_dst_input(struct mctp_dst *dst, struct sk_buff *skb)
{
struct mctp_sk_key *key, *any_key = NULL;
struct net *net = dev_net(skb->dev);
@@ -392,6 +452,9 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
*/
skb_orphan(skb);
+ if (skb->pkt_type == PACKET_OUTGOING)
+ skb->pkt_type = PACKET_LOOPBACK;
+
/* ensure we have enough data for a header and a type */
if (skb->len < sizeof(struct mctp_hdr) + 1)
goto out;
@@ -476,18 +539,16 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* key isn't observable yet
*/
mctp_frag_queue(key, skb);
+ skb = NULL;
/* if the key_add fails, we've raced with another
* SOM packet with the same src, dest and tag. There's
* no way to distinguish future packets, so all we
- * can do is drop; we'll free the skb on exit from
- * this function.
+ * can do is drop.
*/
rc = mctp_key_add(key, msk);
- if (!rc) {
+ if (!rc)
trace_mctp_key_acquire(key);
- skb = NULL;
- }
/* we don't need to release key->lock on exit, so
* clean up here and suppress the unlock via
@@ -505,8 +566,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
key = NULL;
} else {
rc = mctp_frag_queue(key, skb);
- if (!rc)
- skb = NULL;
+ skb = NULL;
}
}
@@ -516,17 +576,16 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
*/
/* we need to be continuing an existing reassembly... */
- if (!key->reasm_head)
+ if (!key->reasm_head) {
rc = -EINVAL;
- else
+ } else {
rc = mctp_frag_queue(key, skb);
+ skb = NULL;
+ }
if (rc)
goto out_unlock;
- /* we've queued; the queue owns the skb now */
- skb = NULL;
-
/* end of message? deliver to socket, and we're done with
* the reassembly/response key
*/
@@ -556,39 +615,31 @@ out:
return rc;
}
-static unsigned int mctp_route_mtu(struct mctp_route *rt)
+static int mctp_dst_output(struct mctp_dst *dst, struct sk_buff *skb)
{
- return rt->mtu ?: READ_ONCE(rt->dev->dev->mtu);
-}
-
-static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb)
-{
- struct mctp_skb_cb *cb = mctp_cb(skb);
- struct mctp_hdr *hdr = mctp_hdr(skb);
char daddr_buf[MAX_ADDR_LEN];
char *daddr = NULL;
- unsigned int mtu;
int rc;
skb->protocol = htons(ETH_P_MCTP);
+ skb->pkt_type = PACKET_OUTGOING;
- mtu = READ_ONCE(skb->dev->mtu);
- if (skb->len > mtu) {
+ if (skb->len > dst->mtu) {
kfree_skb(skb);
return -EMSGSIZE;
}
- if (cb->ifindex) {
- /* direct route; use the hwaddr we stashed in sendmsg */
- if (cb->halen != skb->dev->addr_len) {
+ /* direct route; use the hwaddr we stashed in sendmsg */
+ if (dst->halen) {
+ if (dst->halen != skb->dev->addr_len) {
/* sanity check, sendmsg should have already caught this */
kfree_skb(skb);
return -EMSGSIZE;
}
- daddr = cb->haddr;
+ daddr = dst->haddr;
} else {
/* If lookup fails let the device handle daddr==NULL */
- if (mctp_neigh_lookup(route->dev, hdr->dest, daddr_buf) == 0)
+ if (mctp_neigh_lookup(dst->dev, dst->nexthop, daddr_buf) == 0)
daddr = daddr_buf;
}
@@ -599,7 +650,7 @@ static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb)
return -EHOSTUNREACH;
}
- mctp_flow_prepare_output(skb, route->dev);
+ mctp_flow_prepare_output(skb, dst->dev);
rc = dev_queue_xmit(skb);
if (rc)
@@ -612,7 +663,8 @@ static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb)
static void mctp_route_release(struct mctp_route *rt)
{
if (refcount_dec_and_test(&rt->refs)) {
- mctp_dev_put(rt->dev);
+ if (rt->dst_type == MCTP_ROUTE_DIRECT)
+ mctp_dev_put(rt->dev);
kfree_rcu(rt, rcu);
}
}
@@ -628,7 +680,7 @@ static struct mctp_route *mctp_route_alloc(void)
INIT_LIST_HEAD(&rt->list);
refcount_set(&rt->refs, 1);
- rt->output = mctp_route_discard;
+ rt->output = mctp_dst_discard;
return rt;
}
@@ -801,10 +853,16 @@ static struct mctp_sk_key *mctp_lookup_prealloc_tag(struct mctp_sock *msk,
}
/* routing lookups */
+static unsigned int mctp_route_netid(struct mctp_route *rt)
+{
+ return rt->dst_type == MCTP_ROUTE_DIRECT ?
+ READ_ONCE(rt->dev->net) : rt->gateway.net;
+}
+
static bool mctp_rt_match_eid(struct mctp_route *rt,
unsigned int net, mctp_eid_t eid)
{
- return READ_ONCE(rt->dev->net) == net &&
+ return mctp_route_netid(rt) == net &&
rt->min <= eid && rt->max >= eid;
}
@@ -813,54 +871,150 @@ static bool mctp_rt_compare_exact(struct mctp_route *rt1,
struct mctp_route *rt2)
{
ASSERT_RTNL();
- return rt1->dev->net == rt2->dev->net &&
+ return mctp_route_netid(rt1) == mctp_route_netid(rt2) &&
rt1->min == rt2->min &&
rt1->max == rt2->max;
}
-struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
- mctp_eid_t daddr)
+/* must only be called on a direct route, as the final output hop */
+static void mctp_dst_from_route(struct mctp_dst *dst, mctp_eid_t eid,
+ unsigned int mtu, struct mctp_route *route)
{
- struct mctp_route *tmp, *rt = NULL;
+ mctp_dev_hold(route->dev);
+ dst->nexthop = eid;
+ dst->dev = route->dev;
+ dst->mtu = READ_ONCE(dst->dev->dev->mtu);
+ if (mtu)
+ dst->mtu = min(dst->mtu, mtu);
+ dst->halen = 0;
+ dst->output = route->output;
+}
+
+int mctp_dst_from_extaddr(struct mctp_dst *dst, struct net *net, int ifindex,
+ unsigned char halen, const unsigned char *haddr)
+{
+ struct net_device *netdev;
+ struct mctp_dev *dev;
+ int rc = -ENOENT;
+
+ if (halen > sizeof(dst->haddr))
+ return -EINVAL;
rcu_read_lock();
- list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
- /* TODO: add metrics */
- if (mctp_rt_match_eid(tmp, dnet, daddr)) {
- if (refcount_inc_not_zero(&tmp->refs)) {
- rt = tmp;
- break;
- }
- }
+ netdev = dev_get_by_index_rcu(net, ifindex);
+ if (!netdev)
+ goto out_unlock;
+
+ if (netdev->addr_len != halen) {
+ rc = -EINVAL;
+ goto out_unlock;
}
+ dev = __mctp_dev_get(netdev);
+ if (!dev)
+ goto out_unlock;
+
+ dst->dev = dev;
+ dst->mtu = READ_ONCE(netdev->mtu);
+ dst->halen = halen;
+ dst->output = mctp_dst_output;
+ dst->nexthop = 0;
+ memcpy(dst->haddr, haddr, halen);
+
+ rc = 0;
+
+out_unlock:
rcu_read_unlock();
+ return rc;
+}
- return rt;
+void mctp_dst_release(struct mctp_dst *dst)
+{
+ mctp_dev_put(dst->dev);
}
-static struct mctp_route *mctp_route_lookup_null(struct net *net,
- struct net_device *dev)
+static struct mctp_route *mctp_route_lookup_single(struct net *net,
+ unsigned int dnet,
+ mctp_eid_t daddr)
{
- struct mctp_route *tmp, *rt = NULL;
+ struct mctp_route *rt;
+
+ list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
+ if (mctp_rt_match_eid(rt, dnet, daddr))
+ return rt;
+ }
+
+ return NULL;
+}
+
+/* populates *dst on successful lookup, if set */
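+/*
+ * Illustrative example (editor's note, not part of this change): a
+ * gateway route covering EID 20 via EID 9 is resolved iteratively;
+ * the second pass finds the direct route covering EID 9, so *dst is
+ * populated with nexthop 9 and the smallest non-zero MTU seen along
+ * the path.
+ */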
+int mctp_route_lookup(struct net *net, unsigned int dnet,
+ mctp_eid_t daddr, struct mctp_dst *dst)
+{
+ const unsigned int max_depth = 32;
+ unsigned int depth, mtu = 0;
+ int rc = -EHOSTUNREACH;
rcu_read_lock();
- list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
- if (tmp->dev->dev == dev && tmp->type == RTN_LOCAL &&
- refcount_inc_not_zero(&tmp->refs)) {
- rt = tmp;
+ for (depth = 0; depth < max_depth; depth++) {
+ struct mctp_route *rt;
+
+ rt = mctp_route_lookup_single(net, dnet, daddr);
+ if (!rt)
+ break;
+
+ /* clamp mtu to the smallest in the path, allowing 0
+ * to specify no restrictions
+ */
+ if (mtu && rt->mtu)
+ mtu = min(mtu, rt->mtu);
+ else
+ mtu = mtu ?: rt->mtu;
+
+ if (rt->dst_type == MCTP_ROUTE_DIRECT) {
+ if (dst)
+ mctp_dst_from_route(dst, daddr, mtu, rt);
+ rc = 0;
break;
+
+ } else if (rt->dst_type == MCTP_ROUTE_GATEWAY) {
+ daddr = rt->gateway.eid;
}
}
rcu_read_unlock();
- return rt;
+ return rc;
}
-static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
+static int mctp_route_lookup_null(struct net *net, struct net_device *dev,
+ struct mctp_dst *dst)
+{
+ int rc = -EHOSTUNREACH;
+ struct mctp_route *rt;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
+ if (rt->dst_type != MCTP_ROUTE_DIRECT || rt->type != RTN_LOCAL)
+ continue;
+
+ if (rt->dev->dev != dev)
+ continue;
+
+ mctp_dst_from_route(dst, 0, 0, rt);
+ rc = 0;
+ break;
+ }
+
+ rcu_read_unlock();
+
+ return rc;
+}
+
+static int mctp_do_fragment_route(struct mctp_dst *dst, struct sk_buff *skb,
unsigned int mtu, u8 tag)
{
const unsigned int hlen = sizeof(struct mctp_hdr);
@@ -933,7 +1087,7 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
skb_ext_copy(skb2, skb);
/* do route */
- rc = rt->output(rt, skb2);
+ rc = dst->output(dst, skb2);
if (rc)
break;
@@ -945,68 +1099,34 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
return rc;
}
-int mctp_local_output(struct sock *sk, struct mctp_route *rt,
+int mctp_local_output(struct sock *sk, struct mctp_dst *dst,
struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag)
{
struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
- struct mctp_skb_cb *cb = mctp_cb(skb);
- struct mctp_route tmp_rt = {0};
struct mctp_sk_key *key;
struct mctp_hdr *hdr;
unsigned long flags;
unsigned int netid;
unsigned int mtu;
mctp_eid_t saddr;
- bool ext_rt;
int rc;
u8 tag;
- rc = -ENODEV;
-
- if (rt) {
- ext_rt = false;
- if (WARN_ON(!rt->dev))
- goto out_release;
-
- } else if (cb->ifindex) {
- struct net_device *dev;
-
- ext_rt = true;
- rt = &tmp_rt;
-
- rcu_read_lock();
- dev = dev_get_by_index_rcu(sock_net(sk), cb->ifindex);
- if (!dev) {
- rcu_read_unlock();
- goto out_free;
- }
- rt->dev = __mctp_dev_get(dev);
- rcu_read_unlock();
-
- if (!rt->dev)
- goto out_release;
-
- /* establish temporary route - we set up enough to keep
- * mctp_route_output happy
- */
- rt->output = mctp_route_output;
- rt->mtu = 0;
+ KUNIT_STATIC_STUB_REDIRECT(mctp_local_output, sk, dst, skb, daddr,
+ req_tag);
- } else {
- rc = -EINVAL;
- goto out_free;
- }
+ rc = -ENODEV;
- spin_lock_irqsave(&rt->dev->addrs_lock, flags);
- if (rt->dev->num_addrs == 0) {
+ spin_lock_irqsave(&dst->dev->addrs_lock, flags);
+ if (dst->dev->num_addrs == 0) {
rc = -EHOSTUNREACH;
} else {
/* use the outbound interface's first address as our source */
- saddr = rt->dev->addrs[0];
+ saddr = dst->dev->addrs[0];
rc = 0;
}
- spin_unlock_irqrestore(&rt->dev->addrs_lock, flags);
- netid = READ_ONCE(rt->dev->net);
+ spin_unlock_irqrestore(&dst->dev->addrs_lock, flags);
+ netid = READ_ONCE(dst->dev->net);
if (rc)
goto out_release;
@@ -1032,15 +1152,13 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
tag = req_tag & MCTP_TAG_MASK;
}
+ skb->pkt_type = PACKET_OUTGOING;
skb->protocol = htons(ETH_P_MCTP);
skb->priority = 0;
skb_reset_transport_header(skb);
skb_push(skb, sizeof(struct mctp_hdr));
skb_reset_network_header(skb);
- skb->dev = rt->dev->dev;
-
- /* cb->net will have been set on initial ingress */
- cb->src = saddr;
+ skb->dev = dst->dev->dev;
/* set up common header fields */
hdr = mctp_hdr(skb);
@@ -1048,73 +1166,64 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
hdr->dest = daddr;
hdr->src = saddr;
- mtu = mctp_route_mtu(rt);
+ mtu = dst->mtu;
if (skb->len + sizeof(struct mctp_hdr) <= mtu) {
hdr->flags_seq_tag = MCTP_HDR_FLAG_SOM |
MCTP_HDR_FLAG_EOM | tag;
- rc = rt->output(rt, skb);
+ rc = dst->output(dst, skb);
} else {
- rc = mctp_do_fragment_route(rt, skb, mtu, tag);
+ rc = mctp_do_fragment_route(dst, skb, mtu, tag);
}
/* route output functions consume the skb, even on error */
skb = NULL;
out_release:
- if (!ext_rt)
- mctp_route_release(rt);
-
- mctp_dev_put(tmp_rt.dev);
-
-out_free:
kfree_skb(skb);
return rc;
}
/* route management */
-static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start,
- unsigned int daddr_extent, unsigned int mtu,
- unsigned char type)
+
+/* mctp_route_add(): Add the provided route, previously allocated via
+ * mctp_route_alloc(). On success, takes ownership of @rt, which includes a
+ * hold on rt->dev for usage in the route table. On failure a caller will want
+ * to mctp_route_release().
+ *
+ * We expect that the caller has set rt->type, rt->dst_type, rt->min, rt->max,
+ * rt->mtu and either rt->dev (with a reference held appropriately) or
+ * rt->gateway. Other fields will be populated.
+ */
+static int mctp_route_add(struct net *net, struct mctp_route *rt)
{
- int (*rtfn)(struct mctp_route *rt, struct sk_buff *skb);
- struct net *net = dev_net(mdev->dev);
- struct mctp_route *rt, *ert;
+ struct mctp_route *ert;
- if (!mctp_address_unicast(daddr_start))
+ if (!mctp_address_unicast(rt->min) || !mctp_address_unicast(rt->max))
return -EINVAL;
- if (daddr_extent > 0xff || daddr_start + daddr_extent >= 255)
+ if (rt->dst_type == MCTP_ROUTE_DIRECT && !rt->dev)
+ return -EINVAL;
+
+ if (rt->dst_type == MCTP_ROUTE_GATEWAY && !rt->gateway.eid)
return -EINVAL;
- switch (type) {
+ switch (rt->type) {
case RTN_LOCAL:
- rtfn = mctp_route_input;
+ rt->output = mctp_dst_input;
break;
case RTN_UNICAST:
- rtfn = mctp_route_output;
+ rt->output = mctp_dst_output;
break;
default:
return -EINVAL;
}
- rt = mctp_route_alloc();
- if (!rt)
- return -ENOMEM;
-
- rt->min = daddr_start;
- rt->max = daddr_start + daddr_extent;
- rt->mtu = mtu;
- rt->dev = mdev;
- mctp_dev_hold(rt->dev);
- rt->type = type;
- rt->output = rtfn;
-
ASSERT_RTNL();
+
/* Prevent duplicate identical routes. */
list_for_each_entry(ert, &net->mctp.routes, list) {
if (mctp_rt_compare_exact(rt, ert)) {
- mctp_route_release(rt);
return -EEXIST;
}
}
@@ -1124,10 +1233,10 @@ static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start,
return 0;
}
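/* A minimal caller-side sketch of the ownership contract documented above
 * (illustrative only; mctp_route_add_local() below is the in-tree user).
 * For a hypothetical direct unicast route to EID 9 on @mdev:
 *
 *	rt = mctp_route_alloc();
 *	if (!rt)
 *		return -ENOMEM;
 *	rt->min = 9;
 *	rt->max = 9;
 *	rt->type = RTN_UNICAST;
 *	rt->dst_type = MCTP_ROUTE_DIRECT;
 *	rt->dev = mdev;
 *	mctp_dev_hold(rt->dev);
 *	rc = mctp_route_add(dev_net(mdev->dev), rt);
 *	if (rc)
 *		mctp_route_release(rt);
 */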
-static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
- unsigned int daddr_extent, unsigned char type)
+static int mctp_route_remove(struct net *net, unsigned int netid,
+ mctp_eid_t daddr_start, unsigned int daddr_extent,
+ unsigned char type)
{
- struct net *net = dev_net(mdev->dev);
struct mctp_route *rt, *tmp;
mctp_eid_t daddr_end;
bool dropped;
@@ -1141,7 +1250,7 @@ static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
ASSERT_RTNL();
list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
- if (rt->dev == mdev &&
+ if (mctp_route_netid(rt) == netid &&
rt->min == daddr_start && rt->max == daddr_end &&
rt->type == type) {
list_del_rcu(&rt->list);
@@ -1156,12 +1265,32 @@ static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr)
{
- return mctp_route_add(mdev, addr, 0, 0, RTN_LOCAL);
+ struct mctp_route *rt;
+ int rc;
+
+ rt = mctp_route_alloc();
+ if (!rt)
+ return -ENOMEM;
+
+ rt->min = addr;
+ rt->max = addr;
+ rt->dst_type = MCTP_ROUTE_DIRECT;
+ rt->dev = mdev;
+ rt->type = RTN_LOCAL;
+
+ mctp_dev_hold(rt->dev);
+
+ rc = mctp_route_add(dev_net(mdev->dev), rt);
+ if (rc)
+ mctp_route_release(rt);
+
+ return rc;
}
int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr)
{
- return mctp_route_remove(mdev, addr, 0, RTN_LOCAL);
+ return mctp_route_remove(dev_net(mdev->dev), mdev->net,
+ addr, 0, RTN_LOCAL);
}
/* removes all entries for a given device */
@@ -1172,7 +1301,7 @@ void mctp_route_remove_dev(struct mctp_dev *mdev)
ASSERT_RTNL();
list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
- if (rt->dev == mdev) {
+ if (rt->dst_type == MCTP_ROUTE_DIRECT && rt->dev == mdev) {
list_del_rcu(&rt->list);
/* TODO: immediate RTM_DELROUTE */
mctp_route_release(rt);
@@ -1189,8 +1318,9 @@ static int mctp_pkttype_receive(struct sk_buff *skb, struct net_device *dev,
struct net *net = dev_net(dev);
struct mctp_dev *mdev;
struct mctp_skb_cb *cb;
- struct mctp_route *rt;
+ struct mctp_dst dst;
struct mctp_hdr *mh;
+ int rc;
rcu_read_lock();
mdev = __mctp_dev_get(dev);
@@ -1232,17 +1362,17 @@ static int mctp_pkttype_receive(struct sk_buff *skb, struct net_device *dev,
cb->net = READ_ONCE(mdev->net);
cb->ifindex = dev->ifindex;
- rt = mctp_route_lookup(net, cb->net, mh->dest);
+ rc = mctp_route_lookup(net, cb->net, mh->dest, &dst);
/* NULL EID, but addressed to our physical address */
- if (!rt && mh->dest == MCTP_ADDR_NULL && skb->pkt_type == PACKET_HOST)
- rt = mctp_route_lookup_null(net, dev);
+ if (rc && mh->dest == MCTP_ADDR_NULL && skb->pkt_type == PACKET_HOST)
+ rc = mctp_route_lookup_null(net, dev, &dst);
- if (!rt)
+ if (rc)
goto err_drop;
- rt->output(rt, skb);
- mctp_route_release(rt);
+ dst.output(&dst, skb);
+ mctp_dst_release(&dst);
mctp_dev_put(mdev);
return NET_RX_SUCCESS;
@@ -1264,19 +1394,28 @@ static const struct nla_policy rta_mctp_policy[RTA_MAX + 1] = {
[RTA_DST] = { .type = NLA_U8 },
[RTA_METRICS] = { .type = NLA_NESTED },
[RTA_OIF] = { .type = NLA_U32 },
+ [RTA_GATEWAY] = NLA_POLICY_EXACT_LEN(sizeof(struct mctp_fq_addr)),
+};
+
+static const struct nla_policy rta_metrics_policy[RTAX_MAX + 1] = {
+ [RTAX_MTU] = { .type = NLA_U32 },
};
-/* Common part for RTM_NEWROUTE and RTM_DELROUTE parsing.
- * tb must hold RTA_MAX+1 elements.
+/* base parsing; common to both _lookup and _populate variants.
+ *
+ * For gateway routes (which have a RTA_GATEWAY, and no RTA_OIF), we populate
+ * *gatewayp. For direct routes (RTA_OIF, no RTA_GATEWAY), we populate *mdev.
*/
-static int mctp_route_nlparse(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack,
- struct nlattr **tb, struct rtmsg **rtm,
- struct mctp_dev **mdev, mctp_eid_t *daddr_start)
+static int mctp_route_nlparse_common(struct net *net, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack,
+ struct nlattr **tb, struct rtmsg **rtm,
+ struct mctp_dev **mdev,
+ struct mctp_fq_addr *gatewayp,
+ mctp_eid_t *daddr_start)
{
- struct net *net = sock_net(skb->sk);
+ struct mctp_fq_addr *gateway = NULL;
+ unsigned int ifindex = 0;
struct net_device *dev;
- unsigned int ifindex;
int rc;
rc = nlmsg_parse(nlh, sizeof(struct rtmsg), tb, RTA_MAX,
@@ -1292,11 +1431,44 @@ static int mctp_route_nlparse(struct sk_buff *skb, struct nlmsghdr *nlh,
}
*daddr_start = nla_get_u8(tb[RTA_DST]);
- if (!tb[RTA_OIF]) {
- NL_SET_ERR_MSG(extack, "ifindex missing");
+ if (tb[RTA_OIF])
+ ifindex = nla_get_u32(tb[RTA_OIF]);
+
+ if (tb[RTA_GATEWAY])
+ gateway = nla_data(tb[RTA_GATEWAY]);
+
+ if (ifindex && gateway) {
+ NL_SET_ERR_MSG(extack,
+ "cannot specify both ifindex and gateway");
+ return -EINVAL;
+
+ } else if (ifindex) {
+ dev = __dev_get_by_index(net, ifindex);
+ if (!dev) {
+ NL_SET_ERR_MSG(extack, "bad ifindex");
+ return -ENODEV;
+ }
+ *mdev = mctp_dev_get_rtnl(dev);
+ if (!*mdev)
+ return -ENODEV;
+ gatewayp->eid = 0;
+
+ } else if (gateway) {
+ if (!mctp_address_unicast(gateway->eid)) {
+ NL_SET_ERR_MSG(extack, "bad gateway");
+ return -EINVAL;
+ }
+
+ gatewayp->eid = gateway->eid;
+ gatewayp->net = gateway->net != MCTP_NET_ANY ?
+ gateway->net :
+ READ_ONCE(net->mctp.default_net);
+ *mdev = NULL;
+
+ } else {
+ NL_SET_ERR_MSG(extack, "no route output provided");
return -EINVAL;
}
- ifindex = nla_get_u32(tb[RTA_OIF]);
*rtm = nlmsg_data(nlh);
if ((*rtm)->rtm_family != AF_MCTP) {
@@ -1304,82 +1476,157 @@ static int mctp_route_nlparse(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EINVAL;
}
- dev = __dev_get_by_index(net, ifindex);
- if (!dev) {
- NL_SET_ERR_MSG(extack, "bad ifindex");
- return -ENODEV;
- }
- *mdev = mctp_dev_get_rtnl(dev);
- if (!*mdev)
- return -ENODEV;
-
- if (dev->flags & IFF_LOOPBACK) {
- NL_SET_ERR_MSG(extack, "no routes to loopback");
+ if ((*rtm)->rtm_type != RTN_UNICAST) {
+ NL_SET_ERR_MSG(extack, "rtm_type must be RTN_UNICAST");
return -EINVAL;
}
return 0;
}
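/* An illustrative sketch of the two mutually-exclusive forms accepted above:
 * a direct route carries RTA_OIF (u32 ifindex), while a gateway route
 * instead carries RTA_GATEWAY as a struct mctp_fq_addr, e.g. (values
 * invented):
 *
 *	struct mctp_fq_addr gw = {
 *		.net = MCTP_NET_ANY,
 *		.eid = 9,
 *	};
 *
 * where MCTP_NET_ANY resolves to the default net. In both forms RTA_DST is
 * the u8 start EID and rtm_dst_len the extent, so the resulting route covers
 * [RTA_DST, RTA_DST + rtm_dst_len].
 */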
-static const struct nla_policy rta_metrics_policy[RTAX_MAX + 1] = {
- [RTAX_MTU] = { .type = NLA_U32 },
-};
-
-static int mctp_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack)
+/* Route parsing for lookup operations; we only need the "route target"
+ * components (i.e., network and dest-EID range).
+ */
+static int mctp_route_nlparse_lookup(struct net *net, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack,
+ unsigned char *type, unsigned int *netid,
+ mctp_eid_t *daddr_start,
+ unsigned int *daddr_extent)
{
struct nlattr *tb[RTA_MAX + 1];
+ struct mctp_fq_addr gw;
+ struct mctp_dev *mdev;
+ struct rtmsg *rtm;
+ int rc;
+
+ rc = mctp_route_nlparse_common(net, nlh, extack, tb, &rtm,
+ &mdev, &gw, daddr_start);
+ if (rc)
+ return rc;
+
+ if (mdev) {
+ *netid = mdev->net;
+ } else if (gw.eid) {
+ *netid = gw.net;
+ } else {
+ /* bug: _nlparse_common should not allow this */
+ return -1;
+ }
+
+ *type = rtm->rtm_type;
+ *daddr_extent = rtm->rtm_dst_len;
+
+ return 0;
+}
+
+/* Full route parse for RTM_NEWROUTE: populate @rt. On success,
+ * MCTP_ROUTE_DIRECT routes (i.e., those with a direct dev) will hold a reference
+ * to that dev.
+ */
+static int mctp_route_nlparse_populate(struct net *net, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack,
+ struct mctp_route *rt)
+{
struct nlattr *tbx[RTAX_MAX + 1];
+ struct nlattr *tb[RTA_MAX + 1];
+ unsigned int daddr_extent;
+ struct mctp_fq_addr gw;
mctp_eid_t daddr_start;
- struct mctp_dev *mdev;
+ struct mctp_dev *dev;
struct rtmsg *rtm;
- unsigned int mtu;
+ u32 mtu = 0;
int rc;
- rc = mctp_route_nlparse(skb, nlh, extack, tb,
- &rtm, &mdev, &daddr_start);
- if (rc < 0)
+ rc = mctp_route_nlparse_common(net, nlh, extack, tb, &rtm,
+ &dev, &gw, &daddr_start);
+ if (rc)
return rc;
- if (rtm->rtm_type != RTN_UNICAST) {
- NL_SET_ERR_MSG(extack, "rtm_type must be RTN_UNICAST");
+ daddr_extent = rtm->rtm_dst_len;
+
+ if (daddr_extent > 0xff || daddr_extent + daddr_start >= 255) {
+ NL_SET_ERR_MSG(extack, "invalid eid range");
return -EINVAL;
}
- mtu = 0;
if (tb[RTA_METRICS]) {
rc = nla_parse_nested(tbx, RTAX_MAX, tb[RTA_METRICS],
rta_metrics_policy, NULL);
- if (rc < 0)
+ if (rc < 0) {
+ NL_SET_ERR_MSG(extack, "incorrect RTA_METRICS format");
return rc;
+ }
if (tbx[RTAX_MTU])
mtu = nla_get_u32(tbx[RTAX_MTU]);
}
- rc = mctp_route_add(mdev, daddr_start, rtm->rtm_dst_len, mtu,
- rtm->rtm_type);
+ rt->type = rtm->rtm_type;
+ rt->min = daddr_start;
+ rt->max = daddr_start + daddr_extent;
+ rt->mtu = mtu;
+ if (gw.eid) {
+ rt->dst_type = MCTP_ROUTE_GATEWAY;
+ rt->gateway.eid = gw.eid;
+ rt->gateway.net = gw.net;
+ } else {
+ rt->dst_type = MCTP_ROUTE_DIRECT;
+ rt->dev = dev;
+ mctp_dev_hold(rt->dev);
+ }
+
+ return 0;
+}
+
+static int mctp_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = sock_net(skb->sk);
+ struct mctp_route *rt;
+ int rc;
+
+ rt = mctp_route_alloc();
+ if (!rt)
+ return -ENOMEM;
+
+ rc = mctp_route_nlparse_populate(net, nlh, extack, rt);
+ if (rc < 0)
+ goto err_free;
+
+ if (rt->dst_type == MCTP_ROUTE_DIRECT &&
+ rt->dev->dev->flags & IFF_LOOPBACK) {
+ NL_SET_ERR_MSG(extack, "no routes to loopback");
+ rc = -EINVAL;
+ goto err_free;
+ }
+
+ rc = mctp_route_add(net, rt);
+ if (!rc)
+ return 0;
+
+err_free:
+ mctp_route_release(rt);
return rc;
}
static int mctp_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
- struct nlattr *tb[RTA_MAX + 1];
+ struct net *net = sock_net(skb->sk);
+ unsigned int netid, daddr_extent;
+ unsigned char type = RTN_UNSPEC;
mctp_eid_t daddr_start;
- struct mctp_dev *mdev;
- struct rtmsg *rtm;
int rc;
- rc = mctp_route_nlparse(skb, nlh, extack, tb,
- &rtm, &mdev, &daddr_start);
+ rc = mctp_route_nlparse_lookup(net, nlh, extack, &type, &netid,
+ &daddr_start, &daddr_extent);
if (rc < 0)
return rc;
/* we only have unicast routes */
- if (rtm->rtm_type != RTN_UNICAST)
+ if (type != RTN_UNICAST)
return -EINVAL;
- rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len, RTN_UNICAST);
+ rc = mctp_route_remove(net, netid, daddr_start, daddr_extent, type);
return rc;
}
@@ -1405,7 +1652,6 @@ static int mctp_fill_rtinfo(struct sk_buff *skb, struct mctp_route *rt,
hdr->rtm_tos = 0;
hdr->rtm_table = RT_TABLE_DEFAULT;
hdr->rtm_protocol = RTPROT_STATIC; /* everything is user-defined */
- hdr->rtm_scope = RT_SCOPE_LINK; /* TODO: scope in mctp_route? */
hdr->rtm_type = rt->type;
if (nla_put_u8(skb, RTA_DST, rt->min))
@@ -1422,13 +1668,17 @@ static int mctp_fill_rtinfo(struct sk_buff *skb, struct mctp_route *rt,
nla_nest_end(skb, metrics);
- if (rt->dev) {
+ if (rt->dst_type == MCTP_ROUTE_DIRECT) {
+ hdr->rtm_scope = RT_SCOPE_LINK;
if (nla_put_u32(skb, RTA_OIF, rt->dev->dev->ifindex))
goto cancel;
+ } else if (rt->dst_type == MCTP_ROUTE_GATEWAY) {
+ hdr->rtm_scope = RT_SCOPE_UNIVERSE;
+ if (nla_put(skb, RTA_GATEWAY,
+ sizeof(rt->gateway), &rt->gateway))
+ goto cancel;
}
- /* TODO: conditional neighbour physaddr? */
-
nlmsg_end(skb, nlh);
return 0;
@@ -1475,7 +1725,7 @@ static int __net_init mctp_routes_net_init(struct net *net)
struct netns_mctp *ns = &net->mctp;
INIT_LIST_HEAD(&ns->routes);
- INIT_HLIST_HEAD(&ns->binds);
+ hash_init(ns->binds);
mutex_init(&ns->bind_lock);
INIT_HLIST_HEAD(&ns->keys);
spin_lock_init(&ns->keys_lock);
diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c
index 06c1897b685a..69a3ccfc6310 100644
--- a/net/mctp/test/route-test.c
+++ b/net/mctp/test/route-test.c
@@ -2,132 +2,11 @@
#include <kunit/test.h>
-#include "utils.h"
-
-struct mctp_test_route {
- struct mctp_route rt;
- struct sk_buff_head pkts;
-};
-
-static int mctp_test_route_output(struct mctp_route *rt, struct sk_buff *skb)
-{
- struct mctp_test_route *test_rt = container_of(rt, struct mctp_test_route, rt);
-
- skb_queue_tail(&test_rt->pkts, skb);
-
- return 0;
-}
-
-/* local version of mctp_route_alloc() */
-static struct mctp_test_route *mctp_route_test_alloc(void)
-{
- struct mctp_test_route *rt;
+/* keep clangd happy when compiled outside of the route.c include */
+#include <net/mctp.h>
+#include <net/mctpdevice.h>
- rt = kzalloc(sizeof(*rt), GFP_KERNEL);
- if (!rt)
- return NULL;
-
- INIT_LIST_HEAD(&rt->rt.list);
- refcount_set(&rt->rt.refs, 1);
- rt->rt.output = mctp_test_route_output;
-
- skb_queue_head_init(&rt->pkts);
-
- return rt;
-}
-
-static struct mctp_test_route *mctp_test_create_route(struct net *net,
- struct mctp_dev *dev,
- mctp_eid_t eid,
- unsigned int mtu)
-{
- struct mctp_test_route *rt;
-
- rt = mctp_route_test_alloc();
- if (!rt)
- return NULL;
-
- rt->rt.min = eid;
- rt->rt.max = eid;
- rt->rt.mtu = mtu;
- rt->rt.type = RTN_UNSPEC;
- if (dev)
- mctp_dev_hold(dev);
- rt->rt.dev = dev;
-
- list_add_rcu(&rt->rt.list, &net->mctp.routes);
-
- return rt;
-}
-
-static void mctp_test_route_destroy(struct kunit *test,
- struct mctp_test_route *rt)
-{
- unsigned int refs;
-
- rtnl_lock();
- list_del_rcu(&rt->rt.list);
- rtnl_unlock();
-
- skb_queue_purge(&rt->pkts);
- if (rt->rt.dev)
- mctp_dev_put(rt->rt.dev);
-
- refs = refcount_read(&rt->rt.refs);
- KUNIT_ASSERT_EQ_MSG(test, refs, 1, "route ref imbalance");
-
- kfree_rcu(&rt->rt, rcu);
-}
-
-static void mctp_test_skb_set_dev(struct sk_buff *skb,
- struct mctp_test_dev *dev)
-{
- struct mctp_skb_cb *cb;
-
- cb = mctp_cb(skb);
- cb->net = READ_ONCE(dev->mdev->net);
- skb->dev = dev->ndev;
-}
-
-static struct sk_buff *mctp_test_create_skb(const struct mctp_hdr *hdr,
- unsigned int data_len)
-{
- size_t hdr_len = sizeof(*hdr);
- struct sk_buff *skb;
- unsigned int i;
- u8 *buf;
-
- skb = alloc_skb(hdr_len + data_len, GFP_KERNEL);
- if (!skb)
- return NULL;
-
- __mctp_cb(skb);
- memcpy(skb_put(skb, hdr_len), hdr, hdr_len);
-
- buf = skb_put(skb, data_len);
- for (i = 0; i < data_len; i++)
- buf[i] = i & 0xff;
-
- return skb;
-}
-
-static struct sk_buff *__mctp_test_create_skb_data(const struct mctp_hdr *hdr,
- const void *data,
- size_t data_len)
-{
- size_t hdr_len = sizeof(*hdr);
- struct sk_buff *skb;
-
- skb = alloc_skb(hdr_len + data_len, GFP_KERNEL);
- if (!skb)
- return NULL;
-
- __mctp_cb(skb);
- memcpy(skb_put(skb, hdr_len), hdr, hdr_len);
- memcpy(skb_put(skb, data_len), data, data_len);
-
- return skb;
-}
+#include "utils.h"
#define mctp_test_create_skb_data(h, d) \
__mctp_test_create_skb_data(h, d, sizeof(*d))
@@ -141,8 +20,10 @@ struct mctp_frag_test {
static void mctp_test_fragment(struct kunit *test)
{
const struct mctp_frag_test *params;
+ struct mctp_test_pktqueue tpq;
int rc, i, n, mtu, msgsize;
- struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct mctp_dst dst;
struct sk_buff *skb;
struct mctp_hdr hdr;
u8 seq;
@@ -159,13 +40,15 @@ static void mctp_test_fragment(struct kunit *test)
skb = mctp_test_create_skb(&hdr, msgsize);
KUNIT_ASSERT_TRUE(test, skb);
- rt = mctp_test_create_route(&init_net, NULL, 10, mtu);
- KUNIT_ASSERT_TRUE(test, rt);
+ dev = mctp_test_create_dev();
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ mctp_test_dst_setup(test, &dst, dev, &tpq, mtu);
- rc = mctp_do_fragment_route(&rt->rt, skb, mtu, MCTP_TAG_OWNER);
+ rc = mctp_do_fragment_route(&dst, skb, mtu, MCTP_TAG_OWNER);
KUNIT_EXPECT_FALSE(test, rc);
- n = rt->pkts.qlen;
+ n = tpq.pkts.qlen;
KUNIT_EXPECT_EQ(test, n, params->n_frags);
@@ -178,7 +61,7 @@ static void mctp_test_fragment(struct kunit *test)
first = i == 0;
last = i == (n - 1);
- skb2 = skb_dequeue(&rt->pkts);
+ skb2 = skb_dequeue(&tpq.pkts);
if (!skb2)
break;
@@ -216,7 +99,8 @@ static void mctp_test_fragment(struct kunit *test)
kfree_skb(skb2);
}
- mctp_test_route_destroy(test, rt);
+ mctp_test_dst_release(&dst, &tpq);
+ mctp_test_destroy_dev(dev);
}
static const struct mctp_frag_test mctp_frag_tests[] = {
@@ -246,25 +130,30 @@ struct mctp_rx_input_test {
static void mctp_test_rx_input(struct kunit *test)
{
const struct mctp_rx_input_test *params;
+ struct mctp_test_pktqueue tpq;
struct mctp_test_route *rt;
struct mctp_test_dev *dev;
struct sk_buff *skb;
params = test->param_value;
+ test->priv = &tpq;
dev = mctp_test_create_dev();
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
- rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68);
+ rt = mctp_test_create_route_direct(&init_net, dev->mdev, 8, 68);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
skb = mctp_test_create_skb(&params->hdr, 1);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
+ mctp_test_pktqueue_init(&tpq);
+
mctp_pkttype_receive(skb, dev->ndev, &mctp_packet_type, NULL);
- KUNIT_EXPECT_EQ(test, !!rt->pkts.qlen, params->input);
+ KUNIT_EXPECT_EQ(test, !!tpq.pkts.qlen, params->input);
+ skb_queue_purge(&tpq.pkts);
mctp_test_route_destroy(test, rt);
mctp_test_destroy_dev(dev);
}
@@ -292,12 +181,12 @@ KUNIT_ARRAY_PARAM(mctp_rx_input, mctp_rx_input_tests,
/* set up a local dev, route on EID 8, and a socket listening on type 0 */
static void __mctp_route_test_init(struct kunit *test,
struct mctp_test_dev **devp,
- struct mctp_test_route **rtp,
+ struct mctp_dst *dst,
+ struct mctp_test_pktqueue *tpq,
struct socket **sockp,
unsigned int netid)
{
struct sockaddr_mctp addr = {0};
- struct mctp_test_route *rt;
struct mctp_test_dev *dev;
struct socket *sock;
int rc;
@@ -307,8 +196,7 @@ static void __mctp_route_test_init(struct kunit *test,
if (netid != MCTP_NET_ANY)
WRITE_ONCE(dev->mdev->net, netid);
- rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
+ mctp_test_dst_setup(test, dst, dev, tpq, 68);
rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock);
KUNIT_ASSERT_EQ(test, rc, 0);
@@ -320,18 +208,18 @@ static void __mctp_route_test_init(struct kunit *test,
rc = kernel_bind(sock, (struct sockaddr *)&addr, sizeof(addr));
KUNIT_ASSERT_EQ(test, rc, 0);
- *rtp = rt;
*devp = dev;
*sockp = sock;
}
static void __mctp_route_test_fini(struct kunit *test,
struct mctp_test_dev *dev,
- struct mctp_test_route *rt,
+ struct mctp_dst *dst,
+ struct mctp_test_pktqueue *tpq,
struct socket *sock)
{
sock_release(sock);
- mctp_test_route_destroy(test, rt);
+ mctp_test_dst_release(dst, tpq);
mctp_test_destroy_dev(dev);
}
@@ -344,22 +232,24 @@ struct mctp_route_input_sk_test {
static void mctp_test_route_input_sk(struct kunit *test)
{
const struct mctp_route_input_sk_test *params;
+ struct mctp_test_pktqueue tpq;
struct sk_buff *skb, *skb2;
- struct mctp_test_route *rt;
struct mctp_test_dev *dev;
+ struct mctp_dst dst;
struct socket *sock;
int rc;
params = test->param_value;
- __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
+ __mctp_route_test_init(test, &dev, &dst, &tpq, &sock, MCTP_NET_ANY);
skb = mctp_test_create_skb_data(&params->hdr, &params->type);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
mctp_test_skb_set_dev(skb, dev);
+ mctp_test_pktqueue_init(&tpq);
- rc = mctp_route_input(&rt->rt, skb);
+ rc = mctp_dst_input(&dst, skb);
if (params->deliver) {
KUNIT_EXPECT_EQ(test, rc, 0);
@@ -376,7 +266,7 @@ static void mctp_test_route_input_sk(struct kunit *test)
KUNIT_EXPECT_NULL(test, skb2);
}
- __mctp_route_test_fini(test, dev, rt, sock);
+ __mctp_route_test_fini(test, dev, &dst, &tpq, sock);
}
#define FL_S (MCTP_HDR_FLAG_SOM)
@@ -413,16 +303,17 @@ struct mctp_route_input_sk_reasm_test {
static void mctp_test_route_input_sk_reasm(struct kunit *test)
{
const struct mctp_route_input_sk_reasm_test *params;
+ struct mctp_test_pktqueue tpq;
struct sk_buff *skb, *skb2;
- struct mctp_test_route *rt;
struct mctp_test_dev *dev;
+ struct mctp_dst dst;
struct socket *sock;
int i, rc;
u8 c;
params = test->param_value;
- __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
+ __mctp_route_test_init(test, &dev, &dst, &tpq, &sock, MCTP_NET_ANY);
for (i = 0; i < params->n_hdrs; i++) {
c = i;
@@ -431,7 +322,7 @@ static void mctp_test_route_input_sk_reasm(struct kunit *test)
mctp_test_skb_set_dev(skb, dev);
- rc = mctp_route_input(&rt->rt, skb);
+ rc = mctp_dst_input(&dst, skb);
}
skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
@@ -445,7 +336,7 @@ static void mctp_test_route_input_sk_reasm(struct kunit *test)
KUNIT_EXPECT_NULL(test, skb2);
}
- __mctp_route_test_fini(test, dev, rt, sock);
+ __mctp_route_test_fini(test, dev, &dst, &tpq, sock);
}
#define RX_FRAG(f, s) RX_HDR(1, 10, 8, FL_TO | (f) | ((s) << MCTP_HDR_SEQ_SHIFT))
@@ -547,7 +438,7 @@ struct mctp_route_input_sk_keys_test {
static void mctp_test_route_input_sk_keys(struct kunit *test)
{
const struct mctp_route_input_sk_keys_test *params;
- struct mctp_test_route *rt;
+ struct mctp_test_pktqueue tpq;
struct sk_buff *skb, *skb2;
struct mctp_test_dev *dev;
struct mctp_sk_key *key;
@@ -555,6 +446,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
struct mctp_sock *msk;
struct socket *sock;
unsigned long flags;
+ struct mctp_dst dst;
unsigned int net;
int rc;
u8 c;
@@ -565,8 +457,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
net = READ_ONCE(dev->mdev->net);
- rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
+ mctp_test_dst_setup(test, &dst, dev, &tpq, 68);
rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock);
KUNIT_ASSERT_EQ(test, rc, 0);
@@ -592,7 +483,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
mctp_test_skb_set_dev(skb, dev);
- rc = mctp_route_input(&rt->rt, skb);
+ rc = mctp_dst_input(&dst, skb);
/* (potentially) receive message */
skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
@@ -606,7 +497,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
skb_free_datagram(sock->sk, skb2);
mctp_key_unref(key);
- __mctp_route_test_fini(test, dev, rt, sock);
+ __mctp_route_test_fini(test, dev, &dst, &tpq, sock);
}
static const struct mctp_route_input_sk_keys_test mctp_route_input_sk_keys_tests[] = {
@@ -681,7 +572,8 @@ KUNIT_ARRAY_PARAM(mctp_route_input_sk_keys, mctp_route_input_sk_keys_tests,
struct test_net {
unsigned int netid;
struct mctp_test_dev *dev;
- struct mctp_test_route *rt;
+ struct mctp_test_pktqueue tpq;
+ struct mctp_dst dst;
struct socket *sock;
struct sk_buff *skb;
struct mctp_sk_key *key;
@@ -699,18 +591,20 @@ mctp_test_route_input_multiple_nets_bind_init(struct kunit *test,
t->msg.data = t->netid;
- __mctp_route_test_init(test, &t->dev, &t->rt, &t->sock, t->netid);
+ __mctp_route_test_init(test, &t->dev, &t->dst, &t->tpq, &t->sock,
+ t->netid);
t->skb = mctp_test_create_skb_data(&hdr, &t->msg);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->skb);
mctp_test_skb_set_dev(t->skb, t->dev);
+ mctp_test_pktqueue_init(&t->tpq);
}
static void
mctp_test_route_input_multiple_nets_bind_fini(struct kunit *test,
struct test_net *t)
{
- __mctp_route_test_fini(test, t->dev, t->rt, t->sock);
+ __mctp_route_test_fini(test, t->dev, &t->dst, &t->tpq, t->sock);
}
/* Test that skbs from different nets (otherwise identical) get routed to their
@@ -731,9 +625,9 @@ static void mctp_test_route_input_multiple_nets_bind(struct kunit *test)
mctp_test_route_input_multiple_nets_bind_init(test, &t1);
mctp_test_route_input_multiple_nets_bind_init(test, &t2);
- rc = mctp_route_input(&t1.rt->rt, t1.skb);
+ rc = mctp_dst_input(&t1.dst, t1.skb);
KUNIT_ASSERT_EQ(test, rc, 0);
- rc = mctp_route_input(&t2.rt->rt, t2.skb);
+ rc = mctp_dst_input(&t2.dst, t2.skb);
KUNIT_ASSERT_EQ(test, rc, 0);
rx_skb1 = skb_recv_datagram(t1.sock->sk, MSG_DONTWAIT, &rc);
@@ -767,7 +661,8 @@ mctp_test_route_input_multiple_nets_key_init(struct kunit *test,
t->msg.data = t->netid;
- __mctp_route_test_init(test, &t->dev, &t->rt, &t->sock, t->netid);
+ __mctp_route_test_init(test, &t->dev, &t->dst, &t->tpq, &t->sock,
+ t->netid);
msk = container_of(t->sock->sk, struct mctp_sock, sk);
@@ -790,7 +685,7 @@ mctp_test_route_input_multiple_nets_key_fini(struct kunit *test,
struct test_net *t)
{
mctp_key_unref(t->key);
- __mctp_route_test_fini(test, t->dev, t->rt, t->sock);
+ __mctp_route_test_fini(test, t->dev, &t->dst, &t->tpq, t->sock);
}
/* test that skbs from different nets (otherwise identical) get routed to their
@@ -812,9 +707,9 @@ static void mctp_test_route_input_multiple_nets_key(struct kunit *test)
mctp_test_route_input_multiple_nets_key_init(test, &t1);
mctp_test_route_input_multiple_nets_key_init(test, &t2);
- rc = mctp_route_input(&t1.rt->rt, t1.skb);
+ rc = mctp_dst_input(&t1.dst, t1.skb);
KUNIT_ASSERT_EQ(test, rc, 0);
- rc = mctp_route_input(&t2.rt->rt, t2.skb);
+ rc = mctp_dst_input(&t2.dst, t2.skb);
KUNIT_ASSERT_EQ(test, rc, 0);
rx_skb1 = skb_recv_datagram(t1.sock->sk, MSG_DONTWAIT, &rc);
@@ -843,13 +738,14 @@ static void mctp_test_route_input_multiple_nets_key(struct kunit *test)
static void mctp_test_route_input_sk_fail_single(struct kunit *test)
{
const struct mctp_hdr hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_TO);
- struct mctp_test_route *rt;
+ struct mctp_test_pktqueue tpq;
struct mctp_test_dev *dev;
+ struct mctp_dst dst;
struct socket *sock;
struct sk_buff *skb;
int rc;
- __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
+ __mctp_route_test_init(test, &dev, &dst, &tpq, &sock, MCTP_NET_ANY);
/* No rcvbuf space, so delivery should fail. __sock_set_rcvbuf will
* clamp the minimum to SOCK_MIN_RCVBUF, so we open-code this.
@@ -865,14 +761,14 @@ static void mctp_test_route_input_sk_fail_single(struct kunit *test)
mctp_test_skb_set_dev(skb, dev);
/* do route input, which should fail */
- rc = mctp_route_input(&rt->rt, skb);
+ rc = mctp_dst_input(&dst, skb);
KUNIT_EXPECT_NE(test, rc, 0);
/* we should hold the only reference to skb */
KUNIT_EXPECT_EQ(test, refcount_read(&skb->users), 1);
kfree_skb(skb);
- __mctp_route_test_fini(test, dev, rt, sock);
+ __mctp_route_test_fini(test, dev, &dst, &tpq, sock);
}
/* Input route to socket, using a fragmented message, where sock delivery fails.
@@ -880,14 +776,15 @@ static void mctp_test_route_input_sk_fail_single(struct kunit *test)
static void mctp_test_route_input_sk_fail_frag(struct kunit *test)
{
const struct mctp_hdr hdrs[2] = { RX_FRAG(FL_S, 0), RX_FRAG(FL_E, 1) };
- struct mctp_test_route *rt;
+ struct mctp_test_pktqueue tpq;
struct mctp_test_dev *dev;
struct sk_buff *skbs[2];
+ struct mctp_dst dst;
struct socket *sock;
unsigned int i;
int rc;
- __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
+ __mctp_route_test_init(test, &dev, &dst, &tpq, &sock, MCTP_NET_ANY);
lock_sock(sock->sk);
WRITE_ONCE(sock->sk->sk_rcvbuf, 0);
@@ -904,11 +801,11 @@ static void mctp_test_route_input_sk_fail_frag(struct kunit *test)
/* first route input should succeed, we're only queueing to the
* frag list
*/
- rc = mctp_route_input(&rt->rt, skbs[0]);
+ rc = mctp_dst_input(&dst, skbs[0]);
KUNIT_EXPECT_EQ(test, rc, 0);
/* final route input should fail to deliver to the socket */
- rc = mctp_route_input(&rt->rt, skbs[1]);
+ rc = mctp_dst_input(&dst, skbs[1]);
KUNIT_EXPECT_NE(test, rc, 0);
/* we should hold the only reference to both skbs */
@@ -918,7 +815,7 @@ static void mctp_test_route_input_sk_fail_frag(struct kunit *test)
KUNIT_EXPECT_EQ(test, refcount_read(&skbs[1]->users), 1);
kfree_skb(skbs[1]);
- __mctp_route_test_fini(test, dev, rt, sock);
+ __mctp_route_test_fini(test, dev, &dst, &tpq, sock);
}
/* Input route to socket, using a fragmented message created from clones.
@@ -933,23 +830,22 @@ static void mctp_test_route_input_cloned_frag(struct kunit *test)
RX_FRAG(FL_S, 0),
RX_FRAG(FL_E, 1),
};
- struct mctp_test_route *rt;
+ const size_t data_len = 3; /* arbitrary */
+ u8 compare[3 * ARRAY_SIZE(hdrs)];
+ u8 flat[3 * ARRAY_SIZE(hdrs)];
+ struct mctp_test_pktqueue tpq;
struct mctp_test_dev *dev;
struct sk_buff *skb[5];
struct sk_buff *rx_skb;
+ struct mctp_dst dst;
struct socket *sock;
- size_t data_len;
- u8 compare[100];
- u8 flat[100];
size_t total;
void *p;
int rc;
- /* Arbitrary length */
- data_len = 3;
total = data_len + sizeof(struct mctp_hdr);
- __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
+ __mctp_route_test_init(test, &dev, &dst, &tpq, &sock, MCTP_NET_ANY);
/* Create a single skb initially with concatenated packets */
skb[0] = mctp_test_create_skb(&hdrs[0], 5 * total);
@@ -988,7 +884,7 @@ static void mctp_test_route_input_cloned_frag(struct kunit *test)
/* Feed the fragments into MCTP core */
for (int i = 0; i < 5; i++) {
- rc = mctp_route_input(&rt->rt, skb[i]);
+ rc = mctp_dst_input(&dst, skb[i]);
KUNIT_EXPECT_EQ(test, rc, 0);
}
@@ -1026,29 +922,29 @@ static void mctp_test_route_input_cloned_frag(struct kunit *test)
kfree_skb(skb[i]);
}
- __mctp_route_test_fini(test, dev, rt, sock);
+ __mctp_route_test_fini(test, dev, &dst, &tpq, sock);
}
#if IS_ENABLED(CONFIG_MCTP_FLOWS)
static void mctp_test_flow_init(struct kunit *test,
struct mctp_test_dev **devp,
- struct mctp_test_route **rtp,
+ struct mctp_dst *dst,
+ struct mctp_test_pktqueue *tpq,
struct socket **sock,
struct sk_buff **skbp,
unsigned int len)
{
- struct mctp_test_route *rt;
struct mctp_test_dev *dev;
struct sk_buff *skb;
/* we have a slightly odd routing setup here; the test route
* is for EID 8, which is our local EID. We don't do a routing
* lookup, so that's fine - all we require is a path through
- * mctp_local_output, which will call rt->output on whatever
+ * mctp_local_output, which will call dst->output on whatever
* route we provide
*/
- __mctp_route_test_init(test, &dev, &rt, sock, MCTP_NET_ANY);
+ __mctp_route_test_init(test, &dev, dst, tpq, sock, MCTP_NET_ANY);
/* Assign a single EID. ->addrs is freed on mctp netdev release */
dev->mdev->addrs = kmalloc(sizeof(u8), GFP_KERNEL);
@@ -1061,42 +957,41 @@ static void mctp_test_flow_init(struct kunit *test,
skb_reserve(skb, sizeof(struct mctp_hdr) + 1);
memset(skb_put(skb, len), 0, len);
- /* take a ref for the route, we'll decrement in local output */
- refcount_inc(&rt->rt.refs);
*devp = dev;
- *rtp = rt;
*skbp = skb;
}
static void mctp_test_flow_fini(struct kunit *test,
struct mctp_test_dev *dev,
- struct mctp_test_route *rt,
+ struct mctp_dst *dst,
+ struct mctp_test_pktqueue *tpq,
struct socket *sock)
{
- __mctp_route_test_fini(test, dev, rt, sock);
+ __mctp_route_test_fini(test, dev, dst, tpq, sock);
}
/* test that an outgoing skb has the correct MCTP extension data set */
static void mctp_test_packet_flow(struct kunit *test)
{
+ struct mctp_test_pktqueue tpq;
struct sk_buff *skb, *skb2;
- struct mctp_test_route *rt;
struct mctp_test_dev *dev;
+ struct mctp_dst dst;
struct mctp_flow *flow;
struct socket *sock;
- u8 dst = 8;
+ u8 dst_eid = 8;
int n, rc;
- mctp_test_flow_init(test, &dev, &rt, &sock, &skb, 30);
+ mctp_test_flow_init(test, &dev, &dst, &tpq, &sock, &skb, 30);
- rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER);
+ rc = mctp_local_output(sock->sk, &dst, skb, dst_eid, MCTP_TAG_OWNER);
KUNIT_ASSERT_EQ(test, rc, 0);
- n = rt->pkts.qlen;
+ n = tpq.pkts.qlen;
KUNIT_ASSERT_EQ(test, n, 1);
- skb2 = skb_dequeue(&rt->pkts);
+ skb2 = skb_dequeue(&tpq.pkts);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb2);
flow = skb_ext_find(skb2, SKB_EXT_MCTP);
@@ -1105,7 +1000,7 @@ static void mctp_test_packet_flow(struct kunit *test)
KUNIT_ASSERT_PTR_EQ(test, flow->key->sk, sock->sk);
kfree_skb(skb2);
- mctp_test_flow_fini(test, dev, rt, sock);
+ mctp_test_flow_fini(test, dev, &dst, &tpq, sock);
}
/* test that outgoing skbs, after fragmentation, all have the correct MCTP
@@ -1113,26 +1008,27 @@ static void mctp_test_packet_flow(struct kunit *test)
*/
static void mctp_test_fragment_flow(struct kunit *test)
{
+ struct mctp_test_pktqueue tpq;
struct mctp_flow *flows[2];
struct sk_buff *tx_skbs[2];
- struct mctp_test_route *rt;
struct mctp_test_dev *dev;
+ struct mctp_dst dst;
struct sk_buff *skb;
struct socket *sock;
- u8 dst = 8;
+ u8 dst_eid = 8;
int n, rc;
- mctp_test_flow_init(test, &dev, &rt, &sock, &skb, 100);
+ mctp_test_flow_init(test, &dev, &dst, &tpq, &sock, &skb, 100);
- rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER);
+ rc = mctp_local_output(sock->sk, &dst, skb, dst_eid, MCTP_TAG_OWNER);
KUNIT_ASSERT_EQ(test, rc, 0);
- n = rt->pkts.qlen;
+ n = tpq.pkts.qlen;
KUNIT_ASSERT_EQ(test, n, 2);
/* both resulting packets should have the same flow data */
- tx_skbs[0] = skb_dequeue(&rt->pkts);
- tx_skbs[1] = skb_dequeue(&rt->pkts);
+ tx_skbs[0] = skb_dequeue(&tpq.pkts);
+ tx_skbs[1] = skb_dequeue(&tpq.pkts);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tx_skbs[0]);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tx_skbs[1]);
@@ -1148,7 +1044,7 @@ static void mctp_test_fragment_flow(struct kunit *test)
kfree_skb(tx_skbs[0]);
kfree_skb(tx_skbs[1]);
- mctp_test_flow_fini(test, dev, rt, sock);
+ mctp_test_flow_fini(test, dev, &dst, &tpq, sock);
}
#else
@@ -1166,15 +1062,16 @@ static void mctp_test_fragment_flow(struct kunit *test)
/* Test that outgoing skbs cause a suitable tag to be created */
static void mctp_test_route_output_key_create(struct kunit *test)
{
+ const u8 dst_eid = 26, src_eid = 15;
+ struct mctp_test_pktqueue tpq;
const unsigned int netid = 50;
- const u8 dst = 26, src = 15;
- struct mctp_test_route *rt;
struct mctp_test_dev *dev;
struct mctp_sk_key *key;
struct netns_mctp *mns;
unsigned long flags;
struct socket *sock;
struct sk_buff *skb;
+ struct mctp_dst dst;
bool empty, single;
const int len = 2;
int rc;
@@ -1183,15 +1080,14 @@ static void mctp_test_route_output_key_create(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
WRITE_ONCE(dev->mdev->net, netid);
- rt = mctp_test_create_route(&init_net, dev->mdev, dst, 68);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
+ mctp_test_dst_setup(test, &dst, dev, &tpq, 68);
rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock);
KUNIT_ASSERT_EQ(test, rc, 0);
dev->mdev->addrs = kmalloc(sizeof(u8), GFP_KERNEL);
dev->mdev->num_addrs = 1;
- dev->mdev->addrs[0] = src;
+ dev->mdev->addrs[0] = src_eid;
skb = alloc_skb(sizeof(struct mctp_hdr) + 1 + len, GFP_KERNEL);
KUNIT_ASSERT_TRUE(test, skb);
@@ -1199,8 +1095,6 @@ static void mctp_test_route_output_key_create(struct kunit *test)
skb_reserve(skb, sizeof(struct mctp_hdr) + 1 + len);
memset(skb_put(skb, len), 0, len);
- refcount_inc(&rt->rt.refs);
-
mns = &sock_net(sock->sk)->mctp;
/* We assume we're starting from an empty keys list, which requires
@@ -1211,7 +1105,7 @@ static void mctp_test_route_output_key_create(struct kunit *test)
spin_unlock_irqrestore(&mns->keys_lock, flags);
KUNIT_ASSERT_TRUE(test, empty);
- rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER);
+ rc = mctp_local_output(sock->sk, &dst, skb, dst_eid, MCTP_TAG_OWNER);
KUNIT_ASSERT_EQ(test, rc, 0);
key = NULL;
@@ -1227,16 +1121,479 @@ static void mctp_test_route_output_key_create(struct kunit *test)
KUNIT_ASSERT_TRUE(test, single);
KUNIT_EXPECT_EQ(test, key->net, netid);
- KUNIT_EXPECT_EQ(test, key->local_addr, src);
- KUNIT_EXPECT_EQ(test, key->peer_addr, dst);
+ KUNIT_EXPECT_EQ(test, key->local_addr, src_eid);
+ KUNIT_EXPECT_EQ(test, key->peer_addr, dst_eid);
/* key has incoming tag, so inverse of what we sent */
KUNIT_EXPECT_FALSE(test, key->tag & MCTP_TAG_OWNER);
sock_release(sock);
- mctp_test_route_destroy(test, rt);
+ mctp_test_dst_release(&dst, &tpq);
mctp_test_destroy_dev(dev);
}
+static void mctp_test_route_extaddr_input(struct kunit *test)
+{
+ static const unsigned char haddr[] = { 0xaa, 0x55 };
+ struct mctp_test_pktqueue tpq;
+ struct mctp_skb_cb *cb, *cb2;
+ const unsigned int len = 40;
+ struct mctp_test_dev *dev;
+ struct sk_buff *skb, *skb2;
+ struct mctp_dst dst;
+ struct mctp_hdr hdr;
+ struct socket *sock;
+ int rc;
+
+ hdr.ver = 1;
+ hdr.src = 10;
+ hdr.dest = 8;
+ hdr.flags_seq_tag = FL_S | FL_E | FL_TO;
+
+ __mctp_route_test_init(test, &dev, &dst, &tpq, &sock, MCTP_NET_ANY);
+
+ skb = mctp_test_create_skb(&hdr, len);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
+
+ /* set our hardware addressing data */
+ cb = mctp_cb(skb);
+ memcpy(cb->haddr, haddr, sizeof(haddr));
+ cb->halen = sizeof(haddr);
+
+ mctp_test_skb_set_dev(skb, dev);
+
+ rc = mctp_dst_input(&dst, skb);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb2);
+ KUNIT_ASSERT_EQ(test, skb2->len, len);
+
+ cb2 = mctp_cb(skb2);
+
+ /* Received SKB should have the hardware addressing as set above.
+ * We're likely to have the same actual cb here (i.e., cb == cb2), but it's
+ * the comparison that we care about.
+ */
+ KUNIT_EXPECT_EQ(test, cb2->halen, sizeof(haddr));
+ KUNIT_EXPECT_MEMEQ(test, cb2->haddr, haddr, sizeof(haddr));
+
+ kfree_skb(skb2);
+ __mctp_route_test_fini(test, dev, &dst, &tpq, sock);
+}
+
+static void mctp_test_route_gw_lookup(struct kunit *test)
+{
+ struct mctp_test_route *rt1, *rt2;
+ struct mctp_dst dst = { 0 };
+ struct mctp_test_dev *dev;
+ int rc;
+
+ dev = mctp_test_create_dev();
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ /* 8 (local) -> 10 (gateway) via 9 (direct) */
+ rt1 = mctp_test_create_route_direct(&init_net, dev->mdev, 9, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt1);
+ rt2 = mctp_test_create_route_gw(&init_net, dev->mdev->net, 10, 9, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt2);
+
+ rc = mctp_route_lookup(&init_net, dev->mdev->net, 10, &dst);
+ KUNIT_EXPECT_EQ(test, rc, 0);
+ KUNIT_EXPECT_PTR_EQ(test, dst.dev, dev->mdev);
+ KUNIT_EXPECT_EQ(test, dst.mtu, dev->ndev->mtu);
+ KUNIT_EXPECT_EQ(test, dst.nexthop, 9);
+ KUNIT_EXPECT_EQ(test, dst.halen, 0);
+
+ mctp_dst_release(&dst);
+
+ mctp_test_route_destroy(test, rt2);
+ mctp_test_route_destroy(test, rt1);
+ mctp_test_destroy_dev(dev);
+}
+
+static void mctp_test_route_gw_loop(struct kunit *test)
+{
+ struct mctp_test_route *rt1, *rt2;
+ struct mctp_dst dst = { 0 };
+ struct mctp_test_dev *dev;
+ int rc;
+
+ dev = mctp_test_create_dev();
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ /* two routes using each other as the gw */
+ rt1 = mctp_test_create_route_gw(&init_net, dev->mdev->net, 9, 10, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt1);
+ rt2 = mctp_test_create_route_gw(&init_net, dev->mdev->net, 10, 9, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt2);
+
+ /* this should fail, rather than infinite-loop */
+ rc = mctp_route_lookup(&init_net, dev->mdev->net, 10, &dst);
+ KUNIT_EXPECT_NE(test, rc, 0);
+
+ mctp_test_route_destroy(test, rt2);
+ mctp_test_route_destroy(test, rt1);
+ mctp_test_destroy_dev(dev);
+}
+
+struct mctp_route_gw_mtu_test {
+ /* working away from the local stack */
+ unsigned int dev, neigh, gw, dst;
+ unsigned int exp;
+};
+
+static void mctp_route_gw_mtu_to_desc(const struct mctp_route_gw_mtu_test *t,
+ char *desc)
+{
+ sprintf(desc, "dev %d, neigh %d, gw %d, dst %d -> %d",
+ t->dev, t->neigh, t->gw, t->dst, t->exp);
+}
+
+static const struct mctp_route_gw_mtu_test mctp_route_gw_mtu_tests[] = {
+ /* no route-specific MTUs */
+ { 68, 0, 0, 0, 68 },
+ { 100, 0, 0, 0, 100 },
+ /* one route MTU (smaller than dev mtu), others unrestricted */
+ { 100, 68, 0, 0, 68 },
+ { 100, 0, 68, 0, 68 },
+ { 100, 0, 0, 68, 68 },
+ /* smallest applied, regardless of order */
+ { 100, 99, 98, 68, 68 },
+ { 99, 100, 98, 68, 68 },
+ { 98, 99, 100, 68, 68 },
+ { 68, 98, 99, 100, 68 },
+};
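/* Reading one row as a worked example: with { 98, 99, 100, 68, 68 }, the
 * lookup towards EID 11 traverses the dst route (MTU 68), the gateway route
 * (100) and the direct neighbour route (99), and is also bounded by the
 * device MTU (98); the smallest value on the path, 68, is what should end up
 * in dst.mtu, wherever it appears in the chain (a zero means unrestricted).
 */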
+
+KUNIT_ARRAY_PARAM(mctp_route_gw_mtu, mctp_route_gw_mtu_tests,
+ mctp_route_gw_mtu_to_desc);
+
+static void mctp_test_route_gw_mtu(struct kunit *test)
+{
+ const struct mctp_route_gw_mtu_test *mtus = test->param_value;
+ struct mctp_test_route *rt1, *rt2, *rt3;
+ struct mctp_dst dst = { 0 };
+ struct mctp_test_dev *dev;
+ struct mctp_dev *mdev;
+ unsigned int netid;
+ int rc;
+
+ dev = mctp_test_create_dev();
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ dev->ndev->mtu = mtus->dev;
+ mdev = dev->mdev;
+ netid = mdev->net;
+
+ /* 8 (local) -> 11 (dst) via 10 (gw) via 9 (neigh) */
+ rt1 = mctp_test_create_route_direct(&init_net, mdev, 9, mtus->neigh);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt1);
+
+ rt2 = mctp_test_create_route_gw(&init_net, netid, 10, 9, mtus->gw);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt2);
+
+ rt3 = mctp_test_create_route_gw(&init_net, netid, 11, 10, mtus->dst);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt3);
+
+ rc = mctp_route_lookup(&init_net, dev->mdev->net, 11, &dst);
+ KUNIT_EXPECT_EQ(test, rc, 0);
+ KUNIT_EXPECT_EQ(test, dst.mtu, mtus->exp);
+
+ mctp_dst_release(&dst);
+
+ mctp_test_route_destroy(test, rt3);
+ mctp_test_route_destroy(test, rt2);
+ mctp_test_route_destroy(test, rt1);
+ mctp_test_destroy_dev(dev);
+}
+
+#define MCTP_TEST_LLADDR_LEN 2
+struct mctp_test_llhdr {
+ unsigned int magic;
+ unsigned char src[MCTP_TEST_LLADDR_LEN];
+ unsigned char dst[MCTP_TEST_LLADDR_LEN];
+};
+
+static const unsigned int mctp_test_llhdr_magic = 0x5c78339c;
+
+static int test_dev_header_create(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned int len)
+{
+ struct kunit *test = current->kunit_test;
+ struct mctp_test_llhdr *hdr;
+
+ hdr = skb_push(skb, sizeof(*hdr));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hdr);
+ skb_reset_mac_header(skb);
+
+ hdr->magic = mctp_test_llhdr_magic;
+ memcpy(&hdr->src, saddr, sizeof(hdr->src));
+ memcpy(&hdr->dst, daddr, sizeof(hdr->dst));
+
+ return 0;
+}
+
+/* Test the dst_output path for a gateway-routed skb: we should have it
+ * lookup the nexthop EID in the neighbour table, and call into
+ * header_ops->create to resolve that to a lladdr. Our mock header_ops->create
+ * will just set a synthetic link-layer header, which we check after transmit.
+ */
+static void mctp_test_route_gw_output(struct kunit *test)
+{
+ const unsigned char haddr_self[MCTP_TEST_LLADDR_LEN] = { 0xaa, 0x03 };
+ const unsigned char haddr_peer[MCTP_TEST_LLADDR_LEN] = { 0xaa, 0x02 };
+ const struct header_ops ops = {
+ .create = test_dev_header_create,
+ };
+ struct mctp_neigh neigh = { 0 };
+ struct mctp_test_llhdr *ll_hdr;
+ struct mctp_dst dst = { 0 };
+ struct mctp_hdr hdr = { 0 };
+ struct mctp_test_dev *dev;
+ struct sk_buff *skb;
+ unsigned char *buf;
+ int i, rc;
+
+ dev = mctp_test_create_dev_lladdr(sizeof(haddr_self), haddr_self);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ dev->ndev->header_ops = &ops;
+
+ dst.dev = dev->mdev;
+ __mctp_dev_get(dst.dev->dev);
+ dst.mtu = 68;
+ dst.nexthop = 9;
+
+ /* simple mctp_neigh_add for the gateway (not dest!) endpoint */
+ INIT_LIST_HEAD(&neigh.list);
+ neigh.dev = dev->mdev;
+ mctp_dev_hold(dev->mdev);
+ neigh.eid = 9;
+ neigh.source = MCTP_NEIGH_STATIC;
+ memcpy(neigh.ha, haddr_peer, sizeof(haddr_peer));
+ list_add_rcu(&neigh.list, &init_net.mctp.neighbours);
+
+ hdr.ver = 1;
+ hdr.src = 8;
+ hdr.dest = 10;
+ hdr.flags_seq_tag = FL_S | FL_E | FL_TO;
+
+ /* construct enough for a future link-layer header, the provided
+ * mctp header, and 4 bytes of data
+ */
+ skb = alloc_skb(sizeof(*ll_hdr) + sizeof(hdr) + 4, GFP_KERNEL);
+ skb->dev = dev->ndev;
+ __mctp_cb(skb);
+
+ skb_reserve(skb, sizeof(*ll_hdr));
+
+ memcpy(skb_put(skb, sizeof(hdr)), &hdr, sizeof(hdr));
+ buf = skb_put(skb, 4);
+ for (i = 0; i < 4; i++)
+ buf[i] = i;
+
+ /* extra ref over the dev_xmit */
+ skb_get(skb);
+
+ rc = mctp_dst_output(&dst, skb);
+ KUNIT_EXPECT_EQ(test, rc, 0);
+
+ mctp_dst_release(&dst);
+ list_del_rcu(&neigh.list);
+ mctp_dev_put(dev->mdev);
+
+ /* check that we have our header created with the correct neighbour */
+ ll_hdr = (void *)skb_mac_header(skb);
+ KUNIT_EXPECT_EQ(test, ll_hdr->magic, mctp_test_llhdr_magic);
+ KUNIT_EXPECT_MEMEQ(test, ll_hdr->src, haddr_self, sizeof(haddr_self));
+ KUNIT_EXPECT_MEMEQ(test, ll_hdr->dst, haddr_peer, sizeof(haddr_peer));
+ kfree_skb(skb);
+}
+
+struct mctp_bind_lookup_test {
+ /* header of incoming message */
+ struct mctp_hdr hdr;
+ u8 ty;
+ /* mctp network of incoming interface (smctp_network) */
+ unsigned int net;
+
+ /* expected socket, matches .name in lookup_binds, NULL for dropped */
+ const char *expect;
+};
+
+/* Single-packet TO-set message */
+#define LK(src, dst) RX_HDR(1, (src), (dst), FL_S | FL_E | FL_TO)
+
+/* Input message test cases for bind lookup tests.
+ *
+ * 10 and 11 are local EIDs.
+ * 20 and 21 are remote EIDs.
+ */
+static const struct mctp_bind_lookup_test mctp_bind_lookup_tests[] = {
+ /* both local-eid and remote-eid binds; the remote-eid bind is preferred */
+ { .hdr = LK(20, 10), .ty = 1, .net = 1, .expect = "remote20" },
+
+ { .hdr = LK(20, 255), .ty = 1, .net = 1, .expect = "remote20" },
+ { .hdr = LK(20, 0), .ty = 1, .net = 1, .expect = "remote20" },
+ { .hdr = LK(0, 255), .ty = 1, .net = 1, .expect = "any" },
+ { .hdr = LK(0, 11), .ty = 1, .net = 1, .expect = "any" },
+ { .hdr = LK(0, 0), .ty = 1, .net = 1, .expect = "any" },
+ { .hdr = LK(0, 10), .ty = 1, .net = 1, .expect = "local10" },
+ { .hdr = LK(21, 10), .ty = 1, .net = 1, .expect = "local10" },
+ { .hdr = LK(21, 11), .ty = 1, .net = 1, .expect = "remote21local11" },
+
+ /* both src and dest set to EID 99: unusual, but currently accepted
+ * by the MCTP stack.
+ */
+ { .hdr = LK(99, 99), .ty = 1, .net = 1, .expect = "any" },
+
+ /* unbound smctp_type */
+ { .hdr = LK(20, 10), .ty = 3, .net = 1, .expect = NULL },
+
+ /* smctp_network tests */
+
+ { .hdr = LK(0, 0), .ty = 1, .net = 7, .expect = "any" },
+ { .hdr = LK(21, 10), .ty = 1, .net = 2, .expect = "any" },
+
+ /* remote EID 20 matches, but MCTP_NET_ANY in "remote20" resolved
+ * to net=1, so lookup doesn't match "remote20"
+ */
+ { .hdr = LK(20, 10), .ty = 1, .net = 3, .expect = "any" },
+
+ { .hdr = LK(21, 10), .ty = 1, .net = 3, .expect = "remote21net3" },
+ { .hdr = LK(21, 10), .ty = 1, .net = 4, .expect = "remote21net4" },
+ { .hdr = LK(21, 10), .ty = 1, .net = 5, .expect = "remote21net5" },
+
+ { .hdr = LK(21, 10), .ty = 1, .net = 5, .expect = "remote21net5" },
+
+ { .hdr = LK(99, 10), .ty = 1, .net = 8, .expect = "local10net8" },
+
+ { .hdr = LK(99, 10), .ty = 1, .net = 9, .expect = "anynet9" },
+ { .hdr = LK(0, 0), .ty = 1, .net = 9, .expect = "anynet9" },
+ { .hdr = LK(99, 99), .ty = 1, .net = 9, .expect = "anynet9" },
+ { .hdr = LK(20, 10), .ty = 1, .net = 9, .expect = "anynet9" },
+};
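/* Taking the first row as a worked example: LK(20, 10) on net 1 could match
 * "any", "local10" (local EID 10) or "remote20" (connected to peer EID 20);
 * since a peer-specific bind is preferred, the packet is expected to land on
 * the "remote20" socket.
 */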
+
+/* Binds to create during the lookup tests */
+static const struct mctp_test_bind_setup lookup_binds[] = {
+ /* any address and net, type 1 */
+ { .name = "any", .bind_addr = MCTP_ADDR_ANY,
+ .bind_net = MCTP_NET_ANY, .bind_type = 1, },
+ /* local eid 10, net 1 (resolved from MCTP_NET_ANY) */
+ { .name = "local10", .bind_addr = 10,
+ .bind_net = MCTP_NET_ANY, .bind_type = 1, },
+ /* local eid 10, net 8 */
+ { .name = "local10net8", .bind_addr = 10,
+ .bind_net = 8, .bind_type = 1, },
+ /* any EID, net 9 */
+ { .name = "anynet9", .bind_addr = MCTP_ADDR_ANY,
+ .bind_net = 9, .bind_type = 1, },
+
+ /* remote eid 20, net 1, any local eid */
+ { .name = "remote20", .bind_addr = MCTP_ADDR_ANY,
+ .bind_net = MCTP_NET_ANY, .bind_type = 1,
+ .have_peer = true, .peer_addr = 20, .peer_net = MCTP_NET_ANY, },
+
+ /* remote eid 21, net 1, local eid 11 */
+ { .name = "remote21local11", .bind_addr = 11,
+ .bind_net = MCTP_NET_ANY, .bind_type = 1,
+ .have_peer = true, .peer_addr = 21, .peer_net = MCTP_NET_ANY, },
+
+ /* remote eid 21, specific net=3 for connect() */
+ { .name = "remote21net3", .bind_addr = MCTP_ADDR_ANY,
+ .bind_net = MCTP_NET_ANY, .bind_type = 1,
+ .have_peer = true, .peer_addr = 21, .peer_net = 3, },
+
+ /* remote eid 21, net 4 for bind, specific net=4 for connect() */
+ { .name = "remote21net4", .bind_addr = MCTP_ADDR_ANY,
+ .bind_net = 4, .bind_type = 1,
+ .have_peer = true, .peer_addr = 21, .peer_net = 4, },
+
+ /* remote eid 21, net 5 for bind, specific net=5 for connect() */
+ { .name = "remote21net5", .bind_addr = MCTP_ADDR_ANY,
+ .bind_net = 5, .bind_type = 1,
+ .have_peer = true, .peer_addr = 21, .peer_net = 5, },
+};
+
+static void mctp_bind_lookup_desc(const struct mctp_bind_lookup_test *t,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE,
+ "{src %d dst %d ty %d net %d expect %s}",
+ t->hdr.src, t->hdr.dest, t->ty, t->net, t->expect);
+}
+
+KUNIT_ARRAY_PARAM(mctp_bind_lookup, mctp_bind_lookup_tests,
+ mctp_bind_lookup_desc);
+
+static void mctp_test_bind_lookup(struct kunit *test)
+{
+ const struct mctp_bind_lookup_test *rx;
+ struct socket *socks[ARRAY_SIZE(lookup_binds)];
+ struct sk_buff *skb_pkt = NULL, *skb_sock = NULL;
+ struct socket *sock_ty0, *sock_expect = NULL;
+ struct mctp_test_pktqueue tpq;
+ struct mctp_test_dev *dev;
+ struct mctp_dst dst;
+ int rc;
+
+ rx = test->param_value;
+
+ __mctp_route_test_init(test, &dev, &dst, &tpq, &sock_ty0, rx->net);
+ /* Create all binds */
+ for (size_t i = 0; i < ARRAY_SIZE(lookup_binds); i++) {
+ mctp_test_bind_run(test, &lookup_binds[i],
+ &rc, &socks[i]);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ /* Record the expected receive socket */
+ if (rx->expect &&
+ strcmp(rx->expect, lookup_binds[i].name) == 0) {
+ KUNIT_ASSERT_NULL(test, sock_expect);
+ sock_expect = socks[i];
+ }
+ }
+ KUNIT_ASSERT_EQ(test, !!sock_expect, !!rx->expect);
+
+ /* Create test message */
+ skb_pkt = mctp_test_create_skb_data(&rx->hdr, &rx->ty);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb_pkt);
+ mctp_test_skb_set_dev(skb_pkt, dev);
+ mctp_test_pktqueue_init(&tpq);
+
+ rc = mctp_dst_input(&dst, skb_pkt);
+ if (rx->expect) {
+ /* Test the message is received on the expected socket */
+ KUNIT_EXPECT_EQ(test, rc, 0);
+ skb_sock = skb_recv_datagram(sock_expect->sk,
+ MSG_DONTWAIT, &rc);
+ if (!skb_sock) {
+ /* Find which socket received it instead */
+ for (size_t i = 0; i < ARRAY_SIZE(lookup_binds); i++) {
+ skb_sock = skb_recv_datagram(socks[i]->sk,
+ MSG_DONTWAIT, &rc);
+ if (skb_sock) {
+ KUNIT_FAIL(test,
+ "received on incorrect socket '%s', expect '%s'",
+ lookup_binds[i].name,
+ rx->expect);
+ goto cleanup;
+ }
+ }
+ KUNIT_FAIL(test, "no message received");
+ }
+ } else {
+ KUNIT_EXPECT_NE(test, rc, 0);
+ }
+
+cleanup:
+ kfree_skb(skb_sock);
+
+ /* Drop all binds */
+ for (size_t i = 0; i < ARRAY_SIZE(lookup_binds); i++)
+ sock_release(socks[i]);
+
+ __mctp_route_test_fini(test, dev, &dst, &tpq, sock_ty0);
+}
+
static struct kunit_case mctp_test_cases[] = {
KUNIT_CASE_PARAM(mctp_test_fragment, mctp_frag_gen_params),
KUNIT_CASE_PARAM(mctp_test_rx_input, mctp_rx_input_gen_params),
@@ -1253,11 +1610,17 @@ static struct kunit_case mctp_test_cases[] = {
KUNIT_CASE(mctp_test_fragment_flow),
KUNIT_CASE(mctp_test_route_output_key_create),
KUNIT_CASE(mctp_test_route_input_cloned_frag),
+ KUNIT_CASE(mctp_test_route_extaddr_input),
+ KUNIT_CASE(mctp_test_route_gw_lookup),
+ KUNIT_CASE(mctp_test_route_gw_loop),
+ KUNIT_CASE_PARAM(mctp_test_route_gw_mtu, mctp_route_gw_mtu_gen_params),
+ KUNIT_CASE(mctp_test_route_gw_output),
+ KUNIT_CASE_PARAM(mctp_test_bind_lookup, mctp_bind_lookup_gen_params),
{}
};
static struct kunit_suite mctp_test_suite = {
- .name = "mctp",
+ .name = "mctp-route",
.test_cases = mctp_test_cases,
};
diff --git a/net/mctp/test/sock-test.c b/net/mctp/test/sock-test.c
new file mode 100644
index 000000000000..b0942deb5019
--- /dev/null
+++ b/net/mctp/test/sock-test.c
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <kunit/static_stub.h>
+#include <kunit/test.h>
+
+#include <linux/socket.h>
+#include <linux/spinlock.h>
+
+#include "utils.h"
+
+static const u8 dev_default_lladdr[] = { 0x01, 0x02 };
+
+/* helper for simple sock setup: single device, with dev_default_lladdr as its
+ * hardware address, assigned with a local EID 8, and a route to EID 9
+ */
+static void __mctp_sock_test_init(struct kunit *test,
+ struct mctp_test_dev **devp,
+ struct mctp_test_route **rtp,
+ struct socket **sockp)
+{
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct socket *sock;
+ unsigned long flags;
+ u8 *addrs;
+ int rc;
+
+ dev = mctp_test_create_dev_lladdr(sizeof(dev_default_lladdr),
+ dev_default_lladdr);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ addrs = kmalloc(1, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, addrs);
+ addrs[0] = 8;
+
+ spin_lock_irqsave(&dev->mdev->addrs_lock, flags);
+ dev->mdev->num_addrs = 1;
+ swap(addrs, dev->mdev->addrs);
+ spin_unlock_irqrestore(&dev->mdev->addrs_lock, flags);
+
+ kfree(addrs);
+
+ rt = mctp_test_create_route_direct(dev_net(dev->ndev), dev->mdev, 9, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
+
+ rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ *devp = dev;
+ *rtp = rt;
+ *sockp = sock;
+}
+
+static void __mctp_sock_test_fini(struct kunit *test,
+ struct mctp_test_dev *dev,
+ struct mctp_test_route *rt,
+ struct socket *sock)
+{
+ sock_release(sock);
+ mctp_test_route_destroy(test, rt);
+ mctp_test_destroy_dev(dev);
+}
+
+struct mctp_test_sock_local_output_config {
+ struct mctp_test_dev *dev;
+ size_t halen;
+ u8 haddr[MAX_ADDR_LEN];
+ bool invoked;
+ int rc;
+};
+
+static int mctp_test_sock_local_output(struct sock *sk,
+ struct mctp_dst *dst,
+ struct sk_buff *skb,
+ mctp_eid_t daddr, u8 req_tag)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct mctp_test_sock_local_output_config *cfg = test->priv;
+
+ KUNIT_EXPECT_PTR_EQ(test, dst->dev, cfg->dev->mdev);
+ KUNIT_EXPECT_EQ(test, dst->halen, cfg->halen);
+ KUNIT_EXPECT_MEMEQ(test, dst->haddr, cfg->haddr, dst->halen);
+
+ cfg->invoked = true;
+
+ kfree_skb(skb);
+
+ return cfg->rc;
+}
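/* Note: this stub is never called directly. The sendmsg test below installs
 * it over the real mctp_local_output() with kunit_activate_static_stub(), so
 * the expectations above run at the point the socket code performs its
 * output call; KUnit is expected to deactivate the stub automatically when
 * the test case finishes.
 */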
+
+static void mctp_test_sock_sendmsg_extaddr(struct kunit *test)
+{
+ struct sockaddr_mctp_ext addr = {
+ .smctp_base = {
+ .smctp_family = AF_MCTP,
+ .smctp_tag = MCTP_TAG_OWNER,
+ .smctp_network = MCTP_NET_ANY,
+ },
+ };
+ struct mctp_test_sock_local_output_config cfg = { 0 };
+ u8 haddr[] = { 0xaa, 0x01 };
+ u8 buf[4] = { 0, 1, 2, 3 };
+ struct mctp_test_route *rt;
+ struct msghdr msg = { 0 };
+ struct mctp_test_dev *dev;
+ struct mctp_sock *msk;
+ struct socket *sock;
+ ssize_t send_len;
+ struct kvec vec = {
+ .iov_base = buf,
+ .iov_len = sizeof(buf),
+ };
+
+ __mctp_sock_test_init(test, &dev, &rt, &sock);
+
+ /* Expect to see the dst configured up with the addressing data we
+ * provide in the struct sockaddr_mctp_ext
+ */
+ cfg.dev = dev;
+ cfg.halen = sizeof(haddr);
+ memcpy(cfg.haddr, haddr, sizeof(haddr));
+
+ test->priv = &cfg;
+
+ kunit_activate_static_stub(test, mctp_local_output,
+ mctp_test_sock_local_output);
+
+ /* enable and configure direct addressing */
+ msk = container_of(sock->sk, struct mctp_sock, sk);
+ msk->addr_ext = true;
+
+ addr.smctp_ifindex = dev->ndev->ifindex;
+ addr.smctp_halen = sizeof(haddr);
+ memcpy(addr.smctp_haddr, haddr, sizeof(haddr));
+
+ msg.msg_name = &addr;
+ msg.msg_namelen = sizeof(addr);
+
+ iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &vec, 1, sizeof(buf));
+ send_len = mctp_sendmsg(sock, &msg, sizeof(buf));
+ KUNIT_EXPECT_EQ(test, send_len, sizeof(buf));
+ KUNIT_EXPECT_TRUE(test, cfg.invoked);
+
+ __mctp_sock_test_fini(test, dev, rt, sock);
+}
+
+static void mctp_test_sock_recvmsg_extaddr(struct kunit *test)
+{
+ struct sockaddr_mctp_ext recv_addr = { 0 };
+ u8 rcv_buf[1], rcv_data[] = { 0, 1 };
+ u8 haddr[] = { 0xaa, 0x02 };
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct mctp_skb_cb *cb;
+ struct mctp_sock *msk;
+ struct sk_buff *skb;
+ struct mctp_hdr hdr;
+ struct socket *sock;
+ struct msghdr msg;
+ ssize_t recv_len;
+ int rc;
+ struct kvec vec = {
+ .iov_base = rcv_buf,
+ .iov_len = sizeof(rcv_buf),
+ };
+
+ __mctp_sock_test_init(test, &dev, &rt, &sock);
+
+ /* enable extended addressing on recv */
+ msk = container_of(sock->sk, struct mctp_sock, sk);
+ msk->addr_ext = true;
+
+ /* base incoming header, using a nul-EID dest */
+ hdr.ver = 1;
+ hdr.dest = 0;
+ hdr.src = 9;
+ hdr.flags_seq_tag = MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM |
+ MCTP_HDR_FLAG_TO;
+
+ skb = mctp_test_create_skb_data(&hdr, &rcv_data);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
+
+ mctp_test_skb_set_dev(skb, dev);
+
+ /* set incoming extended address data */
+ cb = mctp_cb(skb);
+ cb->halen = sizeof(haddr);
+ cb->ifindex = dev->ndev->ifindex;
+ memcpy(cb->haddr, haddr, sizeof(haddr));
+
+ /* Deliver to socket. The route input path pulls the network header,
+ * leaving skb data at type byte onwards. recvmsg will consume the
+ * type for addr.smctp_type
+ */
+ skb_pull(skb, sizeof(hdr));
+ rc = sock_queue_rcv_skb(sock->sk, skb);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ msg.msg_name = &recv_addr;
+ msg.msg_namelen = sizeof(recv_addr);
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, sizeof(rcv_buf));
+
+ recv_len = mctp_recvmsg(sock, &msg, sizeof(rcv_buf),
+ MSG_DONTWAIT | MSG_TRUNC);
+
+ KUNIT_EXPECT_EQ(test, recv_len, sizeof(rcv_buf));
+
+ /* expect our extended address to be populated from hdr and cb */
+ KUNIT_EXPECT_EQ(test, msg.msg_namelen, sizeof(recv_addr));
+ KUNIT_EXPECT_EQ(test, recv_addr.smctp_base.smctp_family, AF_MCTP);
+ KUNIT_EXPECT_EQ(test, recv_addr.smctp_ifindex, dev->ndev->ifindex);
+ KUNIT_EXPECT_EQ(test, recv_addr.smctp_halen, sizeof(haddr));
+ KUNIT_EXPECT_MEMEQ(test, recv_addr.smctp_haddr, haddr, sizeof(haddr));
+
+ __mctp_sock_test_fini(test, dev, rt, sock);
+}
+
+static const struct mctp_test_bind_setup bind_addrany_netdefault_type1 = {
+ .bind_addr = MCTP_ADDR_ANY, .bind_net = MCTP_NET_ANY, .bind_type = 1,
+};
+
+static const struct mctp_test_bind_setup bind_addrany_net2_type1 = {
+ .bind_addr = MCTP_ADDR_ANY, .bind_net = 2, .bind_type = 1,
+};
+
+/* 1 is default net */
+static const struct mctp_test_bind_setup bind_addr8_net1_type1 = {
+ .bind_addr = 8, .bind_net = 1, .bind_type = 1,
+};
+
+static const struct mctp_test_bind_setup bind_addrany_net1_type1 = {
+ .bind_addr = MCTP_ADDR_ANY, .bind_net = 1, .bind_type = 1,
+};
+
+/* 2 is an arbitrary net */
+static const struct mctp_test_bind_setup bind_addr8_net2_type1 = {
+ .bind_addr = 8, .bind_net = 2, .bind_type = 1,
+};
+
+static const struct mctp_test_bind_setup bind_addr8_netdefault_type1 = {
+ .bind_addr = 8, .bind_net = MCTP_NET_ANY, .bind_type = 1,
+};
+
+static const struct mctp_test_bind_setup bind_addrany_net2_type2 = {
+ .bind_addr = MCTP_ADDR_ANY, .bind_net = 2, .bind_type = 2,
+};
+
+static const struct mctp_test_bind_setup bind_addrany_net2_type1_peer9 = {
+ .bind_addr = MCTP_ADDR_ANY, .bind_net = 2, .bind_type = 1,
+ .have_peer = true, .peer_addr = 9, .peer_net = 2,
+};
+
+struct mctp_bind_pair_test {
+ const struct mctp_test_bind_setup *bind1;
+ const struct mctp_test_bind_setup *bind2;
+ int error;
+};
+
+/* Pairs of binds and whether they will conflict */
+static const struct mctp_bind_pair_test mctp_bind_pair_tests[] = {
+ /* Both ADDR_ANY, conflict */
+ { &bind_addrany_netdefault_type1, &bind_addrany_netdefault_type1,
+ EADDRINUSE },
+ /* Same specific EID, conflict */
+ { &bind_addr8_netdefault_type1, &bind_addr8_netdefault_type1,
+ EADDRINUSE },
+ /* ADDR_ANY vs specific EID, OK */
+ { &bind_addrany_netdefault_type1, &bind_addr8_netdefault_type1, 0 },
+ /* ADDR_ANY different types, OK */
+ { &bind_addrany_net2_type2, &bind_addrany_net2_type1, 0 },
+ /* ADDR_ANY different nets, OK */
+ { &bind_addrany_net2_type1, &bind_addrany_netdefault_type1, 0 },
+
+ /* specific EID, NET_ANY (resolves to default)
+ * vs specific EID, explicit default net 1, conflict
+ */
+ { &bind_addr8_netdefault_type1, &bind_addr8_net1_type1, EADDRINUSE },
+
+ /* specific EID, net 1 vs specific EID, net 2, ok */
+ { &bind_addr8_net1_type1, &bind_addr8_net2_type1, 0 },
+
+ /* ADDR_ANY, NET_ANY (doesn't resolve to default)
+ * vs ADDR_ANY, explicit default net 1, OK
+ */
+ { &bind_addrany_netdefault_type1, &bind_addrany_net1_type1, 0 },
+
+ /* specific remote peer doesn't conflict with any-peer bind */
+ { &bind_addrany_net2_type1_peer9, &bind_addrany_net2_type1, 0 },
+
+ /* bind() NET_ANY is allowed with a connect() net */
+ { &bind_addrany_net2_type1_peer9, &bind_addrany_netdefault_type1, 0 },
+};
+
+static void mctp_bind_pair_desc(const struct mctp_bind_pair_test *t, char *desc)
+{
+ char peer1[25] = {0}, peer2[25] = {0};
+
+ if (t->bind1->have_peer)
+ snprintf(peer1, sizeof(peer1), ", peer %d net %d",
+ t->bind1->peer_addr, t->bind1->peer_net);
+ if (t->bind2->have_peer)
+ snprintf(peer2, sizeof(peer2), ", peer %d net %d",
+ t->bind2->peer_addr, t->bind2->peer_net);
+
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE,
+ "{bind(addr %d, type %d, net %d%s)} {bind(addr %d, type %d, net %d%s)} -> error %d",
+ t->bind1->bind_addr, t->bind1->bind_type,
+ t->bind1->bind_net, peer1,
+ t->bind2->bind_addr, t->bind2->bind_type,
+ t->bind2->bind_net, peer2, t->error);
+}
+
+KUNIT_ARRAY_PARAM(mctp_bind_pair, mctp_bind_pair_tests, mctp_bind_pair_desc);
+
+static void mctp_test_bind_invalid(struct kunit *test)
+{
+ struct socket *sock;
+ int rc;
+
+ /* bind() fails if the bind() vs connect() networks mismatch. */
+ const struct mctp_test_bind_setup bind_connect_net_mismatch = {
+ .bind_addr = MCTP_ADDR_ANY, .bind_net = 1, .bind_type = 1,
+ .have_peer = true, .peer_addr = 9, .peer_net = 2,
+ };
+ mctp_test_bind_run(test, &bind_connect_net_mismatch, &rc, &sock);
+ KUNIT_EXPECT_EQ(test, -rc, EINVAL);
+ sock_release(sock);
+}
+
+static int
+mctp_test_bind_conflicts_inner(struct kunit *test,
+ const struct mctp_test_bind_setup *bind1,
+ const struct mctp_test_bind_setup *bind2)
+{
+ struct socket *sock1 = NULL, *sock2 = NULL, *sock3 = NULL;
+ int bind_errno;
+
+ /* Bind to first address, always succeeds */
+ mctp_test_bind_run(test, bind1, &bind_errno, &sock1);
+ KUNIT_EXPECT_EQ(test, bind_errno, 0);
+
+ /* A second identical bind always fails */
+ mctp_test_bind_run(test, bind1, &bind_errno, &sock2);
+ KUNIT_EXPECT_EQ(test, -bind_errno, EADDRINUSE);
+
+ /* A different bind; its result is returned to the caller */
+ mctp_test_bind_run(test, bind2, &bind_errno, &sock3);
+
+ if (sock1)
+ sock_release(sock1);
+ if (sock2)
+ sock_release(sock2);
+ if (sock3)
+ sock_release(sock3);
+
+ return bind_errno;
+}
+
+static void mctp_test_bind_conflicts(struct kunit *test)
+{
+ const struct mctp_bind_pair_test *pair;
+ int bind_errno;
+
+ pair = test->param_value;
+
+ bind_errno =
+ mctp_test_bind_conflicts_inner(test, pair->bind1, pair->bind2);
+ KUNIT_EXPECT_EQ(test, -bind_errno, pair->error);
+
+ /* swap the bind order; the expected result is the same */
+ bind_errno =
+ mctp_test_bind_conflicts_inner(test, pair->bind2, pair->bind1);
+ KUNIT_EXPECT_EQ(test, -bind_errno, pair->error);
+}
+
+static void mctp_test_assumptions(struct kunit *test)
+{
+ /* check assumption of default net from bind_addr8_net1_type1 */
+ KUNIT_ASSERT_EQ(test, mctp_default_net(&init_net), 1);
+}
+
+static struct kunit_case mctp_test_cases[] = {
+ KUNIT_CASE(mctp_test_assumptions),
+ KUNIT_CASE(mctp_test_sock_sendmsg_extaddr),
+ KUNIT_CASE(mctp_test_sock_recvmsg_extaddr),
+ KUNIT_CASE_PARAM(mctp_test_bind_conflicts, mctp_bind_pair_gen_params),
+ KUNIT_CASE(mctp_test_bind_invalid),
+ {}
+};
+
+static struct kunit_suite mctp_test_suite = {
+ .name = "mctp-sock",
+ .test_cases = mctp_test_cases,
+};
+
+kunit_test_suite(mctp_test_suite);
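For context, the bind-conflict matrix exercised above matches what userspace sees when two AF_MCTP sockets bind overlapping (network, EID, type) tuples. A minimal, illustrative userspace sketch (not part of the patch; it only assumes the uapi definitions these tests already reference):

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <linux/mctp.h>

	int main(void)
	{
		struct sockaddr_mctp addr = {
			.smctp_family = AF_MCTP,
			.smctp_network = MCTP_NET_ANY,
			.smctp_addr.s_addr = MCTP_ADDR_ANY,
			.smctp_type = 1,
		};
		int s1 = socket(AF_MCTP, SOCK_DGRAM, 0);
		int s2 = socket(AF_MCTP, SOCK_DGRAM, 0);

		/* first bind succeeds */
		bind(s1, (struct sockaddr *)&addr, sizeof(addr));

		/* an identical second bind is expected to fail with EADDRINUSE,
		 * matching the first entry of mctp_bind_pair_tests[]
		 */
		if (bind(s2, (struct sockaddr *)&addr, sizeof(addr)) < 0)
			printf("second bind: %s\n", strerror(errno));

		return 0;
	}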
diff --git a/net/mctp/test/utils.c b/net/mctp/test/utils.c
index 565763eb0211..953d41902771 100644
--- a/net/mctp/test/utils.c
+++ b/net/mctp/test/utils.c
@@ -26,19 +26,22 @@ static void mctp_test_dev_setup(struct net_device *ndev)
ndev->type = ARPHRD_MCTP;
ndev->mtu = MCTP_DEV_TEST_MTU;
ndev->hard_header_len = 0;
- ndev->addr_len = 0;
ndev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
ndev->flags = IFF_NOARP;
ndev->netdev_ops = &mctp_test_netdev_ops;
ndev->needs_free_netdev = true;
}
-struct mctp_test_dev *mctp_test_create_dev(void)
+static struct mctp_test_dev *__mctp_test_create_dev(unsigned short lladdr_len,
+ const unsigned char *lladdr)
{
struct mctp_test_dev *dev;
struct net_device *ndev;
int rc;
+ if (WARN_ON(lladdr_len > MAX_ADDR_LEN))
+ return NULL;
+
ndev = alloc_netdev(sizeof(*dev), "mctptest%d", NET_NAME_ENUM,
mctp_test_dev_setup);
if (!ndev)
@@ -46,6 +49,8 @@ struct mctp_test_dev *mctp_test_create_dev(void)
dev = netdev_priv(ndev);
dev->ndev = ndev;
+ ndev->addr_len = lladdr_len;
+ dev_addr_set(ndev, lladdr);
rc = register_netdev(ndev);
if (rc) {
@@ -61,8 +66,231 @@ struct mctp_test_dev *mctp_test_create_dev(void)
return dev;
}
+struct mctp_test_dev *mctp_test_create_dev(void)
+{
+ return __mctp_test_create_dev(0, NULL);
+}
+
+struct mctp_test_dev *mctp_test_create_dev_lladdr(unsigned short lladdr_len,
+ const unsigned char *lladdr)
+{
+ return __mctp_test_create_dev(lladdr_len, lladdr);
+}
+
void mctp_test_destroy_dev(struct mctp_test_dev *dev)
{
mctp_dev_put(dev->mdev);
unregister_netdev(dev->ndev);
}
+
+static const unsigned int test_pktqueue_magic = 0x5f713aef;
+
+void mctp_test_pktqueue_init(struct mctp_test_pktqueue *tpq)
+{
+ tpq->magic = test_pktqueue_magic;
+ skb_queue_head_init(&tpq->pkts);
+}
+
+static int mctp_test_dst_output(struct mctp_dst *dst, struct sk_buff *skb)
+{
+ struct kunit *test = current->kunit_test;
+ struct mctp_test_pktqueue *tpq = test->priv;
+
+ KUNIT_ASSERT_EQ(test, tpq->magic, test_pktqueue_magic);
+
+ skb_queue_tail(&tpq->pkts, skb);
+
+ return 0;
+}
+
+/* local version of mctp_route_alloc() */
+static struct mctp_test_route *mctp_route_test_alloc(void)
+{
+ struct mctp_test_route *rt;
+
+ rt = kzalloc(sizeof(*rt), GFP_KERNEL);
+ if (!rt)
+ return NULL;
+
+ INIT_LIST_HEAD(&rt->rt.list);
+ refcount_set(&rt->rt.refs, 1);
+ rt->rt.output = mctp_test_dst_output;
+
+ return rt;
+}
+
+struct mctp_test_route *mctp_test_create_route_direct(struct net *net,
+ struct mctp_dev *dev,
+ mctp_eid_t eid,
+ unsigned int mtu)
+{
+ struct mctp_test_route *rt;
+
+ rt = mctp_route_test_alloc();
+ if (!rt)
+ return NULL;
+
+ rt->rt.min = eid;
+ rt->rt.max = eid;
+ rt->rt.mtu = mtu;
+ rt->rt.type = RTN_UNSPEC;
+ rt->rt.dst_type = MCTP_ROUTE_DIRECT;
+ if (dev)
+ mctp_dev_hold(dev);
+ rt->rt.dev = dev;
+
+ list_add_rcu(&rt->rt.list, &net->mctp.routes);
+
+ return rt;
+}
+
+struct mctp_test_route *mctp_test_create_route_gw(struct net *net,
+ unsigned int netid,
+ mctp_eid_t eid,
+ mctp_eid_t gw,
+ unsigned int mtu)
+{
+ struct mctp_test_route *rt;
+
+ rt = mctp_route_test_alloc();
+ if (!rt)
+ return NULL;
+
+ rt->rt.min = eid;
+ rt->rt.max = eid;
+ rt->rt.mtu = mtu;
+ rt->rt.type = RTN_UNSPEC;
+ rt->rt.dst_type = MCTP_ROUTE_GATEWAY;
+ rt->rt.gateway.eid = gw;
+ rt->rt.gateway.net = netid;
+
+ list_add_rcu(&rt->rt.list, &net->mctp.routes);
+
+ return rt;
+}
+
+/* Convenience function for our test dst; release with mctp_test_dst_release()
+ */
+void mctp_test_dst_setup(struct kunit *test, struct mctp_dst *dst,
+ struct mctp_test_dev *dev,
+ struct mctp_test_pktqueue *tpq, unsigned int mtu)
+{
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, dev);
+
+ memset(dst, 0, sizeof(*dst));
+
+ dst->dev = dev->mdev;
+ __mctp_dev_get(dst->dev->dev);
+ dst->mtu = mtu;
+ dst->output = mctp_test_dst_output;
+ mctp_test_pktqueue_init(tpq);
+ test->priv = tpq;
+}
+
+void mctp_test_dst_release(struct mctp_dst *dst,
+ struct mctp_test_pktqueue *tpq)
+{
+ mctp_dst_release(dst);
+ skb_queue_purge(&tpq->pkts);
+}
+
+void mctp_test_route_destroy(struct kunit *test, struct mctp_test_route *rt)
+{
+ unsigned int refs;
+
+ rtnl_lock();
+ list_del_rcu(&rt->rt.list);
+ rtnl_unlock();
+
+ if (rt->rt.dst_type == MCTP_ROUTE_DIRECT && rt->rt.dev)
+ mctp_dev_put(rt->rt.dev);
+
+ refs = refcount_read(&rt->rt.refs);
+ KUNIT_ASSERT_EQ_MSG(test, refs, 1, "route ref imbalance");
+
+ kfree_rcu(&rt->rt, rcu);
+}
+
+void mctp_test_skb_set_dev(struct sk_buff *skb, struct mctp_test_dev *dev)
+{
+ struct mctp_skb_cb *cb;
+
+ cb = mctp_cb(skb);
+ cb->net = READ_ONCE(dev->mdev->net);
+ skb->dev = dev->ndev;
+}
+
+struct sk_buff *mctp_test_create_skb(const struct mctp_hdr *hdr,
+ unsigned int data_len)
+{
+ size_t hdr_len = sizeof(*hdr);
+ struct sk_buff *skb;
+ unsigned int i;
+ u8 *buf;
+
+ skb = alloc_skb(hdr_len + data_len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ __mctp_cb(skb);
+ memcpy(skb_put(skb, hdr_len), hdr, hdr_len);
+
+ buf = skb_put(skb, data_len);
+ for (i = 0; i < data_len; i++)
+ buf[i] = i & 0xff;
+
+ return skb;
+}
+
+struct sk_buff *__mctp_test_create_skb_data(const struct mctp_hdr *hdr,
+ const void *data, size_t data_len)
+{
+ size_t hdr_len = sizeof(*hdr);
+ struct sk_buff *skb;
+
+ skb = alloc_skb(hdr_len + data_len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ __mctp_cb(skb);
+ memcpy(skb_put(skb, hdr_len), hdr, hdr_len);
+ memcpy(skb_put(skb, data_len), data, data_len);
+
+ return skb;
+}
+
+void mctp_test_bind_run(struct kunit *test,
+ const struct mctp_test_bind_setup *setup,
+ int *ret_bind_errno, struct socket **sock)
+{
+ struct sockaddr_mctp addr;
+ int rc;
+
+ *ret_bind_errno = -EIO;
+
+ rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, sock);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ /* connect() if requested */
+ if (setup->have_peer) {
+ memset(&addr, 0x0, sizeof(addr));
+ addr.smctp_family = AF_MCTP;
+ addr.smctp_network = setup->peer_net;
+ addr.smctp_addr.s_addr = setup->peer_addr;
+ /* connect() type must match bind() type */
+ addr.smctp_type = setup->bind_type;
+ rc = kernel_connect(*sock, (struct sockaddr *)&addr,
+ sizeof(addr), 0);
+ KUNIT_EXPECT_EQ(test, rc, 0);
+ }
+
+ /* bind() */
+ memset(&addr, 0x0, sizeof(addr));
+ addr.smctp_family = AF_MCTP;
+ addr.smctp_network = setup->bind_net;
+ addr.smctp_addr.s_addr = setup->bind_addr;
+ addr.smctp_type = setup->bind_type;
+
+ *ret_bind_errno =
+ kernel_bind(*sock, (struct sockaddr *)&addr, sizeof(addr));
+}
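The helpers added above are intended to be composed inside KUnit cases; a rough sketch of the call pattern, using only names declared in this file (illustrative only, not a test from the patch):

	static void example_dst_case(struct kunit *test)
	{
		struct mctp_test_pktqueue tpq;
		struct mctp_test_dev *dev;
		struct mctp_dst dst;

		dev = mctp_test_create_dev();
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

		/* dst->output is redirected to the test pktqueue via
		 * mctp_test_dst_output(), so transmitted skbs land in tpq.pkts
		 */
		mctp_test_dst_setup(test, &dst, dev, &tpq, MCTP_DEV_TEST_MTU);

		/* ... transmit through the dst, then inspect tpq.pkts ... */

		mctp_test_dst_release(&dst, &tpq);
		mctp_test_destroy_dev(dev);
	}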
diff --git a/net/mctp/test/utils.h b/net/mctp/test/utils.h
index df6aa1c03440..06bdb6cb5eff 100644
--- a/net/mctp/test/utils.h
+++ b/net/mctp/test/utils.h
@@ -3,6 +3,11 @@
#ifndef __NET_MCTP_TEST_UTILS_H
#define __NET_MCTP_TEST_UTILS_H
+#include <uapi/linux/netdevice.h>
+
+#include <net/mctp.h>
+#include <net/mctpdevice.h>
+
#include <kunit/test.h>
#define MCTP_DEV_TEST_MTU 68
@@ -10,11 +15,67 @@
struct mctp_test_dev {
struct net_device *ndev;
struct mctp_dev *mdev;
+
+ unsigned short lladdr_len;
+ unsigned char lladdr[MAX_ADDR_LEN];
};
struct mctp_test_dev;
+struct mctp_test_route {
+ struct mctp_route rt;
+};
+
+struct mctp_test_pktqueue {
+ unsigned int magic;
+ struct sk_buff_head pkts;
+};
+
+struct mctp_test_bind_setup {
+ mctp_eid_t bind_addr;
+ int bind_net;
+ u8 bind_type;
+
+ bool have_peer;
+ mctp_eid_t peer_addr;
+ int peer_net;
+
+ /* optional name. Used for comparison in "lookup" tests */
+ const char *name;
+};
+
struct mctp_test_dev *mctp_test_create_dev(void);
+struct mctp_test_dev *mctp_test_create_dev_lladdr(unsigned short lladdr_len,
+ const unsigned char *lladdr);
void mctp_test_destroy_dev(struct mctp_test_dev *dev);
+struct mctp_test_route *mctp_test_create_route_direct(struct net *net,
+ struct mctp_dev *dev,
+ mctp_eid_t eid,
+ unsigned int mtu);
+struct mctp_test_route *mctp_test_create_route_gw(struct net *net,
+ unsigned int netid,
+ mctp_eid_t eid,
+ mctp_eid_t gw,
+ unsigned int mtu);
+void mctp_test_dst_setup(struct kunit *test, struct mctp_dst *dst,
+ struct mctp_test_dev *dev,
+ struct mctp_test_pktqueue *tpq, unsigned int mtu);
+void mctp_test_dst_release(struct mctp_dst *dst,
+ struct mctp_test_pktqueue *tpq);
+void mctp_test_pktqueue_init(struct mctp_test_pktqueue *tpq);
+void mctp_test_route_destroy(struct kunit *test, struct mctp_test_route *rt);
+void mctp_test_skb_set_dev(struct sk_buff *skb, struct mctp_test_dev *dev);
+struct sk_buff *mctp_test_create_skb(const struct mctp_hdr *hdr,
+ unsigned int data_len);
+struct sk_buff *__mctp_test_create_skb_data(const struct mctp_hdr *hdr,
+ const void *data, size_t data_len);
+
+#define mctp_test_create_skb_data(h, d) \
+ __mctp_test_create_skb_data(h, d, sizeof(*d))
+
+void mctp_test_bind_run(struct kunit *test,
+ const struct mctp_test_bind_setup *setup,
+ int *ret_bind_errno, struct socket **sock);
+
#endif /* __NET_MCTP_TEST_UTILS_H */
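One usage note on the convenience macro above: it expands to sizeof(*d), so the data argument must point at the whole object rather than a decayed pointer, as in the recvmsg test earlier in this series:

	u8 rcv_data[] = { 0, 1 };
	/* sizeof(*&rcv_data) == sizeof(rcv_data) == 2 */
	skb = mctp_test_create_skb_data(&hdr, &rcv_data);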
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index d536c97144e9..25c88cba5c48 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -81,8 +81,8 @@ static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
if (index < net->mpls.platform_labels) {
struct mpls_route __rcu **platform_label =
- rcu_dereference(net->mpls.platform_label);
- rt = rcu_dereference(platform_label[index]);
+ rcu_dereference_rtnl(net->mpls.platform_label);
+ rt = rcu_dereference_rtnl(platform_label[index]);
}
return rt;
}
@@ -706,7 +706,7 @@ static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
} else {
unsigned int flags;
- flags = dev_get_flags(dev);
+ flags = netif_get_flags(dev);
if (!(flags & (IFF_RUNNING | IFF_LOWER_UP)))
nh->nh_flags |= RTNH_F_LINKDOWN;
}
@@ -1616,14 +1616,14 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
return notifier_from_errno(err);
break;
case NETDEV_UP:
- flags = dev_get_flags(dev);
+ flags = netif_get_flags(dev);
if (flags & (IFF_RUNNING | IFF_LOWER_UP))
mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
else
mpls_ifup(dev, RTNH_F_DEAD);
break;
case NETDEV_CHANGE:
- flags = dev_get_flags(dev);
+ flags = netif_get_flags(dev);
if (flags & (IFF_RUNNING | IFF_LOWER_UP)) {
mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
} else {
diff --git a/net/mptcp/crypto.c b/net/mptcp/crypto.c
index b08ba959ac4f..31948e18d97d 100644
--- a/net/mptcp/crypto.c
+++ b/net/mptcp/crypto.c
@@ -22,7 +22,6 @@
#include <linux/kernel.h>
#include <crypto/sha2.h>
-#include <linux/unaligned.h>
#include "protocol.h"
@@ -43,39 +42,9 @@ void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn)
void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac)
{
- u8 input[SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE];
- u8 key1be[8];
- u8 key2be[8];
- int i;
+ __be64 key[2] = { cpu_to_be64(key1), cpu_to_be64(key2) };
- if (WARN_ON_ONCE(len > SHA256_DIGEST_SIZE))
- len = SHA256_DIGEST_SIZE;
-
- put_unaligned_be64(key1, key1be);
- put_unaligned_be64(key2, key2be);
-
- /* Generate key xored with ipad */
- memset(input, 0x36, SHA256_BLOCK_SIZE);
- for (i = 0; i < 8; i++)
- input[i] ^= key1be[i];
- for (i = 0; i < 8; i++)
- input[i + 8] ^= key2be[i];
-
- memcpy(&input[SHA256_BLOCK_SIZE], msg, len);
-
- /* emit sha256(K1 || msg) on the second input block, so we can
- * reuse 'input' for the last hashing
- */
- sha256(input, SHA256_BLOCK_SIZE + len, &input[SHA256_BLOCK_SIZE]);
-
- /* Prepare second part of hmac */
- memset(input, 0x5C, SHA256_BLOCK_SIZE);
- for (i = 0; i < 8; i++)
- input[i] ^= key1be[i];
- for (i = 0; i < 8; i++)
- input[i + 8] ^= key2be[i];
-
- sha256(input, SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE, hmac);
+ hmac_sha256_usingrawkey((const u8 *)key, sizeof(key), msg, len, hmac);
}
#if IS_MODULE(CONFIG_MPTCP_KUNIT_TEST)
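For reference, the one-liner above relies on the standard HMAC construction that the removed open-coded version implemented by hand; roughly, with the 16-byte raw key K = be64(key1) || be64(key2):

	/*
	 * HMAC-SHA256(K, msg) = SHA256((K' ^ opad) || SHA256((K' ^ ipad) || msg))
	 * where K' is K zero-padded to the 64-byte SHA-256 block size,
	 * ipad = 0x36 repeated, opad = 0x5c repeated -- i.e. the two
	 * sha256() passes the deleted code performed over 'input'.
	 */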
diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c
index d9290c5bb6c7..d96130e49942 100644
--- a/net/mptcp/ctrl.c
+++ b/net/mptcp/ctrl.c
@@ -501,10 +501,15 @@ void mptcp_active_enable(struct sock *sk)
struct mptcp_pernet *pernet = mptcp_get_pernet(sock_net(sk));
if (atomic_read(&pernet->active_disable_times)) {
- struct dst_entry *dst = sk_dst_get(sk);
+ struct net_device *dev;
+ struct dst_entry *dst;
- if (dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK))
+ rcu_read_lock();
+ dst = __sk_dst_get(sk);
+ dev = dst ? dst_dev_rcu(dst) : NULL;
+ if (!(dev && (dev->flags & IFF_LOOPBACK)))
atomic_set(&pernet->active_disable_times, 0);
+ rcu_read_unlock();
}
}
@@ -533,9 +538,9 @@ void mptcp_active_detect_blackhole(struct sock *ssk, bool expired)
to_max = mptcp_get_pernet(net)->syn_retrans_before_tcp_fallback;
if (timeouts == to_max || (timeouts < to_max && expired)) {
- MPTCP_INC_STATS(net, MPTCP_MIB_MPCAPABLEACTIVEDROP);
subflow->mpc_drop = 1;
- mptcp_subflow_early_fallback(mptcp_sk(subflow->conn), subflow);
+ mptcp_early_fallback(mptcp_sk(subflow->conn), subflow,
+ MPTCP_MIB_MPCAPABLEACTIVEDROP);
}
}
diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
index 0c24545f0e8d..6003e47c770a 100644
--- a/net/mptcp/mib.c
+++ b/net/mptcp/mib.c
@@ -80,7 +80,11 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("RcvWndConflict", MPTCP_MIB_RCVWNDCONFLICT),
SNMP_MIB_ITEM("MPCurrEstab", MPTCP_MIB_CURRESTAB),
SNMP_MIB_ITEM("Blackhole", MPTCP_MIB_BLACKHOLE),
- SNMP_MIB_SENTINEL
+ SNMP_MIB_ITEM("MPCapableDataFallback", MPTCP_MIB_MPCAPABLEDATAFALLBACK),
+ SNMP_MIB_ITEM("MD5SigFallback", MPTCP_MIB_MD5SIGFALLBACK),
+ SNMP_MIB_ITEM("DssFallback", MPTCP_MIB_DSSFALLBACK),
+ SNMP_MIB_ITEM("SimultConnectFallback", MPTCP_MIB_SIMULTCONNFALLBACK),
+ SNMP_MIB_ITEM("FallbackFailed", MPTCP_MIB_FALLBACKFAILED),
};
/* mptcp_mib_alloc - allocate percpu mib counters
@@ -103,22 +107,23 @@ bool mptcp_mib_alloc(struct net *net)
void mptcp_seq_show(struct seq_file *seq)
{
- unsigned long sum[ARRAY_SIZE(mptcp_snmp_list) - 1];
+ unsigned long sum[ARRAY_SIZE(mptcp_snmp_list)];
+ const int cnt = ARRAY_SIZE(mptcp_snmp_list);
struct net *net = seq->private;
int i;
seq_puts(seq, "MPTcpExt:");
- for (i = 0; mptcp_snmp_list[i].name; i++)
+ for (i = 0; i < cnt; i++)
seq_printf(seq, " %s", mptcp_snmp_list[i].name);
seq_puts(seq, "\nMPTcpExt:");
memset(sum, 0, sizeof(sum));
if (net->mib.mptcp_statistics)
- snmp_get_cpu_field_batch(sum, mptcp_snmp_list,
- net->mib.mptcp_statistics);
+ snmp_get_cpu_field_batch_cnt(sum, mptcp_snmp_list, cnt,
+ net->mib.mptcp_statistics);
- for (i = 0; mptcp_snmp_list[i].name; i++)
+ for (i = 0; i < cnt; i++)
seq_printf(seq, " %lu", sum[i]);
seq_putc(seq, '\n');
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
index 250c6b77977e..309bac6fea32 100644
--- a/net/mptcp/mib.h
+++ b/net/mptcp/mib.h
@@ -81,6 +81,13 @@ enum linux_mptcp_mib_field {
MPTCP_MIB_RCVWNDCONFLICT, /* Conflict with while updating msk rcv wnd */
MPTCP_MIB_CURRESTAB, /* Current established MPTCP connections */
MPTCP_MIB_BLACKHOLE, /* A blackhole has been detected */
+ MPTCP_MIB_MPCAPABLEDATAFALLBACK, /* Missing DSS/MPC+data on first
+ * established packet
+ */
+ MPTCP_MIB_MD5SIGFALLBACK, /* Conflicting TCP option enabled */
+ MPTCP_MIB_DSSFALLBACK, /* Bad or missing DSS */
+ MPTCP_MIB_SIMULTCONNFALLBACK, /* Simultaneous connect */
+ MPTCP_MIB_FALLBACKFAILED, /* Can't fallback due to msk status */
__MPTCP_MIB_MAX
};
diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c
index 0566dd793810..ac974299de71 100644
--- a/net/mptcp/mptcp_diag.c
+++ b/net/mptcp/mptcp_diag.c
@@ -15,9 +15,9 @@
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *req,
- struct nlattr *bc, bool net_admin)
+ bool net_admin)
{
- if (!inet_diag_bc_sk(bc, sk))
+ if (!inet_diag_bc_sk(cb->data, sk))
return 0;
return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, req, NLM_F_MULTI,
@@ -76,9 +76,7 @@ static void mptcp_diag_dump_listeners(struct sk_buff *skb, struct netlink_callba
const struct inet_diag_req_v2 *r,
bool net_admin)
{
- struct inet_diag_dump_data *cb_data = cb->data;
struct mptcp_diag_ctx *diag_ctx = (void *)cb->ctx;
- struct nlattr *bc = cb_data->inet_diag_nla_bc;
struct net *net = sock_net(skb->sk);
struct inet_hashinfo *hinfo;
int i;
@@ -121,7 +119,7 @@ static void mptcp_diag_dump_listeners(struct sk_buff *skb, struct netlink_callba
if (!refcount_inc_not_zero(&sk->sk_refcnt))
goto next_listen;
- ret = sk_diag_dump(sk, skb, cb, r, bc, net_admin);
+ ret = sk_diag_dump(sk, skb, cb, r, net_admin);
sock_put(sk);
@@ -154,15 +152,10 @@ static void mptcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
struct mptcp_diag_ctx *diag_ctx = (void *)cb->ctx;
struct net *net = sock_net(skb->sk);
- struct inet_diag_dump_data *cb_data;
struct mptcp_sock *msk;
- struct nlattr *bc;
BUILD_BUG_ON(sizeof(cb->ctx) < sizeof(*diag_ctx));
- cb_data = cb->data;
- bc = cb_data->inet_diag_nla_bc;
-
while ((msk = mptcp_token_iter_next(net, &diag_ctx->s_slot,
&diag_ctx->s_num)) != NULL) {
struct inet_sock *inet = (struct inet_sock *)msk;
@@ -181,7 +174,7 @@ static void mptcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
r->id.idiag_dport)
goto next;
- ret = sk_diag_dump(sk, skb, cb, r, bc, net_admin);
+ ret = sk_diag_dump(sk, skb, cb, r, net_admin);
next:
sock_put(sk);
if (ret < 0) {
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 421ced031289..1103b3341a70 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -978,18 +978,20 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
if (subflow->mp_join)
goto reset;
subflow->mp_capable = 0;
- pr_fallback(msk);
- mptcp_do_fallback(ssk);
+ if (!mptcp_try_fallback(ssk, MPTCP_MIB_MPCAPABLEDATAFALLBACK)) {
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_FALLBACKFAILED);
+ goto reset;
+ }
return false;
}
- if (mp_opt->deny_join_id0)
- WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
-
if (unlikely(!READ_ONCE(msk->pm.server_side)))
pr_warn_once("bogus mpc option on established client sk");
set_fully_established:
+ if (mp_opt->deny_join_id0)
+ WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
+
mptcp_data_lock((struct sock *)msk);
__mptcp_subflow_fully_established(msk, subflow, mp_opt);
mptcp_data_unlock((struct sock *)msk);
@@ -1116,7 +1118,9 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk,
return hmac == mp_opt->ahmac;
}
-/* Return false if a subflow has been reset, else return true */
+/* Return false in case of error (or subflow has been reset),
+ * else return true.
+ */
bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
@@ -1220,7 +1224,7 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
if (!mpext)
- return true;
+ return false;
memset(mpext, 0, sizeof(*mpext));
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index feb01747d7d8..2ff1b9499568 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -268,12 +268,34 @@ int mptcp_pm_mp_prio_send_ack(struct mptcp_sock *msk,
return -EINVAL;
}
+static unsigned int mptcp_adjust_add_addr_timeout(struct mptcp_sock *msk)
+{
+ const struct net *net = sock_net((struct sock *)msk);
+ unsigned int rto = mptcp_get_add_addr_timeout(net);
+ struct mptcp_subflow_context *subflow;
+ unsigned int max = 0;
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ struct inet_connection_sock *icsk = inet_csk(ssk);
+
+ if (icsk->icsk_rto > max)
+ max = icsk->icsk_rto;
+ }
+
+ if (max && max < rto)
+ rto = max;
+
+ return rto;
+}
+
static void mptcp_pm_add_timer(struct timer_list *timer)
{
struct mptcp_pm_add_entry *entry = timer_container_of(entry, timer,
add_timer);
struct mptcp_sock *msk = entry->sock;
struct sock *sk = (struct sock *)msk;
+ unsigned int timeout;
pr_debug("msk=%p\n", msk);
@@ -291,6 +313,10 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
goto out;
}
+ timeout = mptcp_adjust_add_addr_timeout(msk);
+ if (!timeout)
+ goto out;
+
spin_lock_bh(&msk->pm.lock);
if (!mptcp_pm_should_add_signal_addr(msk)) {
@@ -302,7 +328,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
if (entry->retrans_times < ADD_ADDR_RETRANS_MAX)
sk_reset_timer(sk, timer,
- jiffies + mptcp_get_add_addr_timeout(sock_net(sk)));
+ jiffies + (timeout << entry->retrans_times));
spin_unlock_bh(&msk->pm.lock);
@@ -343,7 +369,7 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
{
struct mptcp_pm_add_entry *add_entry = NULL;
struct sock *sk = (struct sock *)msk;
- struct net *net = sock_net(sk);
+ unsigned int timeout;
lockdep_assert_held(&msk->pm.lock);
@@ -353,9 +379,7 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk)))
return false;
- sk_reset_timer(sk, &add_entry->add_timer,
- jiffies + mptcp_get_add_addr_timeout(net));
- return true;
+ goto reset_timer;
}
add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC);
@@ -369,8 +393,10 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
add_entry->retrans_times = 0;
timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0);
- sk_reset_timer(sk, &add_entry->add_timer,
- jiffies + mptcp_get_add_addr_timeout(net));
+reset_timer:
+ timeout = mptcp_adjust_add_addr_timeout(msk);
+ if (timeout)
+ sk_reset_timer(sk, &add_entry->add_timer, jiffies + timeout);
return true;
}
@@ -457,23 +483,24 @@ void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
struct mptcp_pm_data *pm = &msk->pm;
- unsigned int subflows_max;
+ unsigned int limit_extra_subflows;
int ret = 0;
if (mptcp_pm_is_userspace(msk)) {
if (mptcp_userspace_pm_active(msk)) {
spin_lock_bh(&pm->lock);
- pm->subflows++;
+ pm->extra_subflows++;
spin_unlock_bh(&pm->lock);
return true;
}
return false;
}
- subflows_max = mptcp_pm_get_subflows_max(msk);
+ limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk);
- pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk, pm->subflows,
- subflows_max, READ_ONCE(pm->accept_subflow));
+ pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk,
+ pm->extra_subflows, limit_extra_subflows,
+ READ_ONCE(pm->accept_subflow));
/* try to avoid acquiring the lock below */
if (!READ_ONCE(pm->accept_subflow))
@@ -481,8 +508,8 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
spin_lock_bh(&pm->lock);
if (READ_ONCE(pm->accept_subflow)) {
- ret = pm->subflows < subflows_max;
- if (ret && ++pm->subflows == subflows_max)
+ ret = pm->extra_subflows < limit_extra_subflows;
+ if (ret && ++pm->extra_subflows == limit_extra_subflows)
WRITE_ONCE(pm->accept_subflow, false);
}
spin_unlock_bh(&pm->lock);
@@ -568,7 +595,7 @@ void mptcp_pm_subflow_check_next(struct mptcp_sock *msk,
if (mptcp_pm_is_userspace(msk)) {
if (update_subflows) {
spin_lock_bh(&pm->lock);
- pm->subflows--;
+ pm->extra_subflows--;
spin_unlock_bh(&pm->lock);
}
return;
@@ -611,9 +638,12 @@ void mptcp_pm_add_addr_received(const struct sock *ssk,
} else {
__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
}
- /* id0 should not have a different address */
+ /* - id0 should not have a different address
+ * - special case for C-flag: linked to fill_local_addresses_vec()
+ */
} else if ((addr->id == 0 && !mptcp_pm_is_init_remote_addr(msk, addr)) ||
- (addr->id > 0 && !READ_ONCE(pm->accept_addr))) {
+ (addr->id > 0 && !READ_ONCE(pm->accept_addr) &&
+ !mptcp_pm_add_addr_c_flag_case(msk))) {
mptcp_pm_announce_addr(msk, addr, true);
mptcp_pm_add_addr_send_ack(msk);
} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
@@ -765,8 +795,14 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
pr_debug("fail_seq=%llu\n", fail_seq);
- if (!READ_ONCE(msk->allow_infinite_fallback))
+ /* After accepting the fail, we can't create any other subflows */
+ spin_lock_bh(&msk->fallback_lock);
+ if (!msk->allow_infinite_fallback) {
+ spin_unlock_bh(&msk->fallback_lock);
return;
+ }
+ msk->allow_subflows = false;
+ spin_unlock_bh(&msk->fallback_lock);
if (!subflow->fail_tout) {
pr_debug("send MP_FAIL response and infinite map\n");
@@ -993,17 +1029,17 @@ void mptcp_pm_data_reset(struct mptcp_sock *msk)
WRITE_ONCE(pm->pm_type, pm_type);
if (pm_type == MPTCP_PM_TYPE_KERNEL) {
- bool subflows_allowed = !!mptcp_pm_get_subflows_max(msk);
+ bool subflows_allowed = !!mptcp_pm_get_limit_extra_subflows(msk);
/* pm->work_pending must only be set to 'true' when
* pm->pm_type is set to MPTCP_PM_TYPE_KERNEL
*/
WRITE_ONCE(pm->work_pending,
- (!!mptcp_pm_get_local_addr_max(msk) &&
+ (!!mptcp_pm_get_endp_subflow_max(msk) &&
subflows_allowed) ||
- !!mptcp_pm_get_add_addr_signal_max(msk));
+ !!mptcp_pm_get_endp_signal_max(msk));
WRITE_ONCE(pm->accept_addr,
- !!mptcp_pm_get_add_addr_accept_max(msk) &&
+ !!mptcp_pm_get_limit_add_addr_accepted(msk) &&
subflows_allowed);
WRITE_ONCE(pm->accept_subflow, subflows_allowed);
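The timer changes above effectively turn the fixed ADD_ADDR retransmission period into an RTO-derived exponential backoff; a sketch of the resulting schedule, as read from mptcp_adjust_add_addr_timeout() and mptcp_pm_add_timer():

	/*
	 * timeout = add_addr_timeout sysctl value, clamped down to the
	 *           largest icsk_rto among the current subflows (when
	 *           that maximum is non-zero and smaller);
	 * a zero timeout skips re-arming the timer entirely;
	 * retransmission n is scheduled at jiffies + (timeout << n),
	 * for at most ADD_ADDR_RETRANS_MAX attempts.
	 */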
diff --git a/net/mptcp/pm_kernel.c b/net/mptcp/pm_kernel.c
index d39e7c178460..2ae95476dba3 100644
--- a/net/mptcp/pm_kernel.c
+++ b/net/mptcp/pm_kernel.c
@@ -17,14 +17,14 @@ static int pm_nl_pernet_id;
struct pm_nl_pernet {
/* protects pernet updates */
spinlock_t lock;
- struct list_head local_addr_list;
- unsigned int addrs;
- unsigned int stale_loss_cnt;
- unsigned int add_addr_signal_max;
- unsigned int add_addr_accept_max;
- unsigned int local_addr_max;
- unsigned int subflows_max;
- unsigned int next_id;
+ struct list_head endp_list;
+ u8 endpoints;
+ u8 endp_signal_max;
+ u8 endp_subflow_max;
+ u8 endp_laminar_max;
+ u8 limit_add_addr_accepted;
+ u8 limit_extra_subflows;
+ u8 next_id;
DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
};
@@ -46,37 +46,45 @@ static struct pm_nl_pernet *genl_info_pm_nl(struct genl_info *info)
return pm_nl_get_pernet(genl_info_net(info));
}
-unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk)
+u8 mptcp_pm_get_endp_signal_max(const struct mptcp_sock *msk)
{
const struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
- return READ_ONCE(pernet->add_addr_signal_max);
+ return READ_ONCE(pernet->endp_signal_max);
}
-EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_signal_max);
+EXPORT_SYMBOL_GPL(mptcp_pm_get_endp_signal_max);
-unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk)
+u8 mptcp_pm_get_endp_subflow_max(const struct mptcp_sock *msk)
{
struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
- return READ_ONCE(pernet->add_addr_accept_max);
+ return READ_ONCE(pernet->endp_subflow_max);
}
-EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_accept_max);
+EXPORT_SYMBOL_GPL(mptcp_pm_get_endp_subflow_max);
-unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk)
+u8 mptcp_pm_get_endp_laminar_max(const struct mptcp_sock *msk)
{
struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
- return READ_ONCE(pernet->subflows_max);
+ return READ_ONCE(pernet->endp_laminar_max);
}
-EXPORT_SYMBOL_GPL(mptcp_pm_get_subflows_max);
+EXPORT_SYMBOL_GPL(mptcp_pm_get_endp_laminar_max);
-unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk)
+u8 mptcp_pm_get_limit_add_addr_accepted(const struct mptcp_sock *msk)
{
struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
- return READ_ONCE(pernet->local_addr_max);
+ return READ_ONCE(pernet->limit_add_addr_accepted);
}
-EXPORT_SYMBOL_GPL(mptcp_pm_get_local_addr_max);
+EXPORT_SYMBOL_GPL(mptcp_pm_get_limit_add_addr_accepted);
+
+u8 mptcp_pm_get_limit_extra_subflows(const struct mptcp_sock *msk)
+{
+ struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
+
+ return READ_ONCE(pernet->limit_extra_subflows);
+}
+EXPORT_SYMBOL_GPL(mptcp_pm_get_limit_extra_subflows);
static bool lookup_subflow_by_daddr(const struct list_head *list,
const struct mptcp_addr_info *daddr)
@@ -110,7 +118,7 @@ select_local_address(const struct pm_nl_pernet *pernet,
msk_owned_by_me(msk);
rcu_read_lock();
- list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
+ list_for_each_entry_rcu(entry, &pernet->endp_list, list) {
if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW))
continue;
@@ -141,7 +149,7 @@ select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk,
* Note: removal from the local address list during the msk life-cycle
* can lead to additional addresses not being announced.
*/
- list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
+ list_for_each_entry_rcu(entry, &pernet->endp_list, list) {
if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
continue;
@@ -159,80 +167,96 @@ select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk,
return found;
}
-/* Fill all the remote addresses into the array addrs[],
- * and return the array size.
- */
-static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk,
- struct mptcp_addr_info *local,
- bool fullmesh,
- struct mptcp_addr_info *addrs)
+static unsigned int
+fill_remote_addr(struct mptcp_sock *msk, struct mptcp_addr_info *local,
+ struct mptcp_addr_info *addrs)
{
bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0);
- struct sock *sk = (struct sock *)msk, *ssk;
- struct mptcp_subflow_context *subflow;
struct mptcp_addr_info remote = { 0 };
- unsigned int subflows_max;
- int i = 0;
+ struct sock *sk = (struct sock *)msk;
+
+ if (deny_id0)
+ return 0;
- subflows_max = mptcp_pm_get_subflows_max(msk);
mptcp_remote_address((struct sock_common *)sk, &remote);
- /* Non-fullmesh endpoint, fill in the single entry
- * corresponding to the primary MPC subflow remote address
- */
- if (!fullmesh) {
- if (deny_id0)
- return 0;
+ if (!mptcp_pm_addr_families_match(sk, local, &remote))
+ return 0;
- if (!mptcp_pm_addr_families_match(sk, local, &remote))
- return 0;
+ msk->pm.extra_subflows++;
+ *addrs = remote;
- msk->pm.subflows++;
- addrs[i++] = remote;
- } else {
- DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
+ return 1;
+}
- /* Forbid creation of new subflows matching existing
- * ones, possibly already created by incoming ADD_ADDR
- */
- bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
- mptcp_for_each_subflow(msk, subflow)
- if (READ_ONCE(subflow->local_id) == local->id)
- __set_bit(subflow->remote_id, unavail_id);
-
- mptcp_for_each_subflow(msk, subflow) {
- ssk = mptcp_subflow_tcp_sock(subflow);
- mptcp_remote_address((struct sock_common *)ssk, &addrs[i]);
- addrs[i].id = READ_ONCE(subflow->remote_id);
- if (deny_id0 && !addrs[i].id)
- continue;
+static unsigned int
+fill_remote_addresses_fullmesh(struct mptcp_sock *msk,
+ struct mptcp_addr_info *local,
+ struct mptcp_addr_info *addrs)
+{
+ u8 limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk);
+ bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0);
+ DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
+ struct sock *sk = (struct sock *)msk, *ssk;
+ struct mptcp_subflow_context *subflow;
+ int i = 0;
- if (test_bit(addrs[i].id, unavail_id))
- continue;
+ /* Forbid creation of new subflows matching existing ones, possibly
+ * already created by incoming ADD_ADDR
+ */
+ bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
+ mptcp_for_each_subflow(msk, subflow)
+ if (READ_ONCE(subflow->local_id) == local->id)
+ __set_bit(subflow->remote_id, unavail_id);
+
+ mptcp_for_each_subflow(msk, subflow) {
+ ssk = mptcp_subflow_tcp_sock(subflow);
+ mptcp_remote_address((struct sock_common *)ssk, &addrs[i]);
+ addrs[i].id = READ_ONCE(subflow->remote_id);
+ if (deny_id0 && !addrs[i].id)
+ continue;
- if (!mptcp_pm_addr_families_match(sk, local, &addrs[i]))
- continue;
+ if (test_bit(addrs[i].id, unavail_id))
+ continue;
- if (msk->pm.subflows < subflows_max) {
- /* forbid creating multiple address towards
- * this id
- */
- __set_bit(addrs[i].id, unavail_id);
- msk->pm.subflows++;
- i++;
- }
- }
+ if (!mptcp_pm_addr_families_match(sk, local, &addrs[i]))
+ continue;
+
+ /* forbid creating multiple address towards this id */
+ __set_bit(addrs[i].id, unavail_id);
+ msk->pm.extra_subflows++;
+ i++;
+
+ if (msk->pm.extra_subflows >= limit_extra_subflows)
+ break;
}
return i;
}
+/* Fill all the remote addresses into the array addrs[],
+ * and return the array size.
+ */
+static unsigned int
+fill_remote_addresses_vec(struct mptcp_sock *msk, struct mptcp_addr_info *local,
+ bool fullmesh, struct mptcp_addr_info *addrs)
+{
+ /* Non-fullmesh: fill in the single entry corresponding to the primary
+ * MPC subflow remote address; returns the number of entries filled
+ * (0 or 1).
+ */
+ if (!fullmesh)
+ return fill_remote_addr(msk, local, addrs);
+
+ /* Fullmesh endpoint: fill all possible remote addresses */
+ return fill_remote_addresses_fullmesh(msk, local, addrs);
+}
+
static struct mptcp_pm_addr_entry *
__lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id)
{
struct mptcp_pm_addr_entry *entry;
- list_for_each_entry_rcu(entry, &pernet->local_addr_list, list,
+ list_for_each_entry_rcu(entry, &pernet->endp_list, list,
lockdep_is_held(&pernet->lock)) {
if (entry->addr.id == id)
return entry;
@@ -245,7 +269,7 @@ __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info)
{
struct mptcp_pm_addr_entry *entry;
- list_for_each_entry_rcu(entry, &pernet->local_addr_list, list,
+ list_for_each_entry_rcu(entry, &pernet->endp_list, list,
lockdep_is_held(&pernet->lock)) {
if (mptcp_addresses_equal(&entry->addr, info, entry->addr.port))
return entry;
@@ -253,52 +277,65 @@ __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info)
return NULL;
}
-static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+static u8 mptcp_endp_get_local_id(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *addr)
{
- struct sock *sk = (struct sock *)msk;
- unsigned int add_addr_signal_max;
- bool signal_and_subflow = false;
- unsigned int local_addr_max;
- struct pm_nl_pernet *pernet;
- struct mptcp_pm_local local;
- unsigned int subflows_max;
-
- pernet = pm_nl_get_pernet(sock_net(sk));
+ return msk->mpc_endpoint_id == addr->id ? 0 : addr->id;
+}
- add_addr_signal_max = mptcp_pm_get_add_addr_signal_max(msk);
- local_addr_max = mptcp_pm_get_local_addr_max(msk);
- subflows_max = mptcp_pm_get_subflows_max(msk);
+/* Set mpc_endpoint_id, and send MP_PRIO for ID0 if needed */
+static void mptcp_mpc_endpoint_setup(struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow;
+ struct mptcp_pm_addr_entry *entry;
+ struct mptcp_addr_info mpc_addr;
+ struct pm_nl_pernet *pernet;
+ bool backup = false;
/* do lazy endpoint usage accounting for the MPC subflows */
- if (unlikely(!(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED))) && msk->first) {
- struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(msk->first);
- struct mptcp_pm_addr_entry *entry;
- struct mptcp_addr_info mpc_addr;
- bool backup = false;
-
- mptcp_local_address((struct sock_common *)msk->first, &mpc_addr);
- rcu_read_lock();
- entry = __lookup_addr(pernet, &mpc_addr);
- if (entry) {
- __clear_bit(entry->addr.id, msk->pm.id_avail_bitmap);
- msk->mpc_endpoint_id = entry->addr.id;
- backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
- }
- rcu_read_unlock();
+ if (likely(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED)) ||
+ !msk->first)
+ return;
- if (backup)
- mptcp_pm_send_ack(msk, subflow, true, backup);
+ subflow = mptcp_subflow_ctx(msk->first);
+ pernet = pm_nl_get_pernet_from_msk(msk);
- msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED);
+ mptcp_local_address((struct sock_common *)msk->first, &mpc_addr);
+ rcu_read_lock();
+ entry = __lookup_addr(pernet, &mpc_addr);
+ if (entry) {
+ __clear_bit(entry->addr.id, msk->pm.id_avail_bitmap);
+ msk->mpc_endpoint_id = entry->addr.id;
+ backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
}
+ rcu_read_unlock();
+
+ /* Send MP_PRIO */
+ if (backup)
+ mptcp_pm_send_ack(msk, subflow, true, backup);
+
+ msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED);
+}
+
+static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+{
+ u8 limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk);
+ struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
+ u8 endp_subflow_max = mptcp_pm_get_endp_subflow_max(msk);
+ u8 endp_signal_max = mptcp_pm_get_endp_signal_max(msk);
+ struct sock *sk = (struct sock *)msk;
+ bool signal_and_subflow = false;
+ struct mptcp_pm_local local;
+
+ mptcp_mpc_endpoint_setup(msk);
pr_debug("local %d:%d signal %d:%d subflows %d:%d\n",
- msk->pm.local_addr_used, local_addr_max,
- msk->pm.add_addr_signaled, add_addr_signal_max,
- msk->pm.subflows, subflows_max);
+ msk->pm.local_addr_used, endp_subflow_max,
+ msk->pm.add_addr_signaled, endp_signal_max,
+ msk->pm.extra_subflows, limit_extra_subflows);
/* check first for announce */
- if (msk->pm.add_addr_signaled < add_addr_signal_max) {
+ if (msk->pm.add_addr_signaled < endp_signal_max) {
/* due to racing events on both ends we can reach here while
* previous add address is still running: if we invoke now
* mptcp_pm_announce_addr(), that will fail and the
@@ -333,9 +370,13 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
}
subflow:
+ /* No need to try establishing subflows to remote id0 if not allowed */
+ if (mptcp_pm_add_addr_c_flag_case(msk))
+ goto exit;
+
/* check if we should create a new subflow */
- while (msk->pm.local_addr_used < local_addr_max &&
- msk->pm.subflows < subflows_max) {
+ while (msk->pm.local_addr_used < endp_subflow_max &&
+ msk->pm.extra_subflows < limit_extra_subflows) {
struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
bool fullmesh;
int i, nr;
@@ -364,6 +405,8 @@ subflow:
__mptcp_subflow_connect(sk, &local, &addrs[i]);
spin_lock_bh(&msk->pm.lock);
}
+
+exit:
mptcp_pm_nl_check_work_pending(msk);
}
@@ -377,90 +420,225 @@ static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk)
mptcp_pm_create_subflow_or_signal_addr(msk);
}
-/* Fill all the local addresses into the array addrs[],
- * and return the array size.
- */
-static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
- struct mptcp_addr_info *remote,
- struct mptcp_pm_local *locals)
+static unsigned int
+fill_local_addresses_vec_fullmesh(struct mptcp_sock *msk,
+ struct mptcp_addr_info *remote,
+ struct mptcp_pm_local *locals,
+ bool c_flag_case)
{
+ u8 limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk);
+ struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
struct sock *sk = (struct sock *)msk;
struct mptcp_pm_addr_entry *entry;
- struct mptcp_addr_info mpc_addr;
- struct pm_nl_pernet *pernet;
- unsigned int subflows_max;
+ struct mptcp_pm_local *local;
int i = 0;
- pernet = pm_nl_get_pernet_from_msk(msk);
- subflows_max = mptcp_pm_get_subflows_max(msk);
-
- mptcp_local_address((struct sock_common *)msk, &mpc_addr);
-
rcu_read_lock();
- list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
+ list_for_each_entry_rcu(entry, &pernet->endp_list, list) {
+ bool is_id0;
+
if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH))
continue;
if (!mptcp_pm_addr_families_match(sk, &entry->addr, remote))
continue;
- if (msk->pm.subflows < subflows_max) {
- locals[i].addr = entry->addr;
- locals[i].flags = entry->flags;
- locals[i].ifindex = entry->ifindex;
+ local = &locals[i];
+ local->addr = entry->addr;
+ local->flags = entry->flags;
+ local->ifindex = entry->ifindex;
+
+ is_id0 = local->addr.id == msk->mpc_endpoint_id;
- /* Special case for ID0: set the correct ID */
- if (mptcp_addresses_equal(&locals[i].addr, &mpc_addr, locals[i].addr.port))
- locals[i].addr.id = 0;
+ if (c_flag_case &&
+ (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)) {
+ __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
- msk->pm.subflows++;
- i++;
+ if (!is_id0)
+ msk->pm.local_addr_used++;
}
+
+ /* Special case for ID0: set the correct ID */
+ if (is_id0)
+ local->addr.id = 0;
+
+ msk->pm.extra_subflows++;
+ i++;
+
+ if (msk->pm.extra_subflows >= limit_extra_subflows)
+ break;
}
rcu_read_unlock();
- /* If the array is empty, fill in the single
- * 'IPADDRANY' local address
+ return i;
+}
+
+static unsigned int
+fill_local_laminar_endp(struct mptcp_sock *msk, struct mptcp_addr_info *remote,
+ struct mptcp_pm_local *locals)
+{
+ struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
+ DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
+ struct mptcp_subflow_context *subflow;
+ struct sock *sk = (struct sock *)msk;
+ struct mptcp_pm_addr_entry *entry;
+ struct mptcp_pm_local *local;
+ int found = 0;
+
+ /* Forbid creation of new subflows matching existing ones, possibly
+ * already created by 'subflow' endpoints
*/
- if (!i) {
- memset(&locals[i], 0, sizeof(locals[i]));
- locals[i].addr.family =
-#if IS_ENABLED(CONFIG_MPTCP_IPV6)
- remote->family == AF_INET6 &&
- ipv6_addr_v4mapped(&remote->addr6) ? AF_INET :
-#endif
- remote->family;
+ bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
- if (!mptcp_pm_addr_families_match(sk, &locals[i].addr, remote))
- return 0;
+ if ((1 << inet_sk_state_load(ssk)) &
+ (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSING |
+ TCPF_CLOSE))
+ continue;
+
+ __set_bit(subflow_get_local_id(subflow), unavail_id);
+ }
- msk->pm.subflows++;
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, &pernet->endp_list, list) {
+ if (!(entry->flags & MPTCP_PM_ADDR_FLAG_LAMINAR))
+ continue;
+
+ if (!mptcp_pm_addr_families_match(sk, &entry->addr, remote))
+ continue;
+
+ if (test_bit(mptcp_endp_get_local_id(msk, &entry->addr),
+ unavail_id))
+ continue;
+
+ local = &locals[0];
+ local->addr = entry->addr;
+ local->flags = entry->flags;
+ local->ifindex = entry->ifindex;
+
+ if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
+ __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
+
+ if (local->addr.id != msk->mpc_endpoint_id)
+ msk->pm.local_addr_used++;
+ }
+
+ msk->pm.extra_subflows++;
+ found = 1;
+ break;
+ }
+ rcu_read_unlock();
+
+ return found;
+}
+
+static unsigned int
+fill_local_addresses_vec_c_flag(struct mptcp_sock *msk,
+ struct mptcp_addr_info *remote,
+ struct mptcp_pm_local *locals)
+{
+ u8 limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk);
+ struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
+ u8 endp_subflow_max = mptcp_pm_get_endp_subflow_max(msk);
+ struct sock *sk = (struct sock *)msk;
+ struct mptcp_pm_local *local;
+ int i = 0;
+
+ while (msk->pm.local_addr_used < endp_subflow_max) {
+ local = &locals[i];
+
+ if (!select_local_address(pernet, msk, local))
+ break;
+
+ __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
+
+ if (!mptcp_pm_addr_families_match(sk, &local->addr, remote))
+ continue;
+
+ if (local->addr.id == msk->mpc_endpoint_id)
+ continue;
+
+ msk->pm.local_addr_used++;
+ msk->pm.extra_subflows++;
i++;
+
+ if (msk->pm.extra_subflows >= limit_extra_subflows)
+ break;
}
return i;
}
+static unsigned int
+fill_local_address_any(struct mptcp_sock *msk, struct mptcp_addr_info *remote,
+ struct mptcp_pm_local *local)
+{
+ struct sock *sk = (struct sock *)msk;
+
+ memset(local, 0, sizeof(*local));
+ local->addr.family =
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ remote->family == AF_INET6 &&
+ ipv6_addr_v4mapped(&remote->addr6) ? AF_INET :
+#endif
+ remote->family;
+
+ if (!mptcp_pm_addr_families_match(sk, &local->addr, remote))
+ return 0;
+
+ msk->pm.extra_subflows++;
+
+ return 1;
+}
+
+/* Fill all the local addresses into the array addrs[],
+ * and return the array size.
+ */
+static unsigned int
+fill_local_addresses_vec(struct mptcp_sock *msk, struct mptcp_addr_info *remote,
+ struct mptcp_pm_local *locals)
+{
+ bool c_flag_case = remote->id && mptcp_pm_add_addr_c_flag_case(msk);
+ int i;
+
+ /* If there is at least one MPTCP endpoint with a fullmesh flag */
+ i = fill_local_addresses_vec_fullmesh(msk, remote, locals, c_flag_case);
+ if (i)
+ return i;
+
+ /* If there is at least one MPTCP endpoint with a laminar flag */
+ if (mptcp_pm_get_endp_laminar_max(msk))
+ return fill_local_laminar_endp(msk, remote, locals);
+
+ /* Special case: the peer sets the C flag. Accept one ADD_ADDR even
+ * when the default limits are used -- which accept no ADD_ADDR -- and
+ * use the subflow endpoints.
+ */
+ if (c_flag_case)
+ return fill_local_addresses_vec_c_flag(msk, remote, locals);
+
+ /* No special case: fill in the single 'IPADDRANY' local address */
+ return fill_local_address_any(msk, remote, &locals[0]);
+}
+
static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
{
+ u8 limit_add_addr_accepted = mptcp_pm_get_limit_add_addr_accepted(msk);
+ u8 limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk);
struct mptcp_pm_local locals[MPTCP_PM_ADDR_MAX];
struct sock *sk = (struct sock *)msk;
- unsigned int add_addr_accept_max;
struct mptcp_addr_info remote;
- unsigned int subflows_max;
bool sf_created = false;
int i, nr;
- add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
- subflows_max = mptcp_pm_get_subflows_max(msk);
-
pr_debug("accepted %d:%d remote family %d\n",
- msk->pm.add_addr_accepted, add_addr_accept_max,
+ msk->pm.add_addr_accepted, limit_add_addr_accepted,
msk->pm.remote.family);
remote = msk->pm.remote;
mptcp_pm_announce_addr(msk, &remote, true);
mptcp_pm_addr_send_ack(msk);
+ mptcp_mpc_endpoint_setup(msk);
if (lookup_subflow_by_daddr(&msk->conn_list, &remote))
return;
@@ -486,8 +664,8 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
/* add_addr_accepted is not decr for ID 0 */
if (remote.id)
msk->pm.add_addr_accepted++;
- if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
- msk->pm.subflows >= subflows_max)
+ if (msk->pm.add_addr_accepted >= limit_add_addr_accepted ||
+ msk->pm.extra_subflows >= limit_extra_subflows)
WRITE_ONCE(msk->pm.accept_addr, false);
}
}
@@ -495,10 +673,13 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
void mptcp_pm_nl_rm_addr(struct mptcp_sock *msk, u8 rm_id)
{
if (rm_id && WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) {
+ u8 limit_add_addr_accepted =
+ mptcp_pm_get_limit_add_addr_accepted(msk);
+
/* Note: if the subflow has been closed before, this
* add_addr_accepted counter will not be decremented.
*/
- if (--msk->pm.add_addr_accepted < mptcp_pm_get_add_addr_accept_max(msk))
+ if (--msk->pm.add_addr_accepted < limit_add_addr_accepted)
WRITE_ONCE(msk->pm.accept_addr, true);
}
}
@@ -523,8 +704,8 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
bool needs_id, bool replace)
{
struct mptcp_pm_addr_entry *cur, *del_entry = NULL;
- unsigned int addr_max;
int ret = -EINVAL;
+ u8 addr_max;
spin_lock_bh(&pernet->lock);
/* to keep the code simple, don't do IDR-like allocation for address ID,
@@ -532,7 +713,7 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
*/
if (pernet->next_id == MPTCP_PM_MAX_ADDR_ID)
pernet->next_id = 1;
- if (pernet->addrs >= MPTCP_PM_ADDR_MAX) {
+ if (pernet->endpoints >= MPTCP_PM_ADDR_MAX) {
ret = -ERANGE;
goto out;
}
@@ -546,7 +727,7 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
*/
if (!address_use_port(entry))
entry->addr.port = 0;
- list_for_each_entry(cur, &pernet->local_addr_list, list) {
+ list_for_each_entry(cur, &pernet->endp_list, list) {
if (mptcp_addresses_equal(&cur->addr, &entry->addr,
cur->addr.port || entry->addr.port)) {
/* allow replacing the existing endpoint only if such
@@ -571,7 +752,7 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
goto out;
}
- pernet->addrs--;
+ pernet->endpoints--;
entry->addr.id = cur->addr.id;
list_del_rcu(&cur->list);
del_entry = cur;
@@ -598,19 +779,23 @@ find_next:
pernet->next_id = entry->addr.id;
if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) {
- addr_max = pernet->add_addr_signal_max;
- WRITE_ONCE(pernet->add_addr_signal_max, addr_max + 1);
+ addr_max = pernet->endp_signal_max;
+ WRITE_ONCE(pernet->endp_signal_max, addr_max + 1);
}
if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
- addr_max = pernet->local_addr_max;
- WRITE_ONCE(pernet->local_addr_max, addr_max + 1);
+ addr_max = pernet->endp_subflow_max;
+ WRITE_ONCE(pernet->endp_subflow_max, addr_max + 1);
+ }
+ if (entry->flags & MPTCP_PM_ADDR_FLAG_LAMINAR) {
+ addr_max = pernet->endp_laminar_max;
+ WRITE_ONCE(pernet->endp_laminar_max, addr_max + 1);
}
- pernet->addrs++;
+ pernet->endpoints++;
if (!entry->addr.port)
- list_add_tail_rcu(&entry->list, &pernet->local_addr_list);
+ list_add_tail_rcu(&entry->list, &pernet->endp_list);
else
- list_add_rcu(&entry->list, &pernet->local_addr_list);
+ list_add_rcu(&entry->list, &pernet->endp_list);
ret = entry->addr.id;
out:
@@ -845,12 +1030,6 @@ out_free:
return ret;
}
-static u8 mptcp_endp_get_local_id(struct mptcp_sock *msk,
- const struct mptcp_addr_info *addr)
-{
- return msk->mpc_endpoint_id == addr->id ? 0 : addr->id;
-}
-
static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr,
bool force)
@@ -969,8 +1148,8 @@ int mptcp_pm_nl_del_addr_doit(struct sk_buff *skb, struct genl_info *info)
{
struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
struct mptcp_pm_addr_entry addr, *entry;
- unsigned int addr_max;
struct nlattr *attr;
+ u8 addr_max;
int ret;
if (GENL_REQ_ATTR_CHECK(info, MPTCP_PM_ENDPOINT_ADDR))
@@ -997,15 +1176,19 @@ int mptcp_pm_nl_del_addr_doit(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) {
- addr_max = pernet->add_addr_signal_max;
- WRITE_ONCE(pernet->add_addr_signal_max, addr_max - 1);
+ addr_max = pernet->endp_signal_max;
+ WRITE_ONCE(pernet->endp_signal_max, addr_max - 1);
}
if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
- addr_max = pernet->local_addr_max;
- WRITE_ONCE(pernet->local_addr_max, addr_max - 1);
+ addr_max = pernet->endp_subflow_max;
+ WRITE_ONCE(pernet->endp_subflow_max, addr_max - 1);
+ }
+ if (entry->flags & MPTCP_PM_ADDR_FLAG_LAMINAR) {
+ addr_max = pernet->endp_laminar_max;
+ WRITE_ONCE(pernet->endp_laminar_max, addr_max - 1);
}
- pernet->addrs--;
+ pernet->endpoints--;
list_del_rcu(&entry->list);
__clear_bit(entry->addr.id, pernet->id_bitmap);
spin_unlock_bh(&pernet->lock);
@@ -1084,10 +1267,10 @@ static void __flush_addrs(struct list_head *list)
static void __reset_counters(struct pm_nl_pernet *pernet)
{
- WRITE_ONCE(pernet->add_addr_signal_max, 0);
- WRITE_ONCE(pernet->add_addr_accept_max, 0);
- WRITE_ONCE(pernet->local_addr_max, 0);
- pernet->addrs = 0;
+ WRITE_ONCE(pernet->endp_signal_max, 0);
+ WRITE_ONCE(pernet->endp_subflow_max, 0);
+ WRITE_ONCE(pernet->endp_laminar_max, 0);
+ pernet->endpoints = 0;
}
int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info)
@@ -1096,7 +1279,7 @@ int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info)
LIST_HEAD(free_list);
spin_lock_bh(&pernet->lock);
- list_splice_init(&pernet->local_addr_list, &free_list);
+ list_splice_init(&pernet->endp_list, &free_list);
__reset_counters(pernet);
pernet->next_id = 1;
bitmap_zero(pernet->id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
@@ -1182,18 +1365,18 @@ int mptcp_pm_nl_set_limits_doit(struct sk_buff *skb, struct genl_info *info)
int ret;
spin_lock_bh(&pernet->lock);
- rcv_addrs = pernet->add_addr_accept_max;
+ rcv_addrs = pernet->limit_add_addr_accepted;
ret = parse_limit(info, MPTCP_PM_ATTR_RCV_ADD_ADDRS, &rcv_addrs);
if (ret)
goto unlock;
- subflows = pernet->subflows_max;
+ subflows = pernet->limit_extra_subflows;
ret = parse_limit(info, MPTCP_PM_ATTR_SUBFLOWS, &subflows);
if (ret)
goto unlock;
- WRITE_ONCE(pernet->add_addr_accept_max, rcv_addrs);
- WRITE_ONCE(pernet->subflows_max, subflows);
+ WRITE_ONCE(pernet->limit_add_addr_accepted, rcv_addrs);
+ WRITE_ONCE(pernet->limit_extra_subflows, subflows);
unlock:
spin_unlock_bh(&pernet->lock);
@@ -1216,11 +1399,11 @@ int mptcp_pm_nl_get_limits_doit(struct sk_buff *skb, struct genl_info *info)
goto fail;
if (nla_put_u32(msg, MPTCP_PM_ATTR_RCV_ADD_ADDRS,
- READ_ONCE(pernet->add_addr_accept_max)))
+ READ_ONCE(pernet->limit_add_addr_accepted)))
goto fail;
if (nla_put_u32(msg, MPTCP_PM_ATTR_SUBFLOWS,
- READ_ONCE(pernet->subflows_max)))
+ READ_ONCE(pernet->limit_extra_subflows)))
goto fail;
genlmsg_end(msg, reply);
@@ -1329,7 +1512,7 @@ bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk)
{
struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
- if (msk->pm.subflows == mptcp_pm_get_subflows_max(msk) ||
+ if (msk->pm.extra_subflows == mptcp_pm_get_limit_extra_subflows(msk) ||
(find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap,
MPTCP_PM_MAX_ADDR_ID + 1, 0) == MPTCP_PM_MAX_ADDR_ID + 1)) {
WRITE_ONCE(msk->pm.work_pending, false);
@@ -1361,12 +1544,11 @@ static int __net_init pm_nl_init_net(struct net *net)
{
struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);
- INIT_LIST_HEAD_RCU(&pernet->local_addr_list);
+ INIT_LIST_HEAD_RCU(&pernet->endp_list);
/* Cit. 2 subflows ought to be enough for anybody. */
- pernet->subflows_max = 2;
+ pernet->limit_extra_subflows = 2;
pernet->next_id = 1;
- pernet->stale_loss_cnt = 4;
spin_lock_init(&pernet->lock);
/* No need to initialize other pernet fields, the struct is zeroed at
@@ -1387,7 +1569,7 @@ static void __net_exit pm_nl_exit_net(struct list_head *net_list)
* other modifiers, also netns core already waited for a
* RCU grace period.
*/
- __flush_addrs(&pernet->local_addr_list);
+ __flush_addrs(&pernet->endp_list);
}
}
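
The WRITE_ONCE() updates to the renamed endp_*_max counters above only make sense together with lockless readers elsewhere. A minimal sketch of that reader pattern, assuming the getter shape declared later in this series (the helper body below is an illustration, not part of the patch shown here):

	/* sketch only: reader side pairing with the WRITE_ONCE() above */
	u8 mptcp_pm_get_endp_signal_max(const struct mptcp_sock *msk)
	{
		const struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);

		/* read outside pernet->lock; pairs with WRITE_ONCE() in the
		 * endpoint add/del paths
		 */
		return READ_ONCE(pernet->endp_signal_max);
	}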
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 50aaf259959a..d5b383870f79 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -113,7 +113,7 @@ int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info,
return err;
if (tb[MPTCP_PM_ADDR_ATTR_IF_IDX]) {
- u32 val = nla_get_s32(tb[MPTCP_PM_ADDR_ATTR_IF_IDX]);
+ s32 val = nla_get_s32(tb[MPTCP_PM_ADDR_ATTR_IF_IDX]);
entry->ifindex = val;
}
@@ -408,11 +408,23 @@ static int mptcp_event_created(struct sk_buff *skb,
const struct sock *ssk)
{
int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token));
+ u16 flags = 0;
if (err)
return err;
- if (nla_put_u8(skb, MPTCP_ATTR_SERVER_SIDE, READ_ONCE(msk->pm.server_side)))
+ if (READ_ONCE(msk->pm.server_side)) {
+ flags |= MPTCP_PM_EV_FLAG_SERVER_SIDE;
+
+ /* Deprecated, and only set when it is the server side */
+ if (nla_put_u8(skb, MPTCP_ATTR_SERVER_SIDE, 1))
+ return -EMSGSIZE;
+ }
+
+ if (READ_ONCE(msk->pm.remote_deny_join_id0))
+ flags |= MPTCP_PM_EV_FLAG_DENY_JOIN_ID0;
+
+ if (flags && nla_put_u16(skb, MPTCP_ATTR_FLAGS, flags))
return -EMSGSIZE;
return mptcp_event_add_subflow(skb, ssk);
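
For context, a hypothetical userspace decode of the new MPTCP_ATTR_FLAGS attribute emitted above; only the two flag names come from this patch, the header location and everything else is an assumption:

	#include <stdint.h>
	#include <stdio.h>
	#include <linux/mptcp.h>	/* assumption: UAPI header exporting the flag bits */

	static void mptcp_event_print_flags(uint16_t flags)
	{
		if (flags & MPTCP_PM_EV_FLAG_SERVER_SIDE)	/* supersedes MPTCP_ATTR_SERVER_SIDE */
			puts("connection created on the server side");
		if (flags & MPTCP_PM_EV_FLAG_DENY_JOIN_ID0)
			puts("peer denies joins to address id 0");
	}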
diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
index a715dcbe0146..8cbc1920afb4 100644
--- a/net/mptcp/pm_userspace.c
+++ b/net/mptcp/pm_userspace.c
@@ -419,7 +419,7 @@ int mptcp_pm_nl_subflow_create_doit(struct sk_buff *skb, struct genl_info *info)
if (err)
mptcp_userspace_pm_delete_local_addr(msk, &entry);
else
- msk->pm.subflows++;
+ msk->pm.extra_subflows++;
spin_unlock_bh(&msk->pm.lock);
create_err:
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index edf14c2c2062..0292162a14ee 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -11,6 +11,8 @@
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/atomic.h>
+#include <net/aligned_data.h>
+#include <net/rps.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
@@ -67,6 +69,26 @@ static const struct proto_ops *mptcp_fallback_tcp_ops(const struct sock *sk)
return &inet_stream_ops;
}
+bool __mptcp_try_fallback(struct mptcp_sock *msk, int fb_mib)
+{
+ struct net *net = sock_net((struct sock *)msk);
+
+ if (__mptcp_check_fallback(msk))
+ return true;
+
+ spin_lock_bh(&msk->fallback_lock);
+ if (!msk->allow_infinite_fallback) {
+ spin_unlock_bh(&msk->fallback_lock);
+ return false;
+ }
+
+ msk->allow_subflows = false;
+ set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
+ __MPTCP_INC_STATS(net, fb_mib);
+ spin_unlock_bh(&msk->fallback_lock);
+ return true;
+}
+
static int __mptcp_socket_create(struct mptcp_sock *msk)
{
struct mptcp_subflow_context *subflow;
@@ -116,26 +138,37 @@ struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk)
static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
{
- sk_drops_add(sk, skb);
+ sk_drops_skbadd(sk, skb);
__kfree_skb(skb);
}
-static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
- struct sk_buff *from)
+static bool __mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
+ struct sk_buff *from, bool *fragstolen,
+ int *delta)
{
- bool fragstolen;
- int delta;
+ int limit = READ_ONCE(sk->sk_rcvbuf);
if (unlikely(MPTCP_SKB_CB(to)->cant_coalesce) ||
MPTCP_SKB_CB(from)->offset ||
- ((to->len + from->len) > (sk->sk_rcvbuf >> 3)) ||
- !skb_try_coalesce(to, from, &fragstolen, &delta))
+ ((to->len + from->len) > (limit >> 3)) ||
+ !skb_try_coalesce(to, from, fragstolen, delta))
return false;
pr_debug("colesced seq %llx into %llx new len %d new end seq %llx\n",
MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
to->len, MPTCP_SKB_CB(from)->end_seq);
MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
+ return true;
+}
+
+static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
+ struct sk_buff *from)
+{
+ bool fragstolen;
+ int delta;
+
+ if (!__mptcp_try_coalesce(sk, to, from, &fragstolen, &delta))
+ return false;
/* note the fwd memory can reach a negative value after accounting
* for the delta, but the later skb free will restore a non
@@ -157,6 +190,35 @@ static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
return mptcp_try_coalesce((struct sock *)msk, to, from);
}
+/* "inspired" by tcp_rcvbuf_grow(), main difference:
+ * - mptcp does not maintain a msk-level window clamp
+ * - returns true when the receive buffer is actually updated
+ */
+static bool mptcp_rcvbuf_grow(struct sock *sk)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ const struct net *net = sock_net(sk);
+ int rcvwin, rcvbuf, cap;
+
+ if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||
+ (sk->sk_userlocks & SOCK_RCVBUF_LOCK))
+ return false;
+
+ rcvwin = msk->rcvq_space.space << 1;
+
+ if (!RB_EMPTY_ROOT(&msk->out_of_order_queue))
+ rcvwin += MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq - msk->ack_seq;
+
+ cap = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]);
+
+ rcvbuf = min_t(u32, mptcp_space_from_win(sk, rcvwin), cap);
+ if (rcvbuf > sk->sk_rcvbuf) {
+ WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
+ return true;
+ }
+ return false;
+}
+
/* "inspired" by tcp_data_queue_ofo(), main differences:
* - use mptcp seqs
* - don't cope with sacks
@@ -270,29 +332,16 @@ merge_right:
end:
skb_condense(skb);
skb_set_owner_r(skb, sk);
+ /* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */
+ if (sk->sk_socket)
+ mptcp_rcvbuf_grow(sk);
}
-static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
- struct sk_buff *skb, unsigned int offset,
- size_t copy_len)
+static void mptcp_init_skb(struct sock *ssk, struct sk_buff *skb, int offset,
+ int copy_len)
{
- struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
- struct sock *sk = (struct sock *)msk;
- struct sk_buff *tail;
- bool has_rxtstamp;
-
- __skb_unlink(skb, &ssk->sk_receive_queue);
-
- skb_ext_reset(skb);
- skb_orphan(skb);
-
- /* try to fetch required memory from subflow */
- if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
- MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
- goto drop;
- }
-
- has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
+ const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ bool has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
/* the skb map_seq accounts for the skb offset:
* mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
@@ -304,6 +353,24 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
MPTCP_SKB_CB(skb)->has_rxtstamp = has_rxtstamp;
MPTCP_SKB_CB(skb)->cant_coalesce = 0;
+ __skb_unlink(skb, &ssk->sk_receive_queue);
+
+ skb_ext_reset(skb);
+ skb_dst_drop(skb);
+}
+
+static bool __mptcp_move_skb(struct sock *sk, struct sk_buff *skb)
+{
+ u64 copy_len = MPTCP_SKB_CB(skb)->end_seq - MPTCP_SKB_CB(skb)->map_seq;
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ struct sk_buff *tail;
+
+ /* try to fetch required memory from subflow */
+ if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
+ goto drop;
+ }
+
if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
/* in sequence */
msk->bytes_received += copy_len;
@@ -350,6 +417,20 @@ static void mptcp_close_wake_up(struct sock *sk)
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}
+static void mptcp_shutdown_subflows(struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow;
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ bool slow;
+
+ slow = lock_sock_fast(ssk);
+ tcp_shutdown(ssk, SEND_SHUTDOWN);
+ unlock_sock_fast(ssk, slow);
+ }
+}
+
/* called under the msk socket lock */
static bool mptcp_pending_data_fin_ack(struct sock *sk)
{
@@ -374,6 +455,7 @@ static void mptcp_check_data_fin_ack(struct sock *sk)
break;
case TCP_CLOSING:
case TCP_LAST_ACK:
+ mptcp_shutdown_subflows(msk);
mptcp_set_state(sk, TCP_CLOSE);
break;
}
@@ -508,11 +590,10 @@ static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied)
}
}
-static bool mptcp_check_data_fin(struct sock *sk)
+static void mptcp_check_data_fin(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
u64 rcv_data_fin_seq;
- bool ret = false;
/* Need to ack a DATA_FIN received from a peer while this side
* of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
@@ -542,6 +623,7 @@ static bool mptcp_check_data_fin(struct sock *sk)
mptcp_set_state(sk, TCP_CLOSING);
break;
case TCP_FIN_WAIT2:
+ mptcp_shutdown_subflows(msk);
mptcp_set_state(sk, TCP_CLOSE);
break;
default:
@@ -550,21 +632,15 @@ static bool mptcp_check_data_fin(struct sock *sk)
break;
}
- ret = true;
if (!__mptcp_check_fallback(msk))
mptcp_send_ack(msk);
mptcp_close_wake_up(sk);
}
- return ret;
}
static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk)
{
- if (READ_ONCE(msk->allow_infinite_fallback)) {
- MPTCP_INC_STATS(sock_net(ssk),
- MPTCP_MIB_DSSCORRUPTIONFALLBACK);
- mptcp_do_fallback(ssk);
- } else {
+ if (!mptcp_try_fallback(ssk, MPTCP_MIB_DSSCORRUPTIONFALLBACK)) {
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSCORRUPTIONRESET);
mptcp_subflow_reset(ssk);
}
@@ -615,7 +691,9 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
if (offset < skb->len) {
size_t len = skb->len - offset;
- ret = __mptcp_move_skb(msk, ssk, skb, offset, len) || ret;
+ mptcp_init_skb(ssk, skb, offset, len);
+ skb_orphan(skb);
+ ret = __mptcp_move_skb(sk, skb) || ret;
seq += len;
if (unlikely(map_remaining < len)) {
@@ -736,12 +814,8 @@ static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
moved = __mptcp_move_skbs_from_subflow(msk, ssk);
__mptcp_ofo_queue(msk);
- if (unlikely(ssk->sk_err)) {
- if (!sock_owned_by_user(sk))
- __mptcp_error_report(sk);
- else
- __set_bit(MPTCP_ERROR_REPORT, &msk->cb_flags);
- }
+ if (unlikely(ssk->sk_err))
+ __mptcp_subflow_error_report(sk, ssk);
/* If the moves have caught up with the DATA_FIN sequence number
* it's time to ack the DATA_FIN and change socket state, but
@@ -753,18 +827,10 @@ static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
return moved;
}
-static void __mptcp_rcvbuf_update(struct sock *sk, struct sock *ssk)
-{
- if (unlikely(ssk->sk_rcvbuf > sk->sk_rcvbuf))
- WRITE_ONCE(sk->sk_rcvbuf, ssk->sk_rcvbuf);
-}
-
static void __mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- __mptcp_rcvbuf_update(sk, ssk);
-
/* Wake-up the reader only for in-sequence data */
if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk))
sk->sk_data_ready(sk);
@@ -792,7 +858,7 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk)
{
mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq);
- WRITE_ONCE(msk->allow_infinite_fallback, false);
+ msk->allow_infinite_fallback = false;
mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
}
@@ -803,6 +869,14 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
if (sk->sk_state != TCP_ESTABLISHED)
return false;
+ spin_lock_bh(&msk->fallback_lock);
+ if (!msk->allow_subflows) {
+ spin_unlock_bh(&msk->fallback_lock);
+ return false;
+ }
+ mptcp_subflow_joined(msk, ssk);
+ spin_unlock_bh(&msk->fallback_lock);
+
/* attach to msk socket only after we are sure we will deal with it
* at close time
*/
@@ -811,7 +885,6 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++;
mptcp_sockopt_sync_locked(msk, ssk);
- mptcp_subflow_joined(msk, ssk);
mptcp_stop_tout_timer(sk);
__mptcp_propagate_sndbuf(sk, ssk);
return true;
@@ -1136,10 +1209,13 @@ static void mptcp_update_infinite_map(struct mptcp_sock *msk,
mpext->infinite_map = 1;
mpext->data_len = 0;
- MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPTX);
+ if (!mptcp_try_fallback(ssk, MPTCP_MIB_INFINITEMAPTX)) {
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_FALLBACKFAILED);
+ mptcp_subflow_reset(ssk);
+ return;
+ }
+
mptcp_subflow_ctx(ssk)->send_infinite_map = 0;
- pr_fallback(msk);
- mptcp_do_fallback(ssk);
}
#define MPTCP_MAX_GSO_SIZE (GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1))
@@ -1376,7 +1452,7 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
* - estimate the faster flow linger time
* - use the above to estimate the amount of byte transferred
* by the faster flow
- * - check that the amount of queued data is greter than the above,
+ * - check that the amount of queued data is greater than the above,
* otherwise do not use the picked, slower, subflow
* We select the subflow with the shorter estimated time to flush
* the queued mem, which basically ensure the above. We just need
@@ -1713,6 +1789,20 @@ static u32 mptcp_send_limit(const struct sock *sk)
return limit - not_sent;
}
+static void mptcp_rps_record_subflows(const struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow;
+
+ if (!rfs_is_needed())
+ return;
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ sock_rps_record_flow(ssk);
+ }
+}
+
static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -1726,6 +1816,8 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
lock_sock(sk);
+ mptcp_rps_record_subflows(msk);
+
if (unlikely(inet_test_bit(DEFER_CONNECT, sk) ||
msg->msg_flags & MSG_FASTOPEN)) {
int copied_syn = 0;
@@ -1886,12 +1978,13 @@ static int __mptcp_recvmsg_mskq(struct sock *sk,
}
if (!(flags & MSG_PEEK)) {
- /* avoid the indirect call, we know the destructor is sock_wfree */
+ /* avoid the indirect call, we know the destructor is sock_rfree */
skb->destructor = NULL;
+ skb->sk = NULL;
atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
sk_mem_uncharge(sk, skb->truesize);
__skb_unlink(skb, &sk->sk_receive_queue);
- __kfree_skb(skb);
+ skb_attempt_defer_free(skb);
msk->bytes_consumed += count;
}
@@ -1956,48 +2049,26 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
if (msk->rcvq_space.copied <= msk->rcvq_space.space)
goto new_measure;
- if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
- !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
- u64 rcvwin, grow;
- int rcvbuf;
-
- rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss;
-
- grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space);
-
- do_div(grow, msk->rcvq_space.space);
- rcvwin += (grow << 1);
-
- rcvbuf = min_t(u64, mptcp_space_from_win(sk, rcvwin),
- READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
-
- if (rcvbuf > sk->sk_rcvbuf) {
- u32 window_clamp;
-
- window_clamp = mptcp_win_from_space(sk, rcvbuf);
- WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
+ msk->rcvq_space.space = msk->rcvq_space.copied;
+ if (mptcp_rcvbuf_grow(sk)) {
- /* Make subflows follow along. If we do not do this, we
- * get drops at subflow level if skbs can't be moved to
- * the mptcp rx queue fast enough (announced rcv_win can
- * exceed ssk->sk_rcvbuf).
- */
- mptcp_for_each_subflow(msk, subflow) {
- struct sock *ssk;
- bool slow;
+ /* Make subflows follow along. If we do not do this, we
+ * get drops at subflow level if skbs can't be moved to
+ * the mptcp rx queue fast enough (announced rcv_win can
+ * exceed ssk->sk_rcvbuf).
+ */
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk;
+ bool slow;
- ssk = mptcp_subflow_tcp_sock(subflow);
- slow = lock_sock_fast(ssk);
- WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
- WRITE_ONCE(tcp_sk(ssk)->window_clamp, window_clamp);
- if (tcp_can_send_ack(ssk))
- tcp_cleanup_rbuf(ssk, 1);
- unlock_sock_fast(ssk, slow);
- }
+ ssk = mptcp_subflow_tcp_sock(subflow);
+ slow = lock_sock_fast(ssk);
+ tcp_sk(ssk)->rcvq_space.space = msk->rcvq_space.copied;
+ tcp_rcvbuf_grow(ssk);
+ unlock_sock_fast(ssk, slow);
}
}
- msk->rcvq_space.space = msk->rcvq_space.copied;
new_measure:
msk->rcvq_space.copied = 0;
msk->rcvq_space.time = mstamp;
@@ -2026,11 +2097,6 @@ static bool __mptcp_move_skbs(struct sock *sk)
if (list_empty(&msk->conn_list))
return false;
- /* verify we can move any data from the subflow, eventually updating */
- if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
- mptcp_for_each_subflow(msk, subflow)
- __mptcp_rcvbuf_update(sk, subflow->tcp_sock);
-
subflow = list_first_entry(&msk->conn_list,
struct mptcp_subflow_context, node);
for (;;) {
@@ -2104,6 +2170,8 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
goto out_err;
}
+ mptcp_rps_record_subflows(msk);
+
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
len = min_t(size_t, len, INT_MAX);
@@ -2146,14 +2214,8 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
break;
}
- if (sk->sk_shutdown & RCV_SHUTDOWN) {
- /* race breaker: the shutdown could be after the
- * previous receive queue check
- */
- if (__mptcp_move_skbs(sk))
- continue;
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
- }
if (sk->sk_state == TCP_CLOSE) {
copied = -ENOTCONN;
@@ -2543,9 +2605,9 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
static void __mptcp_retrans(struct sock *sk)
{
+ struct mptcp_sendmsg_info info = { .data_lock_held = true, };
struct mptcp_sock *msk = mptcp_sk(sk);
struct mptcp_subflow_context *subflow;
- struct mptcp_sendmsg_info info = {};
struct mptcp_data_frag *dfrag;
struct sock *ssk;
int ret, err;
@@ -2560,7 +2622,8 @@ static void __mptcp_retrans(struct sock *sk)
if (mptcp_data_fin_enabled(msk)) {
struct inet_connection_sock *icsk = inet_csk(sk);
- icsk->icsk_retransmits++;
+ WRITE_ONCE(icsk->icsk_retransmits,
+ icsk->icsk_retransmits + 1);
mptcp_set_datafin_timeout(sk);
mptcp_send_ack(msk);
@@ -2590,6 +2653,18 @@ static void __mptcp_retrans(struct sock *sk)
info.sent = 0;
info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len :
dfrag->already_sent;
+
+ /*
+ * make the whole retrans decision, xmit, disallow
+ * fallback atomic
+ */
+ spin_lock_bh(&msk->fallback_lock);
+ if (__mptcp_check_fallback(msk)) {
+ spin_unlock_bh(&msk->fallback_lock);
+ release_sock(ssk);
+ return;
+ }
+
while (info.sent < info.limit) {
ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
if (ret <= 0)
@@ -2603,8 +2678,9 @@ static void __mptcp_retrans(struct sock *sk)
len = max(copied, len);
tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
info.size_goal);
- WRITE_ONCE(msk->allow_infinite_fallback, false);
+ msk->allow_infinite_fallback = false;
}
+ spin_unlock_bh(&msk->fallback_lock);
release_sock(ssk);
}
@@ -2730,7 +2806,8 @@ static void __mptcp_init_sock(struct sock *sk)
WRITE_ONCE(msk->first, NULL);
inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
- WRITE_ONCE(msk->allow_infinite_fallback, true);
+ msk->allow_infinite_fallback = true;
+ msk->allow_subflows = true;
msk->recovery = false;
msk->subflow_id = 1;
msk->last_data_sent = tcp_jiffies32;
@@ -2738,6 +2815,7 @@ static void __mptcp_init_sock(struct sock *sk)
msk->last_ack_recv = tcp_jiffies32;
mptcp_pm_data_init(msk);
+ spin_lock_init(&msk->fallback_lock);
/* re-use the csk retrans timer for MPTCP-level retrans */
timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
@@ -3117,7 +3195,16 @@ static int mptcp_disconnect(struct sock *sk, int flags)
* subflow
*/
mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE);
+
+ /* The first subflow is already in TCP_CLOSE status, the following
+ * can't overlap with a fallback anymore
+ */
+ spin_lock_bh(&msk->fallback_lock);
+ msk->allow_subflows = true;
+ msk->allow_infinite_fallback = true;
WRITE_ONCE(msk->flags, 0);
+ spin_unlock_bh(&msk->fallback_lock);
+
msk->cb_flags = 0;
msk->recovery = false;
WRITE_ONCE(msk->can_ack, false);
@@ -3503,7 +3590,6 @@ void mptcp_sock_graft(struct sock *sk, struct socket *parent)
write_lock_bh(&sk->sk_callback_lock);
rcu_assign_pointer(sk->sk_wq, &parent->wq);
sk_set_socket(sk, parent);
- sk->sk_uid = SOCK_INODE(parent)->i_uid;
write_unlock_bh(&sk->sk_callback_lock);
}
@@ -3524,7 +3610,13 @@ bool mptcp_finish_join(struct sock *ssk)
/* active subflow, already present inside the conn_list */
if (!list_empty(&subflow->node)) {
+ spin_lock_bh(&msk->fallback_lock);
+ if (!msk->allow_subflows) {
+ spin_unlock_bh(&msk->fallback_lock);
+ return false;
+ }
mptcp_subflow_joined(msk, ssk);
+ spin_unlock_bh(&msk->fallback_lock);
mptcp_propagate_sndbuf(parent, ssk);
return true;
}
@@ -3648,16 +3740,15 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
* TCP option space.
*/
if (rcu_access_pointer(tcp_sk(ssk)->md5sig_info))
- mptcp_subflow_early_fallback(msk, subflow);
+ mptcp_early_fallback(msk, subflow, MPTCP_MIB_MD5SIGFALLBACK);
#endif
if (subflow->request_mptcp) {
- if (mptcp_active_should_disable(sk)) {
- MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEACTIVEDISABLED);
- mptcp_subflow_early_fallback(msk, subflow);
- } else if (mptcp_token_new_connect(ssk) < 0) {
- MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_TOKENFALLBACKINIT);
- mptcp_subflow_early_fallback(msk, subflow);
- }
+ if (mptcp_active_should_disable(sk))
+ mptcp_early_fallback(msk, subflow,
+ MPTCP_MIB_MPCAPABLEACTIVEDISABLED);
+ else if (mptcp_token_new_connect(ssk) < 0)
+ mptcp_early_fallback(msk, subflow,
+ MPTCP_MIB_TOKENFALLBACKINIT);
}
WRITE_ONCE(msk->write_seq, subflow->idsn);
@@ -3729,7 +3820,7 @@ static struct proto mptcp_prot = {
.stream_memory_free = mptcp_stream_memory_free,
.sockets_allocated = &mptcp_sockets_allocated,
- .memory_allocated = &tcp_memory_allocated,
+ .memory_allocated = &net_aligned_data.tcp_memory_allocated,
.per_cpu_fw_alloc = &tcp_memory_per_cpu_fw_alloc,
.memory_pressure = &tcp_memory_pressure,
@@ -3865,6 +3956,8 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
mptcp_sock_graft(ssk, newsock);
}
+ mptcp_rps_record_subflows(msk);
+
/* Do late cleanup for the first subflow as necessary. Also
* deal with bad peers not doing a complete shutdown.
*/
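
Taken together, the protocol.c changes above enforce one invariant: infinite fallback and additional subflow joins are mutually exclusive, both gated by the new msk->fallback_lock. A condensed sketch of that rule (fallback_wanted and join_wanted are hypothetical placeholders, not symbols from the patch):

	spin_lock_bh(&msk->fallback_lock);
	if (fallback_wanted && msk->allow_infinite_fallback)
		msk->allow_subflows = false;		/* no new joins once fallen back */
	else if (join_wanted && msk->allow_subflows)
		msk->allow_infinite_fallback = false;	/* no fallback once a join landed */
	spin_unlock_bh(&msk->fallback_lock);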
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 3dd11dd3ba16..52f9cfa4ce95 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -235,7 +235,7 @@ struct mptcp_pm_data {
u8 add_addr_accepted;
u8 local_addr_used;
u8 pm_type;
- u8 subflows;
+ u8 extra_subflows;
u8 status;
);
@@ -326,6 +326,7 @@ struct mptcp_sock {
int keepalive_cnt;
int keepalive_idle;
int keepalive_intvl;
+ int maxseg;
struct work_struct work;
struct sk_buff *ooo_last_skb;
struct rb_root out_of_order_queue;
@@ -340,16 +341,22 @@ struct mptcp_sock {
struct mptcp_pm_data pm;
struct mptcp_sched_ops *sched;
struct {
- u32 space; /* bytes copied in last measurement window */
- u32 copied; /* bytes copied in this measurement window */
+ int space; /* bytes copied in last measurement window */
+ int copied; /* bytes copied in this measurement window */
u64 time; /* start time of measurement window */
u64 rtt_us; /* last maximum rtt of subflows */
} rcvq_space;
u8 scaling_ratio;
+ bool allow_subflows;
u32 subflow_id;
u32 setsockopt_seq;
char ca_name[TCP_CA_NAME_MAX];
+
+ spinlock_t fallback_lock; /* protects fallback,
+ * allow_infinite_fallback and
+ * allow_join
+ */
};
#define mptcp_data_lock(sk) spin_lock_bh(&(sk)->sk_lock.slock)
@@ -781,9 +788,7 @@ static inline bool mptcp_epollin_ready(const struct sock *sk)
* as it can always coalesce them
*/
return (data_avail >= sk->sk_rcvlowat) ||
- (mem_cgroup_sockets_enabled && sk->sk_memcg &&
- mem_cgroup_under_socket_pressure(sk->sk_memcg)) ||
- READ_ONCE(tcp_memory_pressure);
+ tcp_under_memory_pressure(sk);
}
int mptcp_set_rcvlowat(struct sock *sk, int val);
@@ -1175,15 +1180,16 @@ void __init mptcp_pm_userspace_register(void);
void __init mptcp_pm_nl_init(void);
void mptcp_pm_worker(struct mptcp_sock *msk);
void __mptcp_pm_kernel_worker(struct mptcp_sock *msk);
-unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk);
-unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk);
-unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk);
-unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk);
+u8 mptcp_pm_get_endp_signal_max(const struct mptcp_sock *msk);
+u8 mptcp_pm_get_endp_subflow_max(const struct mptcp_sock *msk);
+u8 mptcp_pm_get_endp_laminar_max(const struct mptcp_sock *msk);
+u8 mptcp_pm_get_limit_add_addr_accepted(const struct mptcp_sock *msk);
+u8 mptcp_pm_get_limit_extra_subflows(const struct mptcp_sock *msk);
/* called under PM lock */
static inline void __mptcp_pm_close_subflow(struct mptcp_sock *msk)
{
- if (--msk->pm.subflows < mptcp_pm_get_subflows_max(msk))
+ if (--msk->pm.extra_subflows < mptcp_pm_get_limit_extra_subflows(msk))
WRITE_ONCE(msk->pm.accept_subflow, true);
}
@@ -1194,6 +1200,14 @@ static inline void mptcp_pm_close_subflow(struct mptcp_sock *msk)
spin_unlock_bh(&msk->pm.lock);
}
+static inline bool mptcp_pm_add_addr_c_flag_case(struct mptcp_sock *msk)
+{
+ return READ_ONCE(msk->pm.remote_deny_join_id0) &&
+ msk->pm.local_addr_used == 0 &&
+ mptcp_pm_get_limit_add_addr_accepted(msk) == 0 &&
+ msk->pm.extra_subflows < mptcp_pm_get_limit_extra_subflows(msk);
+}
+
void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk);
static inline struct mptcp_ext *mptcp_get_ext(const struct sk_buff *skb)
@@ -1216,17 +1230,6 @@ static inline bool mptcp_check_fallback(const struct sock *sk)
return __mptcp_check_fallback(msk);
}
-static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
-{
- if (__mptcp_check_fallback(msk)) {
- pr_debug("TCP fallback already done (msk=%p)\n", msk);
- return;
- }
- if (WARN_ON_ONCE(!READ_ONCE(msk->allow_infinite_fallback)))
- return;
- set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
-}
-
static inline bool __mptcp_has_initial_subflow(const struct mptcp_sock *msk)
{
struct sock *ssk = READ_ONCE(msk->first);
@@ -1236,14 +1239,17 @@ static inline bool __mptcp_has_initial_subflow(const struct mptcp_sock *msk)
TCPF_SYN_RECV | TCPF_LISTEN));
}
-static inline void mptcp_do_fallback(struct sock *ssk)
+bool __mptcp_try_fallback(struct mptcp_sock *msk, int fb_mib);
+
+static inline bool mptcp_try_fallback(struct sock *ssk, int fb_mib)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct sock *sk = subflow->conn;
struct mptcp_sock *msk;
msk = mptcp_sk(sk);
- __mptcp_do_fallback(msk);
+ if (!__mptcp_try_fallback(msk, fb_mib))
+ return false;
if (READ_ONCE(msk->snd_data_fin_enable) && !(ssk->sk_shutdown & SEND_SHUTDOWN)) {
gfp_t saved_allocation = ssk->sk_allocation;
@@ -1255,16 +1261,15 @@ static inline void mptcp_do_fallback(struct sock *ssk)
tcp_shutdown(ssk, SEND_SHUTDOWN);
ssk->sk_allocation = saved_allocation;
}
+ return true;
}
-#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)\n", __func__, a)
-
-static inline void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
- struct mptcp_subflow_context *subflow)
+static inline void mptcp_early_fallback(struct mptcp_sock *msk,
+ struct mptcp_subflow_context *subflow,
+ int fb_mib)
{
- pr_fallback(msk);
subflow->request_mptcp = 0;
- __mptcp_do_fallback(msk);
+ WARN_ON_ONCE(!__mptcp_try_fallback(msk, fb_mib));
}
static inline bool mptcp_check_infinite_map(struct sk_buff *skb)
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 3caa0a9d3b38..a28a48385885 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -798,6 +798,23 @@ unlock:
return ret;
}
+static int mptcp_setsockopt_all_sf(struct mptcp_sock *msk, int level,
+ int optname, sockptr_t optval,
+ unsigned int optlen)
+{
+ struct mptcp_subflow_context *subflow;
+ int ret = 0;
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ ret = tcp_setsockopt(ssk, level, optname, optval, optlen);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
sockptr_t optval, unsigned int optlen)
{
@@ -859,6 +876,11 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
&msk->keepalive_cnt,
val);
break;
+ case TCP_MAXSEG:
+ msk->maxseg = val;
+ ret = mptcp_setsockopt_all_sf(msk, SOL_TCP, optname, optval,
+ optlen);
+ break;
default:
ret = -ENOPROTOOPT;
}
@@ -914,10 +936,8 @@ static int mptcp_getsockopt_first_sf_only(struct mptcp_sock *msk, int level, int
lock_sock(sk);
ssk = msk->first;
- if (ssk) {
- ret = tcp_getsockopt(ssk, level, optname, optval, optlen);
- goto out;
- }
+ if (ssk)
+ goto get;
ssk = __mptcp_nmpc_sk(msk);
if (IS_ERR(ssk)) {
@@ -925,6 +945,7 @@ static int mptcp_getsockopt_first_sf_only(struct mptcp_sock *msk, int level, int
goto out;
}
+get:
ret = tcp_getsockopt(ssk, level, optname, optval, optlen);
out:
@@ -941,7 +962,7 @@ void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info)
memset(info, 0, sizeof(*info));
- info->mptcpi_subflows = READ_ONCE(msk->pm.subflows);
+ info->mptcpi_extra_subflows = READ_ONCE(msk->pm.extra_subflows);
info->mptcpi_add_addr_signal = READ_ONCE(msk->pm.add_addr_signaled);
info->mptcpi_add_addr_accepted = READ_ONCE(msk->pm.add_addr_accepted);
info->mptcpi_local_addr_used = READ_ONCE(msk->pm.local_addr_used);
@@ -951,14 +972,16 @@ void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info)
/* The following limits only make sense for the in-kernel PM */
if (mptcp_pm_is_kernel(msk)) {
- info->mptcpi_subflows_max =
- mptcp_pm_get_subflows_max(msk);
- info->mptcpi_add_addr_signal_max =
- mptcp_pm_get_add_addr_signal_max(msk);
- info->mptcpi_add_addr_accepted_max =
- mptcp_pm_get_add_addr_accept_max(msk);
- info->mptcpi_local_addr_max =
- mptcp_pm_get_local_addr_max(msk);
+ info->mptcpi_limit_extra_subflows =
+ mptcp_pm_get_limit_extra_subflows(msk);
+ info->mptcpi_endp_signal_max =
+ mptcp_pm_get_endp_signal_max(msk);
+ info->mptcpi_limit_add_addr_accepted =
+ mptcp_pm_get_limit_add_addr_accepted(msk);
+ info->mptcpi_endp_subflow_max =
+ mptcp_pm_get_endp_subflow_max(msk);
+ info->mptcpi_endp_laminar_max =
+ mptcp_pm_get_endp_laminar_max(msk);
}
if (__mptcp_check_fallback(msk))
@@ -975,7 +998,7 @@ void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info)
info->mptcpi_bytes_sent = msk->bytes_sent;
info->mptcpi_bytes_received = msk->bytes_received;
info->mptcpi_bytes_retrans = msk->bytes_retrans;
- info->mptcpi_subflows_total = info->mptcpi_subflows +
+ info->mptcpi_subflows_total = info->mptcpi_extra_subflows +
__mptcp_has_initial_subflow(msk);
now = tcp_jiffies32;
info->mptcpi_last_data_sent = jiffies_to_msecs(now - msk->last_data_sent);
@@ -1407,6 +1430,9 @@ static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
return mptcp_put_int_option(msk, optval, optlen, msk->notsent_lowat);
case TCP_IS_MPTCP:
return mptcp_put_int_option(msk, optval, optlen, 1);
+ case TCP_MAXSEG:
+ return mptcp_getsockopt_first_sf_only(msk, SOL_TCP, optname,
+ optval, optlen);
}
return -EOPNOTSUPP;
}
@@ -1508,13 +1534,12 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
{
static const unsigned int tx_rx_locks = SOCK_RCVBUF_LOCK | SOCK_SNDBUF_LOCK;
struct sock *sk = (struct sock *)msk;
+ bool keep_open;
- if (ssk->sk_prot->keepalive) {
- if (sock_flag(sk, SOCK_KEEPOPEN))
- ssk->sk_prot->keepalive(ssk, 1);
- else
- ssk->sk_prot->keepalive(ssk, 0);
- }
+ keep_open = sock_flag(sk, SOCK_KEEPOPEN);
+ if (ssk->sk_prot->keepalive)
+ ssk->sk_prot->keepalive(ssk, keep_open);
+ sock_valbool_flag(ssk, SOCK_KEEPOPEN, keep_open);
ssk->sk_priority = sk->sk_priority;
ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
@@ -1553,6 +1578,7 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
tcp_sock_set_keepidle_locked(ssk, msk->keepalive_idle);
tcp_sock_set_keepintvl(ssk, msk->keepalive_intvl);
tcp_sock_set_keepcnt(ssk, msk->keepalive_cnt);
+ tcp_sock_set_maxseg(ssk, msk->maxseg);
inet_assign_bit(TRANSPARENT, ssk, inet_test_bit(TRANSPARENT, sk));
inet_assign_bit(FREEBIND, ssk, inet_test_bit(FREEBIND, sk));
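
A minimal usage sketch (not part of the patch) for the TCP_MAXSEG support added above: the value is stored in msk->maxseg, forwarded to every current subflow via mptcp_setsockopt_all_sf(), and re-applied to later subflows through tcp_sock_set_maxseg() in sync_socket_options().

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>

	static int mptcp_cap_mss(int mptcp_fd, int mss)
	{
		/* IPPROTO_TCP (== SOL_TCP) options on an IPPROTO_MPTCP socket
		 * reach mptcp_setsockopt_sol_tcp(); TCP_MAXSEG now propagates
		 * to all subflows, present and future
		 */
		return setsockopt(mptcp_fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss));
	}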
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 15613d691bfe..e8325890a322 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -544,10 +544,13 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
mptcp_get_options(skb, &mp_opt);
if (subflow->request_mptcp) {
if (!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYNACK)) {
- MPTCP_INC_STATS(sock_net(sk),
- MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
- mptcp_do_fallback(sk);
- pr_fallback(msk);
+ if (!mptcp_try_fallback(sk,
+ MPTCP_MIB_MPCAPABLEACTIVEFALLBACK)) {
+ MPTCP_INC_STATS(sock_net(sk),
+ MPTCP_MIB_FALLBACKFAILED);
+ goto do_reset;
+ }
+
goto fallback;
}
@@ -880,6 +883,10 @@ create_child:
ctx->subflow_id = 1;
owner = mptcp_sk(ctx->conn);
+
+ if (mp_opt.deny_join_id0)
+ WRITE_ONCE(owner->pm.remote_deny_join_id0, true);
+
mptcp_pm_new_connection(owner, child, 1);
/* with OoO packets we can reach here without ingress
@@ -1300,20 +1307,29 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ss
mptcp_schedule_work(sk);
}
-static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
+static bool mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
unsigned long fail_tout;
+ /* we are really failing, prevent any later subflow join */
+ spin_lock_bh(&msk->fallback_lock);
+ if (!msk->allow_infinite_fallback) {
+ spin_unlock_bh(&msk->fallback_lock);
+ return false;
+ }
+ msk->allow_subflows = false;
+ spin_unlock_bh(&msk->fallback_lock);
+
/* graceful failure can happen only on the MPC subflow */
if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
- return;
+ return false;
/* since the close timeout takes precedence on the fail one,
* no need to start the latter when the first is already set
*/
if (sock_flag((struct sock *)msk, SOCK_DEAD))
- return;
+ return true;
/* we don't need extreme accuracy here, use a zero fail_tout as special
* value meaning no fail timeout at all;
@@ -1325,6 +1341,7 @@ static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
tcp_send_ack(ssk);
mptcp_reset_tout_timer(msk, subflow->fail_tout);
+ return true;
}
static bool subflow_check_data_avail(struct sock *ssk)
@@ -1385,17 +1402,16 @@ fallback:
(subflow->mp_join || subflow->valid_csum_seen)) {
subflow->send_mp_fail = 1;
- if (!READ_ONCE(msk->allow_infinite_fallback)) {
+ if (!mptcp_subflow_fail(msk, ssk)) {
subflow->reset_transient = 0;
subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
goto reset;
}
- mptcp_subflow_fail(msk, ssk);
WRITE_ONCE(subflow->data_avail, true);
return true;
}
- if (!READ_ONCE(msk->allow_infinite_fallback)) {
+ if (!mptcp_try_fallback(ssk, MPTCP_MIB_DSSFALLBACK)) {
/* fatal protocol error, close the socket.
* subflow_error_report() will introduce the appropriate barriers
*/
@@ -1413,8 +1429,6 @@ reset:
WRITE_ONCE(subflow->data_avail, false);
return false;
}
-
- mptcp_do_fallback(ssk);
}
skb = skb_peek(&ssk->sk_receive_queue);
@@ -1679,7 +1693,6 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_pm_local *local,
/* discard the subflow socket */
mptcp_sock_graft(ssk, sk->sk_socket);
iput(SOCK_INODE(sf));
- WRITE_ONCE(msk->allow_infinite_fallback, false);
mptcp_stop_tout_timer(sk);
return 0;
@@ -1708,19 +1721,14 @@ static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
/* only the additional subflows created by kworkers have to be modified */
if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
cgroup_id(sock_cgroup_ptr(child_skcd))) {
-#ifdef CONFIG_MEMCG
- struct mem_cgroup *memcg = parent->sk_memcg;
-
- mem_cgroup_sk_free(child);
- if (memcg && css_tryget(&memcg->css))
- child->sk_memcg = memcg;
-#endif /* CONFIG_MEMCG */
-
cgroup_sk_free(child_skcd);
*child_skcd = *parent_skcd;
cgroup_sk_clone(child_skcd);
}
#endif /* CONFIG_SOCK_CGROUP_DATA */
+
+ if (mem_cgroup_sockets_enabled)
+ mem_cgroup_sk_inherit(parent, child);
}
static void mptcp_subflow_ops_override(struct sock *ssk)
@@ -1845,14 +1853,11 @@ static void subflow_state_change(struct sock *sk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
struct sock *parent = subflow->conn;
- struct mptcp_sock *msk;
__subflow_state_change(sk);
- msk = mptcp_sk(parent);
if (subflow_simultaneous_connect(sk)) {
- mptcp_do_fallback(sk);
- pr_fallback(msk);
+ WARN_ON_ONCE(!mptcp_try_fallback(sk, MPTCP_MIB_SIMULTCONNFALLBACK));
subflow->conn_finished = 1;
mptcp_propagate_state(parent, sk, subflow, NULL);
}
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index e76c6de0c784..adee6dcabdc3 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -110,7 +110,7 @@ struct ncsi_channel_version {
u8 update; /* NCSI version update */
char alpha1; /* NCSI version alpha1 */
char alpha2; /* NCSI version alpha2 */
- u8 fw_name[12]; /* Firmware name string */
+ u8 fw_name[12 + 1]; /* Firmware name string */
u32 fw_version; /* Firmware version */
u16 pci_ids[4]; /* PCI identification */
u32 mf_id; /* Manufacture ID */
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 472cc68ad86f..271ec6c3929e 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -775,6 +775,7 @@ static int ncsi_rsp_handler_gvi(struct ncsi_request *nr)
ncv->alpha1 = rsp->alpha1;
ncv->alpha2 = rsp->alpha2;
memcpy(ncv->fw_name, rsp->fw_name, 12);
+ ncv->fw_name[12] = '\0';
ncv->fw_version = ntohl(rsp->fw_version);
for (i = 0; i < ARRAY_SIZE(ncv->pci_ids); i++)
ncv->pci_ids[i] = ntohs(rsp->pci_ids[i]);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 2560416218d0..6cdc994fdc8a 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -195,16 +195,6 @@ config NF_CONNTRACK_LABELS
config NF_CONNTRACK_OVS
bool
-config NF_CT_PROTO_DCCP
- bool 'DCCP protocol connection tracking support'
- depends on NETFILTER_ADVANCED
- default y
- help
- With this option enabled, the layer 3 independent connection
- tracking code will be able to do state tracking on DCCP connections.
-
- If unsure, say Y.
-
config NF_CT_PROTO_GRE
bool
@@ -516,6 +506,12 @@ config NFT_CT
This option adds the "ct" expression that you can use to match
connection tracking information such as the flow state.
+config NFT_EXTHDR_DCCP
+ bool "Netfilter nf_tables exthdr DCCP support (DEPRECATED)"
+ default n
+ help
+ This option adds support for matching on DCCP extension headers.
+
config NFT_FLOW_OFFLOAD
depends on NF_CONNTRACK && NF_FLOW_TABLE
tristate "Netfilter nf_tables hardware flow offload module"
@@ -762,6 +758,16 @@ config NETFILTER_XTABLES_COMPAT
If unsure, say N.
+config NETFILTER_XTABLES_LEGACY
+ bool "Netfilter legacy tables support"
+ depends on !PREEMPT_RT
+ help
+ Say Y here if you still require support for legacy tables. This is
+ required by the legacy tools (iptables-legacy) and is not needed if
+ you use iptables over nftables (iptables-nft).
+ Legacy support is not limited to IP, it also includes EBTABLES and
+ ARPTABLES.
+
comment "Xtables combined modules"
config NETFILTER_XT_MARK
@@ -1278,9 +1284,9 @@ config NETFILTER_XT_MATCH_CPU
To compile it as a module, choose M here. If unsure, say N.
config NETFILTER_XT_MATCH_DCCP
- tristate '"dccp" protocol match support'
+ tristate '"dccp" protocol match support (DEPRECATED)'
depends on NETFILTER_ADVANCED
- default IP_DCCP
+ default n
help
With this option enabled, you will be able to use the iptables
`dccp' match in order to match on DCCP source/destination ports
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index f0aa4d7ef499..e43e20f529f8 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -12,7 +12,6 @@ nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_LABELS) += nf_conntrack_labels.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_OVS) += nf_conntrack_ovs.o
-nf_conntrack-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o
nf_conntrack-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
nf_conntrack-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o
ifeq ($(CONFIG_NF_CONNTRACK),m)
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 5251524b96af..5e4453e9ef8e 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -63,7 +63,7 @@ struct hbucket {
: jhash_size((htable_bits) - HTABLE_REGION_BITS))
#define ahash_sizeof_regions(htable_bits) \
(ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region))
-#define ahash_region(n, htable_bits) \
+#define ahash_region(n) \
((n) / jhash_size(HTABLE_REGION_BITS))
#define ahash_bucket_start(h, htable_bits) \
((htable_bits) < HTABLE_REGION_BITS ? 0 \
@@ -702,7 +702,7 @@ retry:
#endif
key = HKEY(data, h->initval, htable_bits);
m = __ipset_dereference(hbucket(t, key));
- nr = ahash_region(key, htable_bits);
+ nr = ahash_region(key);
if (!m) {
m = kzalloc(sizeof(*m) +
AHASH_INIT_SIZE * dsize,
@@ -852,7 +852,7 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
rcu_read_lock_bh();
t = rcu_dereference_bh(h->table);
key = HKEY(value, h->initval, t->htable_bits);
- r = ahash_region(key, t->htable_bits);
+ r = ahash_region(key);
atomic_inc(&t->uref);
elements = t->hregion[r].elements;
maxelem = t->maxelem;
@@ -1050,7 +1050,7 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
rcu_read_lock_bh();
t = rcu_dereference_bh(h->table);
key = HKEY(value, h->initval, t->htable_bits);
- r = ahash_region(key, t->htable_bits);
+ r = ahash_region(key);
atomic_inc(&t->uref);
rcu_read_unlock_bh();
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 44b2ad695c15..37ebb0cb62b8 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -885,7 +885,7 @@ static void ip_vs_conn_expire(struct timer_list *t)
* conntrack cleanup for the net.
*/
smp_rmb();
- if (ipvs->enable)
+ if (READ_ONCE(ipvs->enable))
ip_vs_conn_drop_conntrack(cp);
}
@@ -926,7 +926,7 @@ static void ip_vs_conn_expire(struct timer_list *t)
void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
{
/* Using mod_timer_pending will ensure the timer is not
- * modified after the final del_timer in ip_vs_conn_expire.
+ * modified after the final timer_delete in ip_vs_conn_expire.
*/
if (timer_pending(&cp->timer) &&
time_after(cp->timer.expires, jiffies))
@@ -1439,7 +1439,7 @@ void ip_vs_expire_nodest_conn_flush(struct netns_ipvs *ipvs)
cond_resched_rcu();
/* netns clean up started, abort delayed work */
- if (!ipvs->enable)
+ if (!READ_ONCE(ipvs->enable))
break;
}
rcu_read_unlock();
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index c7a8a08b7308..5ea7ab8bf4dc 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1353,9 +1353,6 @@ ip_vs_out_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *stat
if (unlikely(!skb_dst(skb)))
return NF_ACCEPT;
- if (!ipvs->enable)
- return NF_ACCEPT;
-
ip_vs_fill_iph_skb(af, skb, false, &iph);
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
@@ -1940,7 +1937,7 @@ ip_vs_in_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state
return NF_ACCEPT;
}
/* ipvs enabled in this netns ? */
- if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
+ if (unlikely(sysctl_backup_only(ipvs)))
return NF_ACCEPT;
ip_vs_fill_iph_skb(af, skb, false, &iph);
@@ -2108,7 +2105,7 @@ ip_vs_forward_icmp(void *priv, struct sk_buff *skb,
int r;
/* ipvs enabled in this netns ? */
- if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
+ if (unlikely(sysctl_backup_only(ipvs)))
return NF_ACCEPT;
if (state->pf == NFPROTO_IPV4) {
@@ -2295,7 +2292,7 @@ static int __net_init __ip_vs_init(struct net *net)
return -ENOMEM;
/* Hold the beast until a service is registered */
- ipvs->enable = 0;
+ WRITE_ONCE(ipvs->enable, 0);
ipvs->net = net;
/* Counters used for creating unique names */
ipvs->gen = atomic_read(&ipvs_netns_cnt);
@@ -2367,7 +2364,7 @@ static void __net_exit __ip_vs_dev_cleanup_batch(struct list_head *net_list)
ipvs = net_ipvs(net);
ip_vs_unregister_hooks(ipvs, AF_INET);
ip_vs_unregister_hooks(ipvs, AF_INET6);
- ipvs->enable = 0; /* Disable packet reception */
+ WRITE_ONCE(ipvs->enable, 0); /* Disable packet reception */
smp_wmb();
ip_vs_sync_net_cleanup(ipvs);
}
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 6a6fc4478533..4c8fa22be88a 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -256,7 +256,7 @@ static void est_reload_work_handler(struct work_struct *work)
struct ip_vs_est_kt_data *kd = ipvs->est_kt_arr[id];
/* netns clean up started, abort delayed work */
- if (!ipvs->enable)
+ if (!READ_ONCE(ipvs->enable))
goto unlock;
if (!kd)
continue;
@@ -1483,9 +1483,9 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
*svc_p = svc;
- if (!ipvs->enable) {
+ if (!READ_ONCE(ipvs->enable)) {
/* Now there is a service - full throttle */
- ipvs->enable = 1;
+ WRITE_ONCE(ipvs->enable, 1);
/* Start estimation for first time */
ip_vs_est_reload_start(ipvs);
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index f821ad2e19b3..93a925f1ed9b 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -231,7 +231,7 @@ static int ip_vs_estimation_kthread(void *data)
void ip_vs_est_reload_start(struct netns_ipvs *ipvs)
{
/* Ignore reloads before first service is added */
- if (!ipvs->enable)
+ if (!READ_ONCE(ipvs->enable))
return;
ip_vs_est_stopped_recalc(ipvs);
/* Bump the kthread configuration genid */
@@ -265,7 +265,8 @@ int ip_vs_est_kthread_start(struct netns_ipvs *ipvs,
}
set_user_nice(kd->task, sysctl_est_nice(ipvs));
- set_cpus_allowed_ptr(kd->task, sysctl_est_cpulist(ipvs));
+ if (sysctl_est_preferred_cpulist(ipvs))
+ kthread_affine_preferred(kd->task, sysctl_est_preferred_cpulist(ipvs));
pr_info("starting estimator thread %d...\n", kd->id);
wake_up_process(kd->task);
@@ -305,7 +306,7 @@ static int ip_vs_est_add_kthread(struct netns_ipvs *ipvs)
int i;
if ((unsigned long)ipvs->est_kt_count >= ipvs->est_max_threads &&
- ipvs->enable && ipvs->est_max_threads)
+ READ_ONCE(ipvs->enable) && ipvs->est_max_threads)
return -EINVAL;
mutex_lock(&ipvs->est_mutex);
@@ -342,7 +343,7 @@ static int ip_vs_est_add_kthread(struct netns_ipvs *ipvs)
}
/* Start kthread tasks only when services are present */
- if (ipvs->enable && !ip_vs_est_stopped(ipvs)) {
+ if (READ_ONCE(ipvs->enable) && !ip_vs_est_stopped(ipvs)) {
ret = ip_vs_est_kthread_start(ipvs, kd);
if (ret < 0)
goto out;
@@ -485,7 +486,7 @@ int ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats)
struct ip_vs_estimator *est = &stats->est;
int ret;
- if (!ipvs->est_max_threads && ipvs->enable)
+ if (!ipvs->est_max_threads && READ_ONCE(ipvs->enable))
ipvs->est_max_threads = ip_vs_est_max_threads(ipvs);
est->ktid = -1;
@@ -662,7 +663,7 @@ static int ip_vs_est_calc_limits(struct netns_ipvs *ipvs, int *chain_max)
/* Wait for cpufreq frequency transition */
wait_event_idle_timeout(wq, kthread_should_stop(),
HZ / 50);
- if (!ipvs->enable || kthread_should_stop())
+ if (!READ_ONCE(ipvs->enable) || kthread_should_stop())
goto stop;
}
@@ -680,7 +681,7 @@ static int ip_vs_est_calc_limits(struct netns_ipvs *ipvs, int *chain_max)
rcu_read_unlock();
local_bh_enable();
- if (!ipvs->enable || kthread_should_stop())
+ if (!READ_ONCE(ipvs->enable) || kthread_should_stop())
goto stop;
cond_resched();
@@ -756,7 +757,7 @@ static void ip_vs_est_calc_phase(struct netns_ipvs *ipvs)
mutex_lock(&ipvs->est_mutex);
for (id = 1; id < ipvs->est_kt_count; id++) {
/* netns clean up started, abort */
- if (!ipvs->enable)
+ if (!READ_ONCE(ipvs->enable))
goto unlock2;
kd = ipvs->est_kt_arr[id];
if (!kd)
@@ -786,7 +787,7 @@ last_kt:
id = ipvs->est_kt_count;
next_kt:
- if (!ipvs->enable || kthread_should_stop())
+ if (!READ_ONCE(ipvs->enable) || kthread_should_stop())
goto unlock;
id--;
if (id < 0)
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index d8a284999544..206c6700e200 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -53,6 +53,7 @@ enum {
IP_VS_FTP_EPSV,
};
+static bool exiting_module;
/*
* List of ports (up to IP_VS_APP_MAX_PORTS) to be handled by helper
* First port is set to the default port.
@@ -605,7 +606,7 @@ static void __ip_vs_ftp_exit(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
- if (!ipvs)
+ if (!ipvs || !exiting_module)
return;
unregister_ip_vs_app(ipvs, &ip_vs_ftp);
@@ -627,6 +628,7 @@ static int __init ip_vs_ftp_init(void)
*/
static void __exit ip_vs_ftp_exit(void)
{
+ exiting_module = true;
unregister_pernet_subsys(&ip_vs_ftp_ops);
/* rcu_barrier() is called by netns */
}
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 014f07740369..95af252b2939 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -97,7 +97,7 @@ __ip_vs_dst_check(struct ip_vs_dest *dest)
if (!dest_dst)
return NULL;
dst = dest_dst->dst_cache;
- if (dst->obsolete &&
+ if (READ_ONCE(dst->obsolete) &&
dst->ops->check(dst, dest_dst->dst_cookie) == NULL)
return NULL;
return dest_dst;
diff --git a/net/netfilter/nf_bpf_link.c b/net/netfilter/nf_bpf_link.c
index 06b084844700..46e667a50d98 100644
--- a/net/netfilter/nf_bpf_link.c
+++ b/net/netfilter/nf_bpf_link.c
@@ -17,7 +17,7 @@ static unsigned int nf_hook_run_bpf(void *bpf_prog, struct sk_buff *skb,
.skb = skb,
};
- return bpf_prog_run(prog, &ctx);
+ return bpf_prog_run_pin_on_cpu(prog, &ctx);
}
struct bpf_nf_link {
@@ -225,7 +225,8 @@ int bpf_nf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
if (!link)
return -ENOMEM;
- bpf_link_init(&link->link, BPF_LINK_TYPE_NETFILTER, &bpf_nf_link_lops, prog);
+ bpf_link_init(&link->link, BPF_LINK_TYPE_NETFILTER, &bpf_nf_link_lops, prog,
+ attr->link_create.attach_type);
link->hook_ops.hook = nf_hook_run_bpf;
link->hook_ops.hook_ops_type = NF_HOOK_OP_BPF;
@@ -295,6 +296,9 @@ static bool nf_is_valid_access(int off, int size, enum bpf_access_type type,
if (off < 0 || off >= sizeof(struct bpf_nf_ctx))
return false;
+ if (off % size != 0)
+ return false;
+
if (type == BPF_WRITE)
return false;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 201d3c4ec623..344f88295976 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -136,8 +136,8 @@ static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
}
/* return true if we need to recompute hashes (in case hash table was resized) */
-static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
- unsigned int h2, unsigned int sequence)
+static bool nf_conntrack_double_lock(unsigned int h1, unsigned int h2,
+ unsigned int sequence)
{
h1 %= CONNTRACK_LOCKS;
h2 %= CONNTRACK_LOCKS;
@@ -329,9 +329,6 @@ nf_ct_get_tuple(const struct sk_buff *skb,
#ifdef CONFIG_NF_CT_PROTO_SCTP
case IPPROTO_SCTP:
#endif
-#ifdef CONFIG_NF_CT_PROTO_DCCP
- case IPPROTO_DCCP:
-#endif
/* fallthrough */
return nf_ct_get_tuple_ports(skb, dataoff, tuple);
default:
@@ -616,7 +613,7 @@ static void __nf_ct_delete_from_lists(struct nf_conn *ct)
reply_hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
- } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
+ } while (nf_conntrack_double_lock(hash, reply_hash, sequence));
clean_from_lists(ct);
nf_conntrack_double_unlock(hash, reply_hash);
@@ -893,7 +890,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
reply_hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
- } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
+ } while (nf_conntrack_double_lock(hash, reply_hash, sequence));
max_chainlen = MIN_CHAINLEN + get_random_u32_below(MAX_CHAINLEN);
@@ -1124,6 +1121,12 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
hlist_nulls_add_head_rcu(&loser_ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
&nf_conntrack_hash[repl_idx]);
+ /* confirmed bit must be set after hlist add, not before:
+ * loser_ct can still be visible to other cpu due to
+ * SLAB_TYPESAFE_BY_RCU.
+ */
+ smp_mb__before_atomic();
+ set_bit(IPS_CONFIRMED_BIT, &loser_ct->status);
NF_CT_STAT_INC(net, clash_resolve);
return NF_ACCEPT;
@@ -1231,7 +1234,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
reply_hash = hash_conntrack(net,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
- } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
+ } while (nf_conntrack_double_lock(hash, reply_hash, sequence));
/* We're not in hash table, and we refuse to set up related
* connections for unconfirmed conns. But packet copies and
@@ -1260,8 +1263,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
* user context, else we insert an already 'dead' hash, blocking
* further use of that particular connection -JM.
*/
- ct->status |= IPS_CONFIRMED;
-
if (unlikely(nf_ct_is_dying(ct))) {
NF_CT_STAT_INC(net, insert_failed);
goto dying;
@@ -1293,7 +1294,7 @@ chaintoolong:
}
}
- /* Timer relative to confirmation time, not original
+ /* Timeout is relative to confirmation time, not original
setting time, otherwise we'd get timer wrap in
weird delay cases. */
ct->timeout += nfct_time_stamp;
@@ -1301,11 +1302,21 @@ chaintoolong:
__nf_conntrack_insert_prepare(ct);
/* Since the lookup is lockless, hash insertion must be done after
- * starting the timer and setting the CONFIRMED bit. The RCU barriers
- * guarantee that no other CPU can find the conntrack before the above
- * stores are visible.
+ * setting ct->timeout. The RCU barriers guarantee that no other CPU
+ * can find the conntrack before the above stores are visible.
*/
__nf_conntrack_hash_insert(ct, hash, reply_hash);
+
+ /* IPS_CONFIRMED unset means 'ct not (yet) in hash', conntrack lookups
+ * skip entries that lack this bit. This happens when a CPU is looking
+ * at a stale entry that is being recycled due to SLAB_TYPESAFE_BY_RCU
+ * or when another CPU encounters this entry right after the insertion
+ * but before the set-confirm-bit below. This bit must not be set until
+ * after __nf_conntrack_hash_insert().
+ */
+ smp_mb__before_atomic();
+ set_bit(IPS_CONFIRMED_BIT, &ct->status);
+
nf_conntrack_double_unlock(hash, reply_hash);
local_bh_enable();
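
For illustration, a condensed reader-side sketch (an assumption, not from this patch; the real lookup lives in ____nf_conntrack_find()) of what the delayed set_bit(IPS_CONFIRMED_BIT) above pairs with:

	#include <net/netfilter/nf_conntrack.h>

	/* lookups must ignore entries that are not yet confirmed: they may be
	 * stale objects recycled under SLAB_TYPESAFE_BY_RCU, or entries added
	 * to the hash a moment before the confirm bit becomes visible
	 */
	static bool ct_lookup_usable(const struct nf_conn *ct)
	{
		return nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct);
	}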
@@ -1662,7 +1673,11 @@ __nf_conntrack_alloc(struct net *net,
if (!conntrack_gc_work.early_drop)
conntrack_gc_work.early_drop = true;
atomic_dec(&cnet->count);
- net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
+ if (net == &init_net)
+ net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
+ else
+ net_warn_ratelimited("nf_conntrack: table full in netns %u, dropping packet\n",
+ net->ns.inum);
return ERR_PTR(-ENOMEM);
}
}
@@ -1982,11 +1997,6 @@ static int nf_conntrack_handle_packet(struct nf_conn *ct,
return nf_conntrack_sctp_packet(ct, skb, dataoff,
ctinfo, state);
#endif
-#ifdef CONFIG_NF_CT_PROTO_DCCP
- case IPPROTO_DCCP:
- return nf_conntrack_dccp_packet(ct, skb, dataoff,
- ctinfo, state);
-#endif
#ifdef CONFIG_NF_CT_PROTO_GRE
case IPPROTO_GRE:
return nf_conntrack_gre_packet(ct, skb, dataoff,
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index af68c64acaab..81baf2082604 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -301,7 +301,7 @@ void nf_conntrack_ecache_work(struct net *net, enum nf_ct_ecache_state state)
net->ct.ecache_dwork_pending = true;
} else if (state == NFCT_ECACHE_DESTROY_SENT) {
if (!hlist_nulls_empty(&cnet->ecache.dying_list))
- mod_delayed_work(system_wq, &cnet->ecache.dwork, 0);
+ mod_delayed_work(system_percpu_wq, &cnet->ecache.dwork, 0);
else
net->ct.ecache_dwork_pending = false;
}
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 4ed5878cb25b..ceb48c3ca0a4 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -368,7 +368,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
(cur->tuple.src.l3num == NFPROTO_UNSPEC ||
cur->tuple.src.l3num == me->tuple.src.l3num) &&
cur->tuple.dst.protonum == me->tuple.dst.protonum) {
- ret = -EEXIST;
+ ret = -EBUSY;
goto out;
}
}
@@ -379,7 +379,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple,
&mask)) {
- ret = -EEXIST;
+ ret = -EBUSY;
goto out;
}
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 2cc0fde23344..3a04665adf99 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -60,7 +60,7 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("List and change connection tracking table");
struct ctnetlink_list_dump_ctx {
- struct nf_conn *last;
+ unsigned long last_id;
unsigned int cpu;
bool done;
};
@@ -884,8 +884,6 @@ errout:
static int ctnetlink_done(struct netlink_callback *cb)
{
- if (cb->args[1])
- nf_ct_put((struct nf_conn *)cb->args[1]);
kfree(cb->data);
return 0;
}
@@ -1208,19 +1206,26 @@ ignore_entry:
return 0;
}
+static unsigned long ctnetlink_get_id(const struct nf_conn *ct)
+{
+ unsigned long id = nf_ct_get_id(ct);
+
+ return id ? id : 1;
+}
+
static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
unsigned int flags = cb->data ? NLM_F_DUMP_FILTERED : 0;
struct net *net = sock_net(skb->sk);
- struct nf_conn *ct, *last;
+ unsigned long last_id = cb->args[1];
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
struct nf_conn *nf_ct_evict[8];
+ struct nf_conn *ct;
int res, i;
spinlock_t *lockp;
- last = (struct nf_conn *)cb->args[1];
i = 0;
local_bh_disable();
@@ -1257,7 +1262,7 @@ restart:
continue;
if (cb->args[1]) {
- if (ct != last)
+ if (ctnetlink_get_id(ct) != last_id)
continue;
cb->args[1] = 0;
}
@@ -1270,8 +1275,7 @@ restart:
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
ct, true, flags);
if (res < 0) {
- nf_conntrack_get(&ct->ct_general);
- cb->args[1] = (unsigned long)ct;
+ cb->args[1] = ctnetlink_get_id(ct);
spin_unlock(lockp);
goto out;
}
@@ -1284,12 +1288,10 @@ restart:
}
out:
local_bh_enable();
- if (last) {
+ if (last_id) {
/* nf ct hash resize happened, now clear the leftover. */
- if ((struct nf_conn *)cb->args[1] == last)
+ if (cb->args[1] == last_id)
cb->args[1] = 0;
-
- nf_ct_put(last);
}
while (i) {
@@ -1731,16 +1733,6 @@ static int ctnetlink_get_conntrack(struct sk_buff *skb,
return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
}
-static int ctnetlink_done_list(struct netlink_callback *cb)
-{
- struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
-
- if (ctx->last)
- nf_ct_put(ctx->last);
-
- return 0;
-}
-
#ifdef CONFIG_NF_CONNTRACK_EVENTS
static int ctnetlink_dump_one_entry(struct sk_buff *skb,
struct netlink_callback *cb,
@@ -1755,11 +1747,11 @@ static int ctnetlink_dump_one_entry(struct sk_buff *skb,
if (l3proto && nf_ct_l3num(ct) != l3proto)
return 0;
- if (ctx->last) {
- if (ct != ctx->last)
+ if (ctx->last_id) {
+ if (ctnetlink_get_id(ct) != ctx->last_id)
return 0;
- ctx->last = NULL;
+ ctx->last_id = 0;
}
/* We can't dump extension info for the unconfirmed
@@ -1773,12 +1765,8 @@ static int ctnetlink_dump_one_entry(struct sk_buff *skb,
cb->nlh->nlmsg_seq,
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
ct, dying, 0);
- if (res < 0) {
- if (!refcount_inc_not_zero(&ct->ct_general.use))
- return 0;
-
- ctx->last = ct;
- }
+ if (res < 0)
+ ctx->last_id = ctnetlink_get_id(ct);
return res;
}
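
[Editor's sketch] The ctnetlink dump hunks above drop the old scheme of pinning the last dumped conntrack with a reference and instead resume from a non-zero id cookie (ctnetlink_get_id() maps an id of 0 to 1 so that 0 keeps meaning "no resume point"). A compact, hedged sketch of that resume-by-id pattern; every identifier below is hypothetical.

        struct obj;                                     /* hypothetical dumped object */
        struct dump_ctx { unsigned long last_id; };     /* hypothetical per-dump state */
        static unsigned long obj_id(const struct obj *o);  /* stable, non-zero cookie */
        static int emit(const struct obj *o);               /* <0 when the skb is full */

        static int dump_one(struct dump_ctx *ctx, const struct obj *o)
        {
                if (ctx->last_id) {
                        if (obj_id(o) != ctx->last_id)
                                return 0;               /* still skipping to resume point */
                        ctx->last_id = 0;               /* reached it, emit from here on */
                }

                if (emit(o) < 0) {
                        ctx->last_id = obj_id(o);       /* remember where to pick up */
                        return -1;                      /* stop this dump round */
                }
                return 0;
        }

As the removed lines show, this avoids the nf_conntrack_get()/nf_ct_put() pair that previously kept the last entry alive between dump rounds.
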
@@ -1794,10 +1782,10 @@ static int
ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
{
struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
- struct nf_conn *last = ctx->last;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
const struct net *net = sock_net(skb->sk);
struct nf_conntrack_net_ecache *ecache_net;
+ unsigned long last_id = ctx->last_id;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
#endif
@@ -1805,7 +1793,7 @@ ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
if (ctx->done)
return 0;
- ctx->last = NULL;
+ ctx->last_id = 0;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
ecache_net = nf_conn_pernet_ecache(net);
@@ -1816,24 +1804,21 @@ ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
int res;
ct = nf_ct_tuplehash_to_ctrack(h);
- if (last && last != ct)
+ if (last_id && last_id != ctnetlink_get_id(ct))
continue;
res = ctnetlink_dump_one_entry(skb, cb, ct, true);
if (res < 0) {
spin_unlock_bh(&ecache_net->dying_lock);
- nf_ct_put(last);
return skb->len;
}
- nf_ct_put(last);
- last = NULL;
+ last_id = 0;
}
spin_unlock_bh(&ecache_net->dying_lock);
#endif
ctx->done = true;
- nf_ct_put(last);
return skb->len;
}
@@ -1845,7 +1830,6 @@ static int ctnetlink_get_ct_dying(struct sk_buff *skb,
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = ctnetlink_dump_dying,
- .done = ctnetlink_done_list,
};
return netlink_dump_start(info->sk, skb, info->nlh, &c);
}
@@ -1860,7 +1844,6 @@ static int ctnetlink_get_ct_unconfirmed(struct sk_buff *skb,
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = ctnetlink_dump_unconfirmed,
- .done = ctnetlink_done_list,
};
return netlink_dump_start(info->sk, skb, info->nlh, &c);
}
@@ -2036,7 +2019,6 @@ static void ctnetlink_change_mark(struct nf_conn *ct,
static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
[CTA_PROTOINFO_TCP] = { .type = NLA_NESTED },
- [CTA_PROTOINFO_DCCP] = { .type = NLA_NESTED },
[CTA_PROTOINFO_SCTP] = { .type = NLA_NESTED },
};
@@ -3169,23 +3151,27 @@ errout:
return 0;
}
#endif
-static int ctnetlink_exp_done(struct netlink_callback *cb)
+
+static unsigned long ctnetlink_exp_id(const struct nf_conntrack_expect *exp)
{
- if (cb->args[1])
- nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
- return 0;
+ unsigned long id = (unsigned long)exp;
+
+ id += nf_ct_get_id(exp->master);
+ id += exp->class;
+
+ return id ? id : 1;
}
static int
ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- struct nf_conntrack_expect *exp, *last;
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
u_int8_t l3proto = nfmsg->nfgen_family;
+ unsigned long last_id = cb->args[1];
+ struct nf_conntrack_expect *exp;
rcu_read_lock();
- last = (struct nf_conntrack_expect *)cb->args[1];
for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
restart:
hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]],
@@ -3197,7 +3183,7 @@ restart:
continue;
if (cb->args[1]) {
- if (exp != last)
+ if (ctnetlink_exp_id(exp) != last_id)
continue;
cb->args[1] = 0;
}
@@ -3206,9 +3192,7 @@ restart:
cb->nlh->nlmsg_seq,
IPCTNL_MSG_EXP_NEW,
exp) < 0) {
- if (!refcount_inc_not_zero(&exp->use))
- continue;
- cb->args[1] = (unsigned long)exp;
+ cb->args[1] = ctnetlink_exp_id(exp);
goto out;
}
}
@@ -3219,32 +3203,30 @@ restart:
}
out:
rcu_read_unlock();
- if (last)
- nf_ct_expect_put(last);
-
return skb->len;
}
static int
ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct nf_conntrack_expect *exp, *last;
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
struct nf_conn *ct = cb->data;
struct nf_conn_help *help = nfct_help(ct);
u_int8_t l3proto = nfmsg->nfgen_family;
+ unsigned long last_id = cb->args[1];
+ struct nf_conntrack_expect *exp;
if (cb->args[0])
return 0;
rcu_read_lock();
- last = (struct nf_conntrack_expect *)cb->args[1];
+
restart:
hlist_for_each_entry_rcu(exp, &help->expectations, lnode) {
if (l3proto && exp->tuple.src.l3num != l3proto)
continue;
if (cb->args[1]) {
- if (exp != last)
+ if (ctnetlink_exp_id(exp) != last_id)
continue;
cb->args[1] = 0;
}
@@ -3252,9 +3234,7 @@ restart:
cb->nlh->nlmsg_seq,
IPCTNL_MSG_EXP_NEW,
exp) < 0) {
- if (!refcount_inc_not_zero(&exp->use))
- continue;
- cb->args[1] = (unsigned long)exp;
+ cb->args[1] = ctnetlink_exp_id(exp);
goto out;
}
}
@@ -3265,9 +3245,6 @@ restart:
cb->args[0] = 1;
out:
rcu_read_unlock();
- if (last)
- nf_ct_expect_put(last);
-
return skb->len;
}
@@ -3286,7 +3263,6 @@ static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
struct nf_conntrack_zone zone;
struct netlink_dump_control c = {
.dump = ctnetlink_exp_ct_dump_table,
- .done = ctnetlink_exp_done,
};
err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
@@ -3336,7 +3312,6 @@ static int ctnetlink_get_expect(struct sk_buff *skb,
else {
struct netlink_dump_control c = {
.dump = ctnetlink_exp_dump_table,
- .done = ctnetlink_exp_done,
};
return netlink_dump_start(info->sk, skb, info->nlh, &c);
}
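
[Editor's sketch] Expectations have no dedicated id, so the hunks above derive a resume cookie from the expectation address plus properties of its master conntrack, again reserving 0 for "no resume point". A standalone restatement of that construction with simplified parameter types; this is not the kernel function.

        static unsigned long exp_resume_cookie(const void *exp,
                                               unsigned long master_id,
                                               unsigned int class)
        {
                unsigned long id = (unsigned long)exp + master_id + class;

                return id ? id : 1;     /* 0 is reserved for "no resume point" */
        }
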
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index f36727ed91e1..bc1d96686b9c 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -100,9 +100,6 @@ const struct nf_conntrack_l4proto *nf_ct_l4proto_find(u8 l4proto)
case IPPROTO_UDP: return &nf_conntrack_l4proto_udp;
case IPPROTO_TCP: return &nf_conntrack_l4proto_tcp;
case IPPROTO_ICMP: return &nf_conntrack_l4proto_icmp;
-#ifdef CONFIG_NF_CT_PROTO_DCCP
- case IPPROTO_DCCP: return &nf_conntrack_l4proto_dccp;
-#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
case IPPROTO_SCTP: return &nf_conntrack_l4proto_sctp;
#endif
@@ -681,9 +678,6 @@ void nf_conntrack_proto_pernet_init(struct net *net)
#if IS_ENABLED(CONFIG_IPV6)
nf_conntrack_icmpv6_init_net(net);
#endif
-#ifdef CONFIG_NF_CT_PROTO_DCCP
- nf_conntrack_dccp_init_net(net);
-#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
nf_conntrack_sctp_init_net(net);
#endif
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
deleted file mode 100644
index ebc4f733bb2e..000000000000
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ /dev/null
@@ -1,826 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * DCCP connection tracking protocol helper
- *
- * Copyright (c) 2005, 2006, 2008 Patrick McHardy <kaber@trash.net>
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/sysctl.h>
-#include <linux/spinlock.h>
-#include <linux/skbuff.h>
-#include <linux/dccp.h>
-#include <linux/slab.h>
-
-#include <net/net_namespace.h>
-#include <net/netns/generic.h>
-
-#include <linux/netfilter/nfnetlink_conntrack.h>
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_conntrack_l4proto.h>
-#include <net/netfilter/nf_conntrack_ecache.h>
-#include <net/netfilter/nf_conntrack_timeout.h>
-#include <net/netfilter/nf_log.h>
-
-/* Timeouts are based on values from RFC4340:
- *
- * - REQUEST:
- *
- * 8.1.2. Client Request
- *
- * A client MAY give up on its DCCP-Requests after some time
- * (3 minutes, for example).
- *
- * - RESPOND:
- *
- * 8.1.3. Server Response
- *
- * It MAY also leave the RESPOND state for CLOSED after a timeout of
- * not less than 4MSL (8 minutes);
- *
- * - PARTOPEN:
- *
- * 8.1.5. Handshake Completion
- *
- * If the client remains in PARTOPEN for more than 4MSL (8 minutes),
- * it SHOULD reset the connection with Reset Code 2, "Aborted".
- *
- * - OPEN:
- *
- * The DCCP timestamp overflows after 11.9 hours. If the connection
- * stays idle this long the sequence number won't be recognized
- * as valid anymore.
- *
- * - CLOSEREQ/CLOSING:
- *
- * 8.3. Termination
- *
- * The retransmission timer should initially be set to go off in two
- * round-trip times and should back off to not less than once every
- * 64 seconds ...
- *
- * - TIMEWAIT:
- *
- * 4.3. States
- *
- * A server or client socket remains in this state for 2MSL (4 minutes)
- * after the connection has been torn down, ...
- */
-
-#define DCCP_MSL (2 * 60 * HZ)
-
-#ifdef CONFIG_NF_CONNTRACK_PROCFS
-static const char * const dccp_state_names[] = {
- [CT_DCCP_NONE] = "NONE",
- [CT_DCCP_REQUEST] = "REQUEST",
- [CT_DCCP_RESPOND] = "RESPOND",
- [CT_DCCP_PARTOPEN] = "PARTOPEN",
- [CT_DCCP_OPEN] = "OPEN",
- [CT_DCCP_CLOSEREQ] = "CLOSEREQ",
- [CT_DCCP_CLOSING] = "CLOSING",
- [CT_DCCP_TIMEWAIT] = "TIMEWAIT",
- [CT_DCCP_IGNORE] = "IGNORE",
- [CT_DCCP_INVALID] = "INVALID",
-};
-#endif
-
-#define sNO CT_DCCP_NONE
-#define sRQ CT_DCCP_REQUEST
-#define sRS CT_DCCP_RESPOND
-#define sPO CT_DCCP_PARTOPEN
-#define sOP CT_DCCP_OPEN
-#define sCR CT_DCCP_CLOSEREQ
-#define sCG CT_DCCP_CLOSING
-#define sTW CT_DCCP_TIMEWAIT
-#define sIG CT_DCCP_IGNORE
-#define sIV CT_DCCP_INVALID
-
-/*
- * DCCP state transition table
- *
- * The assumption is the same as for TCP tracking:
- *
- * We are the man in the middle. All the packets go through us but might
- * get lost in transit to the destination. It is assumed that the destination
- * can't receive segments we haven't seen.
- *
- * The following states exist:
- *
- * NONE: Initial state, expecting Request
- * REQUEST: Request seen, waiting for Response from server
- * RESPOND: Response from server seen, waiting for Ack from client
- * PARTOPEN: Ack after Response seen, waiting for packet other than Response,
- * Reset or Sync from server
- * OPEN: Packet other than Response, Reset or Sync seen
- * CLOSEREQ: CloseReq from server seen, expecting Close from client
- * CLOSING: Close seen, expecting Reset
- * TIMEWAIT: Reset seen
- * IGNORE: Not determinable whether packet is valid
- *
- * Some states exist only on one side of the connection: REQUEST, RESPOND,
- * PARTOPEN, CLOSEREQ. For the other side these states are equivalent to
- * the one it was in before.
- *
- * Packets are marked as ignored (sIG) if we don't know if they're valid
- * (for example a reincarnation of a connection we didn't notice is dead
- * already) and the server may send back a connection closing Reset or a
- * Response. They're also used for Sync/SyncAck packets, which we don't
- * care about.
- */
-static const u_int8_t
-dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] = {
- [CT_DCCP_ROLE_CLIENT] = {
- [DCCP_PKT_REQUEST] = {
- /*
- * sNO -> sRQ Regular Request
- * sRQ -> sRQ Retransmitted Request or reincarnation
- * sRS -> sRS Retransmitted Request (apparently Response
- * got lost after we saw it) or reincarnation
- * sPO -> sIG Ignore, conntrack might be out of sync
- * sOP -> sIG Ignore, conntrack might be out of sync
- * sCR -> sIG Ignore, conntrack might be out of sync
- * sCG -> sIG Ignore, conntrack might be out of sync
- * sTW -> sRQ Reincarnation
- *
- * sNO, sRQ, sRS, sPO. sOP, sCR, sCG, sTW, */
- sRQ, sRQ, sRS, sIG, sIG, sIG, sIG, sRQ,
- },
- [DCCP_PKT_RESPONSE] = {
- /*
- * sNO -> sIV Invalid
- * sRQ -> sIG Ignore, might be response to ignored Request
- * sRS -> sIG Ignore, might be response to ignored Request
- * sPO -> sIG Ignore, might be response to ignored Request
- * sOP -> sIG Ignore, might be response to ignored Request
- * sCR -> sIG Ignore, might be response to ignored Request
- * sCG -> sIG Ignore, might be response to ignored Request
- * sTW -> sIV Invalid, reincarnation in reverse direction
- * goes through sRQ
- *
- * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIV,
- },
- [DCCP_PKT_ACK] = {
- /*
- * sNO -> sIV No connection
- * sRQ -> sIV No connection
- * sRS -> sPO Ack for Response, move to PARTOPEN (8.1.5.)
- * sPO -> sPO Retransmitted Ack for Response, remain in PARTOPEN
- * sOP -> sOP Regular ACK, remain in OPEN
- * sCR -> sCR Ack in CLOSEREQ MAY be processed (8.3.)
- * sCG -> sCG Ack in CLOSING MAY be processed (8.3.)
- * sTW -> sIV
- *
- * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIV, sIV, sPO, sPO, sOP, sCR, sCG, sIV
- },
- [DCCP_PKT_DATA] = {
- /*
- * sNO -> sIV No connection
- * sRQ -> sIV No connection
- * sRS -> sIV No connection
- * sPO -> sIV MUST use DataAck in PARTOPEN state (8.1.5.)
- * sOP -> sOP Regular Data packet
- * sCR -> sCR Data in CLOSEREQ MAY be processed (8.3.)
- * sCG -> sCG Data in CLOSING MAY be processed (8.3.)
- * sTW -> sIV
- *
- * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIV, sIV, sIV, sIV, sOP, sCR, sCG, sIV,
- },
- [DCCP_PKT_DATAACK] = {
- /*
- * sNO -> sIV No connection
- * sRQ -> sIV No connection
- * sRS -> sPO Ack for Response, move to PARTOPEN (8.1.5.)
- * sPO -> sPO Remain in PARTOPEN state
- * sOP -> sOP Regular DataAck packet in OPEN state
- * sCR -> sCR DataAck in CLOSEREQ MAY be processed (8.3.)
- * sCG -> sCG DataAck in CLOSING MAY be processed (8.3.)
- * sTW -> sIV
- *
- * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIV, sIV, sPO, sPO, sOP, sCR, sCG, sIV
- },
- [DCCP_PKT_CLOSEREQ] = {
- /*
- * CLOSEREQ may only be sent by the server.
- *
- * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV
- },
- [DCCP_PKT_CLOSE] = {
- /*
- * sNO -> sIV No connection
- * sRQ -> sIV No connection
- * sRS -> sIV No connection
- * sPO -> sCG Client-initiated close
- * sOP -> sCG Client-initiated close
- * sCR -> sCG Close in response to CloseReq (8.3.)
- * sCG -> sCG Retransmit
- * sTW -> sIV Late retransmit, already in TIME_WAIT
- *
- * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIV, sIV, sIV, sCG, sCG, sCG, sIV, sIV
- },
- [DCCP_PKT_RESET] = {
- /*
- * sNO -> sIV No connection
- * sRQ -> sTW Sync received or timeout, SHOULD send Reset (8.1.1.)
- * sRS -> sTW Response received without Request
- * sPO -> sTW Timeout, SHOULD send Reset (8.1.5.)
- * sOP -> sTW Connection reset
- * sCR -> sTW Connection reset
- * sCG -> sTW Connection reset
- * sTW -> sIG Ignore (don't refresh timer)
- *
- * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIV, sTW, sTW, sTW, sTW, sTW, sTW, sIG
- },
- [DCCP_PKT_SYNC] = {
- /*
- * We currently ignore Sync packets
- *
- * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
- },
- [DCCP_PKT_SYNCACK] = {
- /*
- * We currently ignore SyncAck packets
- *
- * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
- },
- },
- [CT_DCCP_ROLE_SERVER] = {
- [DCCP_PKT_REQUEST] = {
- /*
- * sNO -> sIV Invalid
- * sRQ -> sIG Ignore, conntrack might be out of sync
- * sRS -> sIG Ignore, conntrack might be out of sync
- * sPO -> sIG Ignore, conntrack might be out of sync
- * sOP -> sIG Ignore, conntrack might be out of sync
- * sCR -> sIG Ignore, conntrack might be out of sync
- * sCG -> sIG Ignore, conntrack might be out of sync
- * sTW -> sRQ Reincarnation, must reverse roles
- *
- * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIV, sIG, sIG, sIG, sIG, sIG, sIG, sRQ
- },
- [DCCP_PKT_RESPONSE] = {
- /*
- * sNO -> sIV Response without Request
- * sRQ -> sRS Response to clients Request
- * sRS -> sRS Retransmitted Response (8.1.3. SHOULD NOT)
- * sPO -> sIG Response to an ignored Request or late retransmit
- * sOP -> sIG Ignore, might be response to ignored Request
- * sCR -> sIG Ignore, might be response to ignored Request
- * sCG -> sIG Ignore, might be response to ignored Request
- * sTW -> sIV Invalid, Request from client in sTW moves to sRQ
- *
- * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIV, sRS, sRS, sIG, sIG, sIG, sIG, sIV
- },
- [DCCP_PKT_ACK] = {
- /*
- * sNO -> sIV No connection
- * sRQ -> sIV No connection
- * sRS -> sIV No connection
- * sPO -> sOP Enter OPEN state (8.1.5.)
- * sOP -> sOP Regular Ack in OPEN state
- * sCR -> sIV Waiting for Close from client
- * sCG -> sCG Ack in CLOSING MAY be processed (8.3.)
- * sTW -> sIV
- *
- * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV
- },
- [DCCP_PKT_DATA] = {
- /*
- * sNO -> sIV No connection
- * sRQ -> sIV No connection
- * sRS -> sIV No connection
- * sPO -> sOP Enter OPEN state (8.1.5.)
- * sOP -> sOP Regular Data packet in OPEN state
- * sCR -> sIV Waiting for Close from client
- * sCG -> sCG Data in CLOSING MAY be processed (8.3.)
- * sTW -> sIV
- *
- * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV
- },
- [DCCP_PKT_DATAACK] = {
- /*
- * sNO -> sIV No connection
- * sRQ -> sIV No connection
- * sRS -> sIV No connection
- * sPO -> sOP Enter OPEN state (8.1.5.)
- * sOP -> sOP Regular DataAck in OPEN state
- * sCR -> sIV Waiting for Close from client
- * sCG -> sCG Data in CLOSING MAY be processed (8.3.)
- * sTW -> sIV
- *
- * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIV, sIV, sIV, sOP, sOP, sIV, sCG, sIV
- },
- [DCCP_PKT_CLOSEREQ] = {
- /*
- * sNO -> sIV No connection
- * sRQ -> sIV No connection
- * sRS -> sIV No connection
- * sPO -> sOP -> sCR Move directly to CLOSEREQ (8.1.5.)
- * sOP -> sCR CloseReq in OPEN state
- * sCR -> sCR Retransmit
- * sCG -> sCR Simultaneous close, client sends another Close
- * sTW -> sIV Already closed
- *
- * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIV, sIV, sIV, sCR, sCR, sCR, sCR, sIV
- },
- [DCCP_PKT_CLOSE] = {
- /*
- * sNO -> sIV No connection
- * sRQ -> sIV No connection
- * sRS -> sIV No connection
- * sPO -> sOP -> sCG Move direcly to CLOSING
- * sOP -> sCG Move to CLOSING
- * sCR -> sIV Close after CloseReq is invalid
- * sCG -> sCG Retransmit
- * sTW -> sIV Already closed
- *
- * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIV, sIV, sIV, sCG, sCG, sIV, sCG, sIV
- },
- [DCCP_PKT_RESET] = {
- /*
- * sNO -> sIV No connection
- * sRQ -> sTW Reset in response to Request
- * sRS -> sTW Timeout, SHOULD send Reset (8.1.3.)
- * sPO -> sTW Timeout, SHOULD send Reset (8.1.3.)
- * sOP -> sTW
- * sCR -> sTW
- * sCG -> sTW
- * sTW -> sIG Ignore (don't refresh timer)
- *
- * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW, sTW */
- sIV, sTW, sTW, sTW, sTW, sTW, sTW, sTW, sIG
- },
- [DCCP_PKT_SYNC] = {
- /*
- * We currently ignore Sync packets
- *
- * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
- },
- [DCCP_PKT_SYNCACK] = {
- /*
- * We currently ignore SyncAck packets
- *
- * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
- },
- },
-};
-
-static noinline bool
-dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
- const struct dccp_hdr *dh,
- const struct nf_hook_state *hook_state)
-{
- struct net *net = nf_ct_net(ct);
- struct nf_dccp_net *dn;
- const char *msg;
- u_int8_t state;
-
- state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
- switch (state) {
- default:
- dn = nf_dccp_pernet(net);
- if (dn->dccp_loose == 0) {
- msg = "not picking up existing connection ";
- goto out_invalid;
- }
- break;
- case CT_DCCP_REQUEST:
- break;
- case CT_DCCP_INVALID:
- msg = "invalid state transition ";
- goto out_invalid;
- }
-
- ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
- ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
- ct->proto.dccp.state = CT_DCCP_NONE;
- ct->proto.dccp.last_pkt = DCCP_PKT_REQUEST;
- ct->proto.dccp.last_dir = IP_CT_DIR_ORIGINAL;
- ct->proto.dccp.handshake_seq = 0;
- return true;
-
-out_invalid:
- nf_ct_l4proto_log_invalid(skb, ct, hook_state, "%s", msg);
- return false;
-}
-
-static u64 dccp_ack_seq(const struct dccp_hdr *dh)
-{
- const struct dccp_hdr_ack_bits *dhack;
-
- dhack = (void *)dh + __dccp_basic_hdr_len(dh);
- return ((u64)ntohs(dhack->dccph_ack_nr_high) << 32) +
- ntohl(dhack->dccph_ack_nr_low);
-}
-
-static bool dccp_error(const struct dccp_hdr *dh,
- struct sk_buff *skb, unsigned int dataoff,
- const struct nf_hook_state *state)
-{
- static const unsigned long require_seq48 = 1 << DCCP_PKT_REQUEST |
- 1 << DCCP_PKT_RESPONSE |
- 1 << DCCP_PKT_CLOSEREQ |
- 1 << DCCP_PKT_CLOSE |
- 1 << DCCP_PKT_RESET |
- 1 << DCCP_PKT_SYNC |
- 1 << DCCP_PKT_SYNCACK;
- unsigned int dccp_len = skb->len - dataoff;
- unsigned int cscov;
- const char *msg;
- u8 type;
-
- BUILD_BUG_ON(DCCP_PKT_INVALID >= BITS_PER_LONG);
-
- if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) ||
- dh->dccph_doff * 4 > dccp_len) {
- msg = "nf_ct_dccp: truncated/malformed packet ";
- goto out_invalid;
- }
-
- cscov = dccp_len;
- if (dh->dccph_cscov) {
- cscov = (dh->dccph_cscov - 1) * 4;
- if (cscov > dccp_len) {
- msg = "nf_ct_dccp: bad checksum coverage ";
- goto out_invalid;
- }
- }
-
- if (state->hook == NF_INET_PRE_ROUTING &&
- state->net->ct.sysctl_checksum &&
- nf_checksum_partial(skb, state->hook, dataoff, cscov,
- IPPROTO_DCCP, state->pf)) {
- msg = "nf_ct_dccp: bad checksum ";
- goto out_invalid;
- }
-
- type = dh->dccph_type;
- if (type >= DCCP_PKT_INVALID) {
- msg = "nf_ct_dccp: reserved packet type ";
- goto out_invalid;
- }
-
- if (test_bit(type, &require_seq48) && !dh->dccph_x) {
- msg = "nf_ct_dccp: type lacks 48bit sequence numbers";
- goto out_invalid;
- }
-
- return false;
-out_invalid:
- nf_l4proto_log_invalid(skb, state, IPPROTO_DCCP, "%s", msg);
- return true;
-}
-
-struct nf_conntrack_dccp_buf {
- struct dccp_hdr dh; /* generic header part */
- struct dccp_hdr_ext ext; /* optional depending dh->dccph_x */
- union { /* depends on header type */
- struct dccp_hdr_ack_bits ack;
- struct dccp_hdr_request req;
- struct dccp_hdr_response response;
- struct dccp_hdr_reset rst;
- } u;
-};
-
-static struct dccp_hdr *
-dccp_header_pointer(const struct sk_buff *skb, int offset, const struct dccp_hdr *dh,
- struct nf_conntrack_dccp_buf *buf)
-{
- unsigned int hdrlen = __dccp_hdr_len(dh);
-
- if (hdrlen > sizeof(*buf))
- return NULL;
-
- return skb_header_pointer(skb, offset, hdrlen, buf);
-}
-
-int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state)
-{
- enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
- struct nf_conntrack_dccp_buf _dh;
- u_int8_t type, old_state, new_state;
- enum ct_dccp_roles role;
- unsigned int *timeouts;
- struct dccp_hdr *dh;
-
- dh = skb_header_pointer(skb, dataoff, sizeof(*dh), &_dh.dh);
- if (!dh)
- return -NF_ACCEPT;
-
- if (dccp_error(dh, skb, dataoff, state))
- return -NF_ACCEPT;
-
- /* pull again, including possible 48 bit sequences and subtype header */
- dh = dccp_header_pointer(skb, dataoff, dh, &_dh);
- if (!dh)
- return -NF_ACCEPT;
-
- type = dh->dccph_type;
- if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh, state))
- return -NF_ACCEPT;
-
- if (type == DCCP_PKT_RESET &&
- !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
- /* Tear down connection immediately if only reply is a RESET */
- nf_ct_kill_acct(ct, ctinfo, skb);
- return NF_ACCEPT;
- }
-
- spin_lock_bh(&ct->lock);
-
- role = ct->proto.dccp.role[dir];
- old_state = ct->proto.dccp.state;
- new_state = dccp_state_table[role][type][old_state];
-
- switch (new_state) {
- case CT_DCCP_REQUEST:
- if (old_state == CT_DCCP_TIMEWAIT &&
- role == CT_DCCP_ROLE_SERVER) {
- /* Reincarnation in the reverse direction: reopen and
- * reverse client/server roles. */
- ct->proto.dccp.role[dir] = CT_DCCP_ROLE_CLIENT;
- ct->proto.dccp.role[!dir] = CT_DCCP_ROLE_SERVER;
- }
- break;
- case CT_DCCP_RESPOND:
- if (old_state == CT_DCCP_REQUEST)
- ct->proto.dccp.handshake_seq = dccp_hdr_seq(dh);
- break;
- case CT_DCCP_PARTOPEN:
- if (old_state == CT_DCCP_RESPOND &&
- type == DCCP_PKT_ACK &&
- dccp_ack_seq(dh) == ct->proto.dccp.handshake_seq)
- set_bit(IPS_ASSURED_BIT, &ct->status);
- break;
- case CT_DCCP_IGNORE:
- /*
- * Connection tracking might be out of sync, so we ignore
- * packets that might establish a new connection and resync
- * if the server responds with a valid Response.
- */
- if (ct->proto.dccp.last_dir == !dir &&
- ct->proto.dccp.last_pkt == DCCP_PKT_REQUEST &&
- type == DCCP_PKT_RESPONSE) {
- ct->proto.dccp.role[!dir] = CT_DCCP_ROLE_CLIENT;
- ct->proto.dccp.role[dir] = CT_DCCP_ROLE_SERVER;
- ct->proto.dccp.handshake_seq = dccp_hdr_seq(dh);
- new_state = CT_DCCP_RESPOND;
- break;
- }
- ct->proto.dccp.last_dir = dir;
- ct->proto.dccp.last_pkt = type;
-
- spin_unlock_bh(&ct->lock);
- nf_ct_l4proto_log_invalid(skb, ct, state, "%s", "invalid packet");
- return NF_ACCEPT;
- case CT_DCCP_INVALID:
- spin_unlock_bh(&ct->lock);
- nf_ct_l4proto_log_invalid(skb, ct, state, "%s", "invalid state transition");
- return -NF_ACCEPT;
- }
-
- ct->proto.dccp.last_dir = dir;
- ct->proto.dccp.last_pkt = type;
- ct->proto.dccp.state = new_state;
- spin_unlock_bh(&ct->lock);
-
- if (new_state != old_state)
- nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
-
- timeouts = nf_ct_timeout_lookup(ct);
- if (!timeouts)
- timeouts = nf_dccp_pernet(nf_ct_net(ct))->dccp_timeout;
- nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
-
- return NF_ACCEPT;
-}
-
-static bool dccp_can_early_drop(const struct nf_conn *ct)
-{
- switch (ct->proto.dccp.state) {
- case CT_DCCP_CLOSEREQ:
- case CT_DCCP_CLOSING:
- case CT_DCCP_TIMEWAIT:
- return true;
- default:
- break;
- }
-
- return false;
-}
-
-#ifdef CONFIG_NF_CONNTRACK_PROCFS
-static void dccp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
-{
- seq_printf(s, "%s ", dccp_state_names[ct->proto.dccp.state]);
-}
-#endif
-
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
-static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
- struct nf_conn *ct, bool destroy)
-{
- struct nlattr *nest_parms;
-
- spin_lock_bh(&ct->lock);
- nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP);
- if (!nest_parms)
- goto nla_put_failure;
- if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state))
- goto nla_put_failure;
-
- if (destroy)
- goto skip_state;
-
- if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_ROLE,
- ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]) ||
- nla_put_be64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
- cpu_to_be64(ct->proto.dccp.handshake_seq),
- CTA_PROTOINFO_DCCP_PAD))
- goto nla_put_failure;
-skip_state:
- nla_nest_end(skb, nest_parms);
- spin_unlock_bh(&ct->lock);
-
- return 0;
-
-nla_put_failure:
- spin_unlock_bh(&ct->lock);
- return -1;
-}
-
-static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = {
- [CTA_PROTOINFO_DCCP_STATE] = { .type = NLA_U8 },
- [CTA_PROTOINFO_DCCP_ROLE] = { .type = NLA_U8 },
- [CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ] = { .type = NLA_U64 },
- [CTA_PROTOINFO_DCCP_PAD] = { .type = NLA_UNSPEC },
-};
-
-#define DCCP_NLATTR_SIZE ( \
- NLA_ALIGN(NLA_HDRLEN + 1) + \
- NLA_ALIGN(NLA_HDRLEN + 1) + \
- NLA_ALIGN(NLA_HDRLEN + sizeof(u64)) + \
- NLA_ALIGN(NLA_HDRLEN + 0))
-
-static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
-{
- struct nlattr *attr = cda[CTA_PROTOINFO_DCCP];
- struct nlattr *tb[CTA_PROTOINFO_DCCP_MAX + 1];
- int err;
-
- if (!attr)
- return 0;
-
- err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_DCCP_MAX, attr,
- dccp_nla_policy, NULL);
- if (err < 0)
- return err;
-
- if (!tb[CTA_PROTOINFO_DCCP_STATE] ||
- !tb[CTA_PROTOINFO_DCCP_ROLE] ||
- nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) > CT_DCCP_ROLE_MAX ||
- nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]) >= CT_DCCP_IGNORE) {
- return -EINVAL;
- }
-
- spin_lock_bh(&ct->lock);
- ct->proto.dccp.state = nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]);
- if (nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) == CT_DCCP_ROLE_CLIENT) {
- ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
- ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
- } else {
- ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_SERVER;
- ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_CLIENT;
- }
- if (tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ]) {
- ct->proto.dccp.handshake_seq =
- be64_to_cpu(nla_get_be64(tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ]));
- }
- spin_unlock_bh(&ct->lock);
- return 0;
-}
-#endif
-
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-
-#include <linux/netfilter/nfnetlink.h>
-#include <linux/netfilter/nfnetlink_cttimeout.h>
-
-static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
- struct net *net, void *data)
-{
- struct nf_dccp_net *dn = nf_dccp_pernet(net);
- unsigned int *timeouts = data;
- int i;
-
- if (!timeouts)
- timeouts = dn->dccp_timeout;
-
- /* set default DCCP timeouts. */
- for (i=0; i<CT_DCCP_MAX; i++)
- timeouts[i] = dn->dccp_timeout[i];
-
- /* there's a 1:1 mapping between attributes and protocol states. */
- for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++) {
- if (tb[i]) {
- timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
- }
- }
-
- timeouts[CTA_TIMEOUT_DCCP_UNSPEC] = timeouts[CTA_TIMEOUT_DCCP_REQUEST];
- return 0;
-}
-
-static int
-dccp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
-{
- const unsigned int *timeouts = data;
- int i;
-
- for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++) {
- if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ)))
- goto nla_put_failure;
- }
- return 0;
-
-nla_put_failure:
- return -ENOSPC;
-}
-
-static const struct nla_policy
-dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = {
- [CTA_TIMEOUT_DCCP_REQUEST] = { .type = NLA_U32 },
- [CTA_TIMEOUT_DCCP_RESPOND] = { .type = NLA_U32 },
- [CTA_TIMEOUT_DCCP_PARTOPEN] = { .type = NLA_U32 },
- [CTA_TIMEOUT_DCCP_OPEN] = { .type = NLA_U32 },
- [CTA_TIMEOUT_DCCP_CLOSEREQ] = { .type = NLA_U32 },
- [CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 },
- [CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 },
-};
-#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-
-void nf_conntrack_dccp_init_net(struct net *net)
-{
- struct nf_dccp_net *dn = nf_dccp_pernet(net);
-
- /* default values */
- dn->dccp_loose = 1;
- dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * DCCP_MSL;
- dn->dccp_timeout[CT_DCCP_RESPOND] = 4 * DCCP_MSL;
- dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL;
- dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ;
- dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ;
- dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ;
- dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
-
- /* timeouts[0] is unused, make it same as SYN_SENT so
- * ->timeouts[0] contains 'new' timeout, like udp or icmp.
- */
- dn->dccp_timeout[CT_DCCP_NONE] = dn->dccp_timeout[CT_DCCP_REQUEST];
-}
-
-const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp = {
- .l4proto = IPPROTO_DCCP,
- .can_early_drop = dccp_can_early_drop,
-#ifdef CONFIG_NF_CONNTRACK_PROCFS
- .print_conntrack = dccp_print_conntrack,
-#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- .nlattr_size = DCCP_NLATTR_SIZE,
- .to_nlattr = dccp_to_nlattr,
- .from_nlattr = nlattr_to_dccp,
- .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
- .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
- .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
- .nla_policy = nf_ct_port_nla_policy,
-#endif
-#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
- .ctnl_timeout = {
- .nlattr_to_obj = dccp_timeout_nlattr_to_obj,
- .obj_to_nlattr = dccp_timeout_obj_to_nlattr,
- .nlattr_max = CTA_TIMEOUT_DCCP_MAX,
- .obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
- .nla_policy = dccp_timeout_nla_policy,
- },
-#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-};
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 6c4cff10357d..708b79380f04 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -14,6 +14,7 @@
#include <linux/sysctl.h>
#endif
+#include <net/netfilter/nf_log.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
@@ -67,11 +68,6 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
ntohs(tuple->dst.u.udp.port));
break;
- case IPPROTO_DCCP:
- seq_printf(s, "sport=%hu dport=%hu ",
- ntohs(tuple->src.u.dccp.port),
- ntohs(tuple->dst.u.dccp.port));
- break;
case IPPROTO_SCTP:
seq_printf(s, "sport=%hu dport=%hu ",
ntohs(tuple->src.u.sctp.port),
@@ -279,7 +275,6 @@ static const char* l4proto_name(u16 proto)
case IPPROTO_ICMP: return "icmp";
case IPPROTO_TCP: return "tcp";
case IPPROTO_UDP: return "udp";
- case IPPROTO_DCCP: return "dccp";
case IPPROTO_GRE: return "gre";
case IPPROTO_SCTP: return "sctp";
case IPPROTO_UDPLITE: return "udplite";
@@ -322,6 +317,9 @@ static int ct_seq_show(struct seq_file *s, void *v)
smp_acquire__after_ctrl_dep();
if (nf_ct_should_gc(ct)) {
+ struct ct_iter_state *st = s->private;
+
+ st->skip_elems--;
nf_ct_kill(ct);
goto release;
}
@@ -561,6 +559,29 @@ nf_conntrack_hash_sysctl(const struct ctl_table *table, int write,
return ret;
}
+static int
+nf_conntrack_log_invalid_sysctl(const struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret, i;
+
+ ret = proc_dou8vec_minmax(table, write, buffer, lenp, ppos);
+ if (ret < 0 || !write)
+ return ret;
+
+ if (*(u8 *)table->data == 0)
+ return 0;
+
+ /* Load nf_log_syslog only if no logger is currently registered */
+ for (i = 0; i < NFPROTO_NUMPROTO; i++) {
+ if (nf_log_is_registered(i))
+ return 0;
+ }
+ request_module("%s", "nf_log_syslog");
+
+ return 0;
+}
+
static struct ctl_table_header *nf_ct_netfilter_header;
enum nf_ct_sysctl_index {
@@ -612,16 +633,6 @@ enum nf_ct_sysctl_index {
NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT,
#endif
-#ifdef CONFIG_NF_CT_PROTO_DCCP
- NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST,
- NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_RESPOND,
- NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_PARTOPEN,
- NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_OPEN,
- NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSEREQ,
- NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSING,
- NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_TIMEWAIT,
- NF_SYSCTL_CT_PROTO_DCCP_LOOSE,
-#endif
#ifdef CONFIG_NF_CT_PROTO_GRE
NF_SYSCTL_CT_PROTO_TIMEOUT_GRE,
NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM,
@@ -667,7 +678,7 @@ static struct ctl_table nf_ct_sysctl_table[] = {
.data = &init_net.ct.sysctl_log_invalid,
.maxlen = sizeof(u8),
.mode = 0644,
- .proc_handler = proc_dou8vec_minmax,
+ .proc_handler = nf_conntrack_log_invalid_sysctl,
},
[NF_SYSCTL_CT_EXPECT_MAX] = {
.procname = "nf_conntrack_expect_max",
@@ -895,58 +906,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
.proc_handler = proc_dointvec_jiffies,
},
#endif
-#ifdef CONFIG_NF_CT_PROTO_DCCP
- [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST] = {
- .procname = "nf_conntrack_dccp_timeout_request",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_RESPOND] = {
- .procname = "nf_conntrack_dccp_timeout_respond",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_PARTOPEN] = {
- .procname = "nf_conntrack_dccp_timeout_partopen",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_OPEN] = {
- .procname = "nf_conntrack_dccp_timeout_open",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSEREQ] = {
- .procname = "nf_conntrack_dccp_timeout_closereq",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_CLOSING] = {
- .procname = "nf_conntrack_dccp_timeout_closing",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- [NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_TIMEWAIT] = {
- .procname = "nf_conntrack_dccp_timeout_timewait",
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- [NF_SYSCTL_CT_PROTO_DCCP_LOOSE] = {
- .procname = "nf_conntrack_dccp_loose",
- .maxlen = sizeof(u8),
- .mode = 0644,
- .proc_handler = proc_dou8vec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
-#endif
#ifdef CONFIG_NF_CT_PROTO_GRE
[NF_SYSCTL_CT_PROTO_TIMEOUT_GRE] = {
.procname = "nf_conntrack_gre_timeout",
@@ -1032,29 +991,6 @@ static void nf_conntrack_standalone_init_sctp_sysctl(struct net *net,
#endif
}
-static void nf_conntrack_standalone_init_dccp_sysctl(struct net *net,
- struct ctl_table *table)
-{
-#ifdef CONFIG_NF_CT_PROTO_DCCP
- struct nf_dccp_net *dn = nf_dccp_pernet(net);
-
-#define XASSIGN(XNAME, dn) \
- table[NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_ ## XNAME].data = \
- &(dn)->dccp_timeout[CT_DCCP_ ## XNAME]
-
- XASSIGN(REQUEST, dn);
- XASSIGN(RESPOND, dn);
- XASSIGN(PARTOPEN, dn);
- XASSIGN(OPEN, dn);
- XASSIGN(CLOSEREQ, dn);
- XASSIGN(CLOSING, dn);
- XASSIGN(TIMEWAIT, dn);
-#undef XASSIGN
-
- table[NF_SYSCTL_CT_PROTO_DCCP_LOOSE].data = &dn->dccp_loose;
-#endif
-}
-
static void nf_conntrack_standalone_init_gre_sysctl(struct net *net,
struct ctl_table *table)
{
@@ -1100,7 +1036,6 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
nf_conntrack_standalone_init_tcp_sysctl(net, table);
nf_conntrack_standalone_init_sctp_sysctl(net, table);
- nf_conntrack_standalone_init_dccp_sysctl(net, table);
nf_conntrack_standalone_init_gre_sysctl(net, table);
/* Don't allow non-init_net ns to alter global sysctls */
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 6dd0de33eebd..74cef8bf554c 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -125,6 +125,32 @@ void nf_log_unregister(struct nf_logger *logger)
}
EXPORT_SYMBOL(nf_log_unregister);
+/**
+ * nf_log_is_registered - Check if any logger is registered for a given
+ * protocol family.
+ *
+ * @pf: Protocol family
+ *
+ * Returns: true if at least one logger is active for @pf, false otherwise.
+ */
+bool nf_log_is_registered(u_int8_t pf)
+{
+ int i;
+
+ if (pf >= NFPROTO_NUMPROTO) {
+ WARN_ON_ONCE(1);
+ return false;
+ }
+
+ for (i = 0; i < NF_LOG_TYPE_MAX; i++) {
+ if (rcu_access_pointer(loggers[pf][i]))
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(nf_log_is_registered);
+
int nf_log_bind_pf(struct net *net, u_int8_t pf,
const struct nf_logger *logger)
{
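
[Editor's sketch] A hedged usage example for the nf_log_is_registered() helper added above; the caller and its policy are hypothetical, only the helper, NFPROTO_IPV4 and request_module() are real interfaces.

        /* Hypothetical caller: only pull in the syslog backend when no logger
         * is registered for the family, mirroring what the new
         * nf_conntrack_log_invalid sysctl handler does across all families.
         */
        static void ensure_ipv4_logger(void)
        {
                if (!nf_log_is_registered(NFPROTO_IPV4))
                        request_module("%s", "nf_log_syslog");
        }
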
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index f391cd267922..78a61dac4ade 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -69,7 +69,6 @@ static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
if (t->dst.protonum == IPPROTO_TCP ||
t->dst.protonum == IPPROTO_UDP ||
t->dst.protonum == IPPROTO_UDPLITE ||
- t->dst.protonum == IPPROTO_DCCP ||
t->dst.protonum == IPPROTO_SCTP)
fl4->fl4_dport = t->dst.u.all;
}
@@ -81,7 +80,6 @@ static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
if (t->dst.protonum == IPPROTO_TCP ||
t->dst.protonum == IPPROTO_UDP ||
t->dst.protonum == IPPROTO_UDPLITE ||
- t->dst.protonum == IPPROTO_DCCP ||
t->dst.protonum == IPPROTO_SCTP)
fl4->fl4_sport = t->src.u.all;
}
@@ -102,7 +100,6 @@ static void nf_nat_ipv6_decode_session(struct sk_buff *skb,
if (t->dst.protonum == IPPROTO_TCP ||
t->dst.protonum == IPPROTO_UDP ||
t->dst.protonum == IPPROTO_UDPLITE ||
- t->dst.protonum == IPPROTO_DCCP ||
t->dst.protonum == IPPROTO_SCTP)
fl6->fl6_dport = t->dst.u.all;
}
@@ -114,7 +111,6 @@ static void nf_nat_ipv6_decode_session(struct sk_buff *skb,
if (t->dst.protonum == IPPROTO_TCP ||
t->dst.protonum == IPPROTO_UDP ||
t->dst.protonum == IPPROTO_UDPLITE ||
- t->dst.protonum == IPPROTO_DCCP ||
t->dst.protonum == IPPROTO_SCTP)
fl6->fl6_sport = t->src.u.all;
}
@@ -432,7 +428,6 @@ static bool l4proto_in_range(const struct nf_conntrack_tuple *tuple,
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
- case IPPROTO_DCCP:
case IPPROTO_SCTP:
if (maniptype == NF_NAT_MANIP_SRC)
port = tuple->src.u.all;
@@ -632,7 +627,6 @@ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
case IPPROTO_UDPLITE:
case IPPROTO_TCP:
case IPPROTO_SCTP:
- case IPPROTO_DCCP:
if (maniptype == NF_NAT_MANIP_SRC)
keyptr = &tuple->src.u.all;
else
diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c
index dc450cc81222..b14a434b9561 100644
--- a/net/netfilter/nf_nat_proto.c
+++ b/net/netfilter/nf_nat_proto.c
@@ -180,46 +180,6 @@ tcp_manip_pkt(struct sk_buff *skb,
}
static bool
-dccp_manip_pkt(struct sk_buff *skb,
- unsigned int iphdroff, unsigned int hdroff,
- const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype)
-{
-#ifdef CONFIG_NF_CT_PROTO_DCCP
- struct dccp_hdr *hdr;
- __be16 *portptr, oldport, newport;
- int hdrsize = 8; /* DCCP connection tracking guarantees this much */
-
- if (skb->len >= hdroff + sizeof(struct dccp_hdr))
- hdrsize = sizeof(struct dccp_hdr);
-
- if (skb_ensure_writable(skb, hdroff + hdrsize))
- return false;
-
- hdr = (struct dccp_hdr *)(skb->data + hdroff);
-
- if (maniptype == NF_NAT_MANIP_SRC) {
- newport = tuple->src.u.dccp.port;
- portptr = &hdr->dccph_sport;
- } else {
- newport = tuple->dst.u.dccp.port;
- portptr = &hdr->dccph_dport;
- }
-
- oldport = *portptr;
- *portptr = newport;
-
- if (hdrsize < sizeof(*hdr))
- return true;
-
- nf_csum_update(skb, iphdroff, &hdr->dccph_checksum, tuple, maniptype);
- inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
- false);
-#endif
- return true;
-}
-
-static bool
icmp_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff, unsigned int hdroff,
const struct nf_conntrack_tuple *tuple,
@@ -338,9 +298,6 @@ static bool l4proto_manip_pkt(struct sk_buff *skb,
case IPPROTO_ICMPV6:
return icmpv6_manip_pkt(skb, iphdroff, hdroff,
tuple, maniptype);
- case IPPROTO_DCCP:
- return dccp_manip_pkt(skb, iphdroff, hdroff,
- tuple, maniptype);
case IPPROTO_GRE:
return gre_manip_pkt(skb, iphdroff, hdroff,
tuple, maniptype);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 24c71ecb2179..eed434e0a970 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -151,12 +151,12 @@ static void nft_ctx_init(struct nft_ctx *ctx,
bitmap_zero(ctx->reg_inited, NFT_REG32_NUM);
}
-static struct nft_trans *nft_trans_alloc_gfp(const struct nft_ctx *ctx,
- int msg_type, u32 size, gfp_t gfp)
+static struct nft_trans *nft_trans_alloc(const struct nft_ctx *ctx,
+ int msg_type, u32 size)
{
struct nft_trans *trans;
- trans = kzalloc(size, gfp);
+ trans = kzalloc(size, GFP_KERNEL);
if (trans == NULL)
return NULL;
@@ -172,12 +172,6 @@ static struct nft_trans *nft_trans_alloc_gfp(const struct nft_ctx *ctx,
return trans;
}
-static struct nft_trans *nft_trans_alloc(const struct nft_ctx *ctx,
- int msg_type, u32 size)
-{
- return nft_trans_alloc_gfp(ctx, msg_type, size, GFP_KERNEL);
-}
-
static struct nft_trans_binding *nft_trans_get_binding(struct nft_trans *trans)
{
switch (trans->msg_type) {
@@ -442,8 +436,7 @@ static bool nft_trans_collapse_set_elem_allowed(const struct nft_trans_elem *a,
static bool nft_trans_collapse_set_elem(struct nftables_pernet *nft_net,
struct nft_trans_elem *tail,
- struct nft_trans_elem *trans,
- gfp_t gfp)
+ struct nft_trans_elem *trans)
{
unsigned int nelems, old_nelems = tail->nelems;
struct nft_trans_elem *new_trans;
@@ -466,9 +459,11 @@ static bool nft_trans_collapse_set_elem(struct nftables_pernet *nft_net,
/* krealloc might free tail which invalidates list pointers */
list_del_init(&tail->nft_trans.list);
- new_trans = krealloc(tail, struct_size(tail, elems, nelems), gfp);
+ new_trans = krealloc(tail, struct_size(tail, elems, nelems),
+ GFP_KERNEL);
if (!new_trans) {
- list_add_tail(&tail->nft_trans.list, &nft_net->commit_list);
+ list_add_tail(&tail->nft_trans.list,
+ &nft_net->commit_list);
return false;
}
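
[Editor's sketch] The collapse path above grows the tail transaction with krealloc(); the pre-existing comment notes why the element is unlinked from the commit list first. A self-contained sketch of that unlink-realloc-relink pattern, using hypothetical elem/list names rather than the nft_trans types.

        struct elem {
                struct list_head list;
                /* flexible payload follows in the real structure */
        };

        static bool grow_tail(struct list_head *head, struct elem *tail,
                              size_t new_size)
        {
                struct elem *new;

                list_del_init(&tail->list);          /* unlink before krealloc() */
                new = krealloc(tail, new_size, GFP_KERNEL);
                if (!new) {
                        /* krealloc() failure leaves the old object untouched */
                        list_add_tail(&tail->list, head);
                        return false;
                }
                list_add_tail(&new->list, head);     /* relink the possibly-moved object */
                return true;
        }
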
@@ -484,7 +479,7 @@ static bool nft_trans_collapse_set_elem(struct nftables_pernet *nft_net,
}
static bool nft_trans_try_collapse(struct nftables_pernet *nft_net,
- struct nft_trans *trans, gfp_t gfp)
+ struct nft_trans *trans)
{
struct nft_trans *tail;
@@ -501,7 +496,7 @@ static bool nft_trans_try_collapse(struct nftables_pernet *nft_net,
case NFT_MSG_DELSETELEM:
return nft_trans_collapse_set_elem(nft_net,
nft_trans_container_elem(tail),
- nft_trans_container_elem(trans), gfp);
+ nft_trans_container_elem(trans));
}
return false;
@@ -537,17 +532,14 @@ static void nft_trans_commit_list_add_tail(struct net *net, struct nft_trans *tr
}
}
-static void nft_trans_commit_list_add_elem(struct net *net, struct nft_trans *trans,
- gfp_t gfp)
+static void nft_trans_commit_list_add_elem(struct net *net, struct nft_trans *trans)
{
struct nftables_pernet *nft_net = nft_pernet(net);
WARN_ON_ONCE(trans->msg_type != NFT_MSG_NEWSETELEM &&
trans->msg_type != NFT_MSG_DELSETELEM);
- might_alloc(gfp);
-
- if (nft_trans_try_collapse(nft_net, trans, gfp)) {
+ if (nft_trans_try_collapse(nft_net, trans)) {
kfree(trans);
return;
}
@@ -1131,11 +1123,14 @@ nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla,
return ERR_PTR(-ENOENT);
}
-static __be16 nft_base_seq(const struct net *net)
+static unsigned int nft_base_seq(const struct net *net)
{
- struct nftables_pernet *nft_net = nft_pernet(net);
+ return READ_ONCE(net->nft.base_seq);
+}
- return htons(nft_net->base_seq & 0xffff);
+static __be16 nft_base_seq_be16(const struct net *net)
+{
+ return htons(nft_base_seq(net) & 0xffff);
}
static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
@@ -1153,9 +1148,9 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
{
struct nlmsghdr *nlh;
- event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
- nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
- NFNETLINK_V0, nft_base_seq(net));
+ nlh = nfnl_msg_put(skb, portid, seq,
+ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
+ flags, family, NFNETLINK_V0, nft_base_seq_be16(net));
if (!nlh)
goto nla_put_failure;
@@ -1165,7 +1160,8 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
NFTA_TABLE_PAD))
goto nla_put_failure;
- if (event == NFT_MSG_DELTABLE) {
+ if (event == NFT_MSG_DELTABLE ||
+ event == NFT_MSG_DESTROYTABLE) {
nlmsg_end(skb, nlh);
return 0;
}
@@ -1247,7 +1243,7 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
rcu_read_lock();
nft_net = nft_pernet(net);
- cb->seq = READ_ONCE(nft_net->base_seq);
+ cb->seq = nft_base_seq(net);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
@@ -1958,6 +1954,18 @@ nla_put_failure:
return -ENOSPC;
}
+static bool hook_is_prefix(struct nft_hook *hook)
+{
+ return strlen(hook->ifname) >= hook->ifnamelen;
+}
+
+static int nft_nla_put_hook_dev(struct sk_buff *skb, struct nft_hook *hook)
+{
+ int attr = hook_is_prefix(hook) ? NFTA_DEVICE_PREFIX : NFTA_DEVICE_NAME;
+
+ return nla_put_string(skb, attr, hook->ifname);
+}
+
static int nft_dump_basechain_hook(struct sk_buff *skb,
const struct net *net, int family,
const struct nft_base_chain *basechain,
@@ -1989,16 +1997,15 @@ static int nft_dump_basechain_hook(struct sk_buff *skb,
if (!first)
first = hook;
- if (nla_put(skb, NFTA_DEVICE_NAME,
- hook->ifnamelen, hook->ifname))
+ if (nft_nla_put_hook_dev(skb, hook))
goto nla_put_failure;
n++;
}
nla_nest_end(skb, nest_devs);
if (n == 1 &&
- nla_put(skb, NFTA_HOOK_DEV,
- first->ifnamelen, first->ifname))
+ !hook_is_prefix(first) &&
+ nla_put_string(skb, NFTA_HOOK_DEV, first->ifname))
goto nla_put_failure;
}
nla_nest_end(skb, nest);
@@ -2016,9 +2023,9 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
{
struct nlmsghdr *nlh;
- event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
- nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
- NFNETLINK_V0, nft_base_seq(net));
+ nlh = nfnl_msg_put(skb, portid, seq,
+ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
+ flags, family, NFNETLINK_V0, nft_base_seq_be16(net));
if (!nlh)
goto nla_put_failure;
@@ -2028,7 +2035,9 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
NFTA_CHAIN_PAD))
goto nla_put_failure;
- if (event == NFT_MSG_DELCHAIN && !hook_list) {
+ if (!hook_list &&
+ (event == NFT_MSG_DELCHAIN ||
+ event == NFT_MSG_DESTROYCHAIN)) {
nlmsg_end(skb, nlh);
return 0;
}
@@ -2119,7 +2128,7 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
rcu_read_lock();
nft_net = nft_pernet(net);
- cb->seq = READ_ONCE(nft_net->base_seq);
+ cb->seq = nft_base_seq(net);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
@@ -2307,7 +2316,8 @@ void nf_tables_chain_destroy(struct nft_chain *chain)
}
static struct nft_hook *nft_netdev_hook_alloc(struct net *net,
- const struct nlattr *attr)
+ const struct nlattr *attr,
+ bool prefix)
{
struct nf_hook_ops *ops;
struct net_device *dev;
@@ -2324,7 +2334,8 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net,
if (err < 0)
goto err_hook_free;
- hook->ifnamelen = nla_len(attr);
+ /* include the terminating NUL-char when comparing non-prefixes */
+ hook->ifnamelen = strlen(hook->ifname) + !prefix;
/* nf_tables_netdev_event() is called under rtnl_mutex, this is
* indirectly serializing all the other holders of the commit_mutex with
@@ -2371,14 +2382,22 @@ static int nf_tables_parse_netdev_hooks(struct net *net,
struct nft_hook *hook, *next;
const struct nlattr *tmp;
int rem, n = 0, err;
+ bool prefix;
nla_for_each_nested(tmp, attr, rem) {
- if (nla_type(tmp) != NFTA_DEVICE_NAME) {
+ switch (nla_type(tmp)) {
+ case NFTA_DEVICE_NAME:
+ prefix = false;
+ break;
+ case NFTA_DEVICE_PREFIX:
+ prefix = true;
+ break;
+ default:
err = -EINVAL;
goto err_hook;
}
- hook = nft_netdev_hook_alloc(net, tmp);
+ hook = nft_netdev_hook_alloc(net, tmp, prefix);
if (IS_ERR(hook)) {
NL_SET_BAD_ATTR(extack, tmp);
err = PTR_ERR(hook);
@@ -2424,7 +2443,7 @@ static int nft_chain_parse_netdev(struct net *net, struct nlattr *tb[],
int err;
if (tb[NFTA_HOOK_DEV]) {
- hook = nft_netdev_hook_alloc(net, tb[NFTA_HOOK_DEV]);
+ hook = nft_netdev_hook_alloc(net, tb[NFTA_HOOK_DEV], false);
if (IS_ERR(hook)) {
NL_SET_BAD_ATTR(extack, tb[NFTA_HOOK_DEV]);
return PTR_ERR(hook);
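
[Editor's sketch] The hunks above distinguish exact device names from wildcard prefixes purely through the stored length: exact names count the terminating NUL, prefixes do not. A standalone restatement of that convention (these are not the kernel helpers; strlen() is the only real call).

        static size_t hook_store_len(const char *ifname, bool prefix)
        {
                return strlen(ifname) + !prefix;     /* exact names count the NUL */
        }

        static bool hook_name_is_prefix(const char *ifname, size_t ifnamelen)
        {
                return strlen(ifname) >= ifnamelen;  /* only prefixes left the NUL out */
        }

With this, NFTA_DEVICE_PREFIX and exact NFTA_DEVICE_NAME attributes can share the same ifname storage, and dumps pick the matching attribute back via nft_nla_put_hook_dev() as shown earlier.
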
@@ -2800,6 +2819,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
struct nft_chain *chain = ctx->chain;
struct nft_chain_hook hook = {};
struct nft_stats __percpu *stats = NULL;
+ struct nftables_pernet *nft_net;
struct nft_hook *h, *next;
struct nf_hook_ops *ops;
struct nft_trans *trans;
@@ -2842,6 +2862,20 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
if (nft_hook_list_find(&basechain->hook_list, h)) {
list_del(&h->list);
nft_netdev_hook_free(h);
+ continue;
+ }
+
+ nft_net = nft_pernet(ctx->net);
+ list_for_each_entry(trans, &nft_net->commit_list, list) {
+ if (trans->msg_type != NFT_MSG_NEWCHAIN ||
+ trans->table != ctx->table ||
+ !nft_trans_chain_update(trans))
+ continue;
+
+ if (nft_hook_list_find(&nft_trans_chain_hooks(trans), h)) {
+ nft_chain_release_hook(&hook);
+ return -EEXIST;
+ }
}
}
} else {
@@ -3632,7 +3666,7 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
u16 type = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
nlh = nfnl_msg_put(skb, portid, seq, type, flags, family, NFNETLINK_V0,
- nft_base_seq(net));
+ nft_base_seq_be16(net));
if (!nlh)
goto nla_put_failure;
@@ -3800,7 +3834,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
rcu_read_lock();
nft_net = nft_pernet(net);
- cb->seq = READ_ONCE(nft_net->base_seq);
+ cb->seq = nft_base_seq(net);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
@@ -4011,7 +4045,7 @@ static int nf_tables_getrule_reset(struct sk_buff *skb,
buf = kasprintf(GFP_ATOMIC, "%.*s:%u",
nla_len(nla[NFTA_RULE_TABLE]),
(char *)nla_data(nla[NFTA_RULE_TABLE]),
- nft_net->base_seq);
+ nft_base_seq(net));
audit_log_nfcfg(buf, info->nfmsg->nfgen_family, 1,
AUDIT_NFT_OP_RULE_RESET, GFP_ATOMIC);
kfree(buf);
@@ -4039,7 +4073,7 @@ void nf_tables_rule_destroy(const struct nft_ctx *ctx, struct nft_rule *rule)
/* can only be used if rule is no longer visible to dumps */
static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule)
{
- lockdep_commit_lock_is_held(ctx->net);
+ WARN_ON_ONCE(!lockdep_commit_lock_is_held(ctx->net));
nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE);
nf_tables_rule_destroy(ctx, rule);
@@ -4845,9 +4879,10 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
u32 seq = ctx->seq;
int i;
- event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
- nlh = nfnl_msg_put(skb, portid, seq, event, flags, ctx->family,
- NFNETLINK_V0, nft_base_seq(ctx->net));
+ nlh = nfnl_msg_put(skb, portid, seq,
+ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
+ flags, ctx->family, NFNETLINK_V0,
+ nft_base_seq_be16(ctx->net));
if (!nlh)
goto nla_put_failure;
@@ -4859,7 +4894,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
NFTA_SET_PAD))
goto nla_put_failure;
- if (event == NFT_MSG_DELSET) {
+ if (event == NFT_MSG_DELSET ||
+ event == NFT_MSG_DESTROYSET) {
nlmsg_end(skb, nlh);
return 0;
}
@@ -4991,7 +5027,7 @@ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
nft_net = nft_pernet(net);
- cb->seq = READ_ONCE(nft_net->base_seq);
+ cb->seq = nft_base_seq(net);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (ctx->family != NFPROTO_UNSPEC &&
@@ -5859,7 +5895,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding,
enum nft_trans_phase phase)
{
- lockdep_commit_lock_is_held(ctx->net);
+ WARN_ON_ONCE(!lockdep_commit_lock_is_held(ctx->net));
switch (phase) {
case NFT_TRANS_PREPARE_ERROR:
@@ -6168,7 +6204,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
nft_net = nft_pernet(net);
- cb->seq = READ_ONCE(nft_net->base_seq);
+ cb->seq = nft_base_seq(net);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (dump_ctx->ctx.family != NFPROTO_UNSPEC &&
@@ -6197,7 +6233,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
seq = cb->nlh->nlmsg_seq;
nlh = nfnl_msg_put(skb, portid, seq, event, NLM_F_MULTI,
- table->family, NFNETLINK_V0, nft_base_seq(net));
+ table->family, NFNETLINK_V0, nft_base_seq_be16(net));
if (!nlh)
goto nla_put_failure;
@@ -6290,7 +6326,7 @@ static int nf_tables_fill_setelem_info(struct sk_buff *skb,
event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
nlh = nfnl_msg_put(skb, portid, seq, event, flags, ctx->family,
- NFNETLINK_V0, nft_base_seq(ctx->net));
+ NFNETLINK_V0, nft_base_seq_be16(ctx->net));
if (!nlh)
goto nla_put_failure;
@@ -6589,7 +6625,7 @@ static int nf_tables_getsetelem_reset(struct sk_buff *skb,
}
nelems++;
}
- audit_log_nft_set_reset(dump_ctx.ctx.table, nft_net->base_seq, nelems);
+ audit_log_nft_set_reset(dump_ctx.ctx.table, nft_base_seq(info->net), nelems);
out_unlock:
rcu_read_unlock();
@@ -7529,7 +7565,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
}
ue->priv = elem_priv;
- nft_trans_commit_list_add_elem(ctx->net, trans, GFP_KERNEL);
+ nft_trans_commit_list_add_elem(ctx->net, trans);
goto err_elem_free;
}
}
@@ -7553,7 +7589,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
}
nft_trans_container_elem(trans)->elems[0].priv = elem.priv;
- nft_trans_commit_list_add_elem(ctx->net, trans, GFP_KERNEL);
+ nft_trans_commit_list_add_elem(ctx->net, trans);
return 0;
err_set_full:
@@ -7819,7 +7855,7 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
nft_setelem_data_deactivate(ctx->net, set, elem.priv);
nft_trans_container_elem(trans)->elems[0].priv = elem.priv;
- nft_trans_commit_list_add_elem(ctx->net, trans, GFP_KERNEL);
+ nft_trans_commit_list_add_elem(ctx->net, trans);
return 0;
fail_ops:
@@ -7844,9 +7880,8 @@ static int nft_setelem_flush(const struct nft_ctx *ctx,
if (!nft_set_elem_active(ext, iter->genmask))
return 0;
- trans = nft_trans_alloc_gfp(ctx, NFT_MSG_DELSETELEM,
- struct_size_t(struct nft_trans_elem, elems, 1),
- GFP_ATOMIC);
+ trans = nft_trans_alloc(ctx, NFT_MSG_DELSETELEM,
+ struct_size_t(struct nft_trans_elem, elems, 1));
if (!trans)
return -ENOMEM;
@@ -7857,7 +7892,7 @@ static int nft_setelem_flush(const struct nft_ctx *ctx,
nft_trans_elem_set(trans) = set;
nft_trans_container_elem(trans)->nelems = 1;
nft_trans_container_elem(trans)->elems[0].priv = elem_priv;
- nft_trans_commit_list_add_elem(ctx->net, trans, GFP_ATOMIC);
+ nft_trans_commit_list_add_elem(ctx->net, trans);
return 0;
}
@@ -7874,7 +7909,7 @@ static int __nft_set_catchall_flush(const struct nft_ctx *ctx,
nft_setelem_data_deactivate(ctx->net, set, elem_priv);
nft_trans_container_elem(trans)->elems[0].priv = elem_priv;
- nft_trans_commit_list_add_elem(ctx->net, trans, GFP_KERNEL);
+ nft_trans_commit_list_add_elem(ctx->net, trans);
return 0;
}
@@ -8338,25 +8373,26 @@ static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net,
{
struct nlmsghdr *nlh;
- event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
- nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
- NFNETLINK_V0, nft_base_seq(net));
+ nlh = nfnl_msg_put(skb, portid, seq,
+ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
+ flags, family, NFNETLINK_V0, nft_base_seq_be16(net));
if (!nlh)
goto nla_put_failure;
if (nla_put_string(skb, NFTA_OBJ_TABLE, table->name) ||
nla_put_string(skb, NFTA_OBJ_NAME, obj->key.name) ||
+ nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) ||
nla_put_be64(skb, NFTA_OBJ_HANDLE, cpu_to_be64(obj->handle),
NFTA_OBJ_PAD))
goto nla_put_failure;
- if (event == NFT_MSG_DELOBJ) {
+ if (event == NFT_MSG_DELOBJ ||
+ event == NFT_MSG_DESTROYOBJ) {
nlmsg_end(skb, nlh);
return 0;
}
- if (nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) ||
- nla_put_be32(skb, NFTA_OBJ_USE, htonl(obj->use)) ||
+ if (nla_put_be32(skb, NFTA_OBJ_USE, htonl(obj->use)) ||
nft_object_dump(skb, NFTA_OBJ_DATA, obj, reset))
goto nla_put_failure;
@@ -8404,7 +8440,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
nft_net = nft_pernet(net);
- cb->seq = READ_ONCE(nft_net->base_seq);
+ cb->seq = nft_base_seq(net);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
@@ -8438,7 +8474,7 @@ cont:
idx++;
}
if (ctx->reset && entries)
- audit_log_obj_reset(table, nft_net->base_seq, entries);
+ audit_log_obj_reset(table, nft_base_seq(net), entries);
if (rc < 0)
break;
}
@@ -8607,7 +8643,7 @@ static int nf_tables_getobj_reset(struct sk_buff *skb,
buf = kasprintf(GFP_ATOMIC, "%.*s:%u",
nla_len(nla[NFTA_OBJ_TABLE]),
(char *)nla_data(nla[NFTA_OBJ_TABLE]),
- nft_net->base_seq);
+ nft_base_seq(net));
audit_log_nfcfg(buf, info->nfmsg->nfgen_family, 1,
AUDIT_NFT_OP_OBJ_RESET, GFP_ATOMIC);
kfree(buf);
@@ -8712,9 +8748,8 @@ void nft_obj_notify(struct net *net, const struct nft_table *table,
struct nft_object *obj, u32 portid, u32 seq, int event,
u16 flags, int family, int report, gfp_t gfp)
{
- struct nftables_pernet *nft_net = nft_pernet(net);
char *buf = kasprintf(gfp, "%s:%u",
- table->name, nft_net->base_seq);
+ table->name, nft_base_seq(net));
audit_log_nfcfg(buf,
family,
@@ -8889,11 +8924,12 @@ static int nft_flowtable_parse_hook(const struct nft_ctx *ctx,
list_for_each_entry(hook, &flowtable_hook->list, list) {
list_for_each_entry(ops, &hook->ops_list, list) {
- ops->pf = NFPROTO_NETDEV;
- ops->hooknum = flowtable_hook->num;
- ops->priority = flowtable_hook->priority;
- ops->priv = &flowtable->data;
- ops->hook = flowtable->data.type->hook;
+ ops->pf = NFPROTO_NETDEV;
+ ops->hooknum = flowtable_hook->num;
+ ops->priority = flowtable_hook->priority;
+ ops->priv = &flowtable->data;
+ ops->hook = flowtable->data.type->hook;
+ ops->hook_ops_type = NF_HOOK_OP_NFT_FT;
}
}
@@ -9053,6 +9089,7 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
{
const struct nlattr * const *nla = ctx->nla;
struct nft_flowtable_hook flowtable_hook;
+ struct nftables_pernet *nft_net;
struct nft_hook *hook, *next;
struct nf_hook_ops *ops;
struct nft_trans *trans;
@@ -9069,6 +9106,20 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
if (nft_hook_list_find(&flowtable->hook_list, hook)) {
list_del(&hook->list);
nft_netdev_hook_free(hook);
+ continue;
+ }
+
+ nft_net = nft_pernet(ctx->net);
+ list_for_each_entry(trans, &nft_net->commit_list, list) {
+ if (trans->msg_type != NFT_MSG_NEWFLOWTABLE ||
+ trans->table != ctx->table ||
+ !nft_trans_flowtable_update(trans))
+ continue;
+
+ if (nft_hook_list_find(&nft_trans_flowtable_hooks(trans), hook)) {
+ err = -EEXIST;
+ goto err_flowtable_update_hook;
+ }
}
}
@@ -9382,9 +9433,9 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
struct nft_hook *hook;
struct nlmsghdr *nlh;
- event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
- nlh = nfnl_msg_put(skb, portid, seq, event, flags, family,
- NFNETLINK_V0, nft_base_seq(net));
+ nlh = nfnl_msg_put(skb, portid, seq,
+ nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
+ flags, family, NFNETLINK_V0, nft_base_seq_be16(net));
if (!nlh)
goto nla_put_failure;
@@ -9394,7 +9445,9 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
NFTA_FLOWTABLE_PAD))
goto nla_put_failure;
- if (event == NFT_MSG_DELFLOWTABLE && !hook_list) {
+ if (!hook_list &&
+ (event == NFT_MSG_DELFLOWTABLE ||
+ event == NFT_MSG_DESTROYFLOWTABLE)) {
nlmsg_end(skb, nlh);
return 0;
}
@@ -9419,8 +9472,7 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
list_for_each_entry_rcu(hook, hook_list, list,
lockdep_commit_lock_is_held(net)) {
- if (nla_put(skb, NFTA_DEVICE_NAME,
- hook->ifnamelen, hook->ifname))
+ if (nft_nla_put_hook_dev(skb, hook))
goto nla_put_failure;
}
nla_nest_end(skb, nest_devs);
@@ -9452,7 +9504,7 @@ static int nf_tables_dump_flowtable(struct sk_buff *skb,
rcu_read_lock();
nft_net = nft_pernet(net);
- cb->seq = READ_ONCE(nft_net->base_seq);
+ cb->seq = nft_base_seq(net);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
@@ -9637,17 +9689,16 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
u32 portid, u32 seq)
{
- struct nftables_pernet *nft_net = nft_pernet(net);
struct nlmsghdr *nlh;
char buf[TASK_COMM_LEN];
int event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWGEN);
nlh = nfnl_msg_put(skb, portid, seq, event, 0, AF_UNSPEC,
- NFNETLINK_V0, nft_base_seq(net));
+ NFNETLINK_V0, nft_base_seq_be16(net));
if (!nlh)
goto nla_put_failure;
- if (nla_put_be32(skb, NFTA_GEN_ID, htonl(nft_net->base_seq)) ||
+ if (nla_put_be32(skb, NFTA_GEN_ID, htonl(nft_base_seq(net))) ||
nla_put_be32(skb, NFTA_GEN_PROC_PID, htonl(task_pid_nr(current))) ||
nla_put_string(skb, NFTA_GEN_PROC_NAME, get_task_comm(buf, current)))
goto nla_put_failure;
@@ -9686,64 +9737,6 @@ struct nf_hook_ops *nft_hook_find_ops_rcu(const struct nft_hook *hook,
}
EXPORT_SYMBOL_GPL(nft_hook_find_ops_rcu);
-static void
-nf_tables_device_notify(const struct nft_table *table, int attr,
- const char *name, const struct nft_hook *hook,
- const struct net_device *dev, int event)
-{
- struct net *net = dev_net(dev);
- struct nlmsghdr *nlh;
- struct sk_buff *skb;
- u16 flags = 0;
-
- if (!nfnetlink_has_listeners(net, NFNLGRP_NFT_DEV))
- return;
-
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!skb)
- goto err;
-
- event = event == NETDEV_REGISTER ? NFT_MSG_NEWDEV : NFT_MSG_DELDEV;
- event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
- nlh = nfnl_msg_put(skb, 0, 0, event, flags, table->family,
- NFNETLINK_V0, nft_base_seq(net));
- if (!nlh)
- goto err;
-
- if (nla_put_string(skb, NFTA_DEVICE_TABLE, table->name) ||
- nla_put_string(skb, attr, name) ||
- nla_put(skb, NFTA_DEVICE_SPEC, hook->ifnamelen, hook->ifname) ||
- nla_put_string(skb, NFTA_DEVICE_NAME, dev->name))
- goto err;
-
- nlmsg_end(skb, nlh);
- nfnetlink_send(skb, net, 0, NFNLGRP_NFT_DEV,
- nlmsg_report(nlh), GFP_KERNEL);
- return;
-err:
- if (skb)
- kfree_skb(skb);
- nfnetlink_set_err(net, 0, NFNLGRP_NFT_DEV, -ENOBUFS);
-}
-
-void
-nf_tables_chain_device_notify(const struct nft_chain *chain,
- const struct nft_hook *hook,
- const struct net_device *dev, int event)
-{
- nf_tables_device_notify(chain->table, NFTA_DEVICE_CHAIN,
- chain->name, hook, dev, event);
-}
-
-static void
-nf_tables_flowtable_device_notify(const struct nft_flowtable *ft,
- const struct nft_hook *hook,
- const struct net_device *dev, int event)
-{
- nf_tables_device_notify(ft->table, NFTA_DEVICE_FLOWTABLE,
- ft->name, hook, dev, event);
-}
-
static int nft_flowtable_event(unsigned long event, struct net_device *dev,
struct nft_flowtable *flowtable, bool changename)
{
@@ -9777,12 +9770,13 @@ static int nft_flowtable_event(unsigned long event, struct net_device *dev,
if (!ops)
return 1;
- ops->pf = NFPROTO_NETDEV;
- ops->hooknum = flowtable->hooknum;
- ops->priority = flowtable->data.priority;
- ops->priv = &flowtable->data;
- ops->hook = flowtable->data.type->hook;
- ops->dev = dev;
+ ops->pf = NFPROTO_NETDEV;
+ ops->hooknum = flowtable->hooknum;
+ ops->priority = flowtable->data.priority;
+ ops->priv = &flowtable->data;
+ ops->hook = flowtable->data.type->hook;
+ ops->hook_ops_type = NF_HOOK_OP_NFT_FT;
+ ops->dev = dev;
if (nft_register_flowtable_ops(dev_net(dev),
flowtable, ops)) {
kfree(ops);
@@ -9791,7 +9785,6 @@ static int nft_flowtable_event(unsigned long event, struct net_device *dev,
list_add_tail_rcu(&ops->list, &hook->ops_list);
break;
}
- nf_tables_flowtable_device_notify(flowtable, hook, dev, event);
break;
}
return 0;
@@ -10967,11 +10960,12 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
* Bump generation counter, invalidate any dump in progress.
* Cannot fail after this point.
*/
- base_seq = READ_ONCE(nft_net->base_seq);
+ base_seq = nft_base_seq(net);
while (++base_seq == 0)
;
- WRITE_ONCE(nft_net->base_seq, base_seq);
+ /* pairs with smp_load_acquire in nft_lookup_eval */
+ smp_store_release(&net->nft.base_seq, base_seq);
gc_seq = nft_gc_seq_begin(nft_net);
@@ -11180,7 +11174,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
nft_commit_notify(net, NETLINK_CB(skb).portid);
nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
- nf_tables_commit_audit_log(&adl, nft_net->base_seq);
+ nf_tables_commit_audit_log(&adl, nft_base_seq(net));
nft_gc_seq_end(nft_net, gc_seq);
nft_net->validate_state = NFT_VALIDATE_SKIP;
@@ -11505,7 +11499,7 @@ static bool nf_tables_valid_genid(struct net *net, u32 genid)
mutex_lock(&nft_net->commit_mutex);
nft_net->tstamp = get_jiffies_64();
- genid_ok = genid == 0 || nft_net->base_seq == genid;
+ genid_ok = genid == 0 || nft_base_seq(net) == genid;
if (!genid_ok)
mutex_unlock(&nft_net->commit_mutex);
@@ -12142,7 +12136,7 @@ static int __net_init nf_tables_init_net(struct net *net)
INIT_LIST_HEAD(&nft_net->module_list);
INIT_LIST_HEAD(&nft_net->notify_list);
mutex_init(&nft_net->commit_mutex);
- nft_net->base_seq = 1;
+ net->nft.base_seq = 1;
nft_net->gc_seq = 0;
nft_net->validate_state = NFT_VALIDATE_SKIP;
INIT_WORK(&nft_net->destroy_work, nf_tables_trans_destroy_work);
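Several hunks above retire nft_net->base_seq in favour of net->nft.base_seq and publish the bumped generation with release semantics, so lockless readers and in-progress dumps either see the new ruleset or notice that it changed. The writer side of that pairing, reduced to a sketch (nft_base_seq() is assumed to read the same field; the real code is in nf_tables_commit() above):

	/* Sketch: bump the generation and publish it. */
	unsigned int base_seq = nft_base_seq(net);

	while (++base_seq == 0)		/* 0 is reserved for "any generation" */
		;

	/* pairs with smp_load_acquire() in nft_lookup.c */
	smp_store_release(&net->nft.base_seq, base_seq);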
diff --git a/net/netfilter/nf_tables_trace.c b/net/netfilter/nf_tables_trace.c
index ae3fe87195ab..a88abae5a9de 100644
--- a/net/netfilter/nf_tables_trace.c
+++ b/net/netfilter/nf_tables_trace.c
@@ -127,6 +127,9 @@ static int nf_trace_fill_ct_info(struct sk_buff *nlskb,
if (nla_put_be32(nlskb, NFTA_TRACE_CT_ID, (__force __be32)id))
return -1;
+ /* Kernel implementation detail, withhold this from userspace for now */
+ status &= ~IPS_NAT_CLASH;
+
if (status && nla_put_be32(nlskb, NFTA_TRACE_CT_STATUS, htonl(status)))
return -1;
}
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index ac77fc21632d..811d02b4c4f7 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -86,7 +86,6 @@ static const int nfnl_group2type[NFNLGRP_MAX+1] = {
[NFNLGRP_NFTABLES] = NFNL_SUBSYS_NFTABLES,
[NFNLGRP_ACCT_QUOTA] = NFNL_SUBSYS_ACCT,
[NFNLGRP_NFTRACE] = NFNL_SUBSYS_NFTABLES,
- [NFNLGRP_NFT_DEV] = NFNL_SUBSYS_NFTABLES,
};
static struct nfnl_net *nfnl_pernet(struct net *net)
@@ -377,6 +376,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
const struct nfnetlink_subsystem *ss;
const struct nfnl_callback *nc;
struct netlink_ext_ack extack;
+ struct nlmsghdr *onlh = nlh;
LIST_HEAD(err_list);
u32 status;
int err;
@@ -387,6 +387,7 @@ replay:
status = 0;
replay_abort:
skb = netlink_skb_clone(oskb, GFP_KERNEL);
+ nlh = onlh;
if (!skb)
return netlink_ack(oskb, nlh, -ENOMEM, NULL);
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index eab4f476b47f..38d75484e531 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -461,11 +461,6 @@ static int cttimeout_default_get(struct sk_buff *skb,
case IPPROTO_UDPLITE:
timeouts = nf_udp_pernet(info->net)->timeouts;
break;
- case IPPROTO_DCCP:
-#ifdef CONFIG_NF_CT_PROTO_DCCP
- timeouts = nf_dccp_pernet(info->net)->dccp_timeout;
-#endif
- break;
case IPPROTO_ICMPV6:
timeouts = &nf_icmpv6_pernet(info->net)->timeout;
break;
diff --git a/net/netfilter/nfnetlink_hook.c b/net/netfilter/nfnetlink_hook.c
index ade8ee1988b1..92d869317cba 100644
--- a/net/netfilter/nfnetlink_hook.c
+++ b/net/netfilter/nfnetlink_hook.c
@@ -109,13 +109,30 @@ cancel_nest:
return -EMSGSIZE;
}
+static int nfnl_hook_put_nft_info_desc(struct sk_buff *nlskb, const char *tname,
+ const char *name, u8 family)
+{
+ struct nlattr *nest;
+
+ nest = nla_nest_start(nlskb, NFNLA_HOOK_INFO_DESC);
+ if (!nest ||
+ nla_put_string(nlskb, NFNLA_CHAIN_TABLE, tname) ||
+ nla_put_string(nlskb, NFNLA_CHAIN_NAME, name) ||
+ nla_put_u8(nlskb, NFNLA_CHAIN_FAMILY, family)) {
+ nla_nest_cancel(nlskb, nest);
+ return -EMSGSIZE;
+ }
+ nla_nest_end(nlskb, nest);
+ return 0;
+}
+
static int nfnl_hook_put_nft_chain_info(struct sk_buff *nlskb,
const struct nfnl_dump_hook_data *ctx,
unsigned int seq,
struct nft_chain *chain)
{
struct net *net = sock_net(nlskb->sk);
- struct nlattr *nest, *nest2;
+ struct nlattr *nest;
int ret = 0;
if (WARN_ON_ONCE(!chain))
@@ -128,29 +145,47 @@ static int nfnl_hook_put_nft_chain_info(struct sk_buff *nlskb,
if (!nest)
return -EMSGSIZE;
- nest2 = nla_nest_start(nlskb, NFNLA_HOOK_INFO_DESC);
- if (!nest2)
- goto cancel_nest;
+ ret = nfnl_hook_put_nft_info_desc(nlskb, chain->table->name,
+ chain->name, chain->table->family);
+ if (ret) {
+ nla_nest_cancel(nlskb, nest);
+ return ret;
+ }
- ret = nla_put_string(nlskb, NFNLA_CHAIN_TABLE, chain->table->name);
- if (ret)
- goto cancel_nest;
+ nla_nest_end(nlskb, nest);
+ return 0;
+}
- ret = nla_put_string(nlskb, NFNLA_CHAIN_NAME, chain->name);
- if (ret)
- goto cancel_nest;
+static int nfnl_hook_put_nft_ft_info(struct sk_buff *nlskb,
+ const struct nfnl_dump_hook_data *ctx,
+ unsigned int seq,
+ struct nf_flowtable *nf_ft)
+{
+ struct nft_flowtable *ft =
+ container_of(nf_ft, struct nft_flowtable, data);
+ struct net *net = sock_net(nlskb->sk);
+ struct nlattr *nest;
+ int ret = 0;
- ret = nla_put_u8(nlskb, NFNLA_CHAIN_FAMILY, chain->table->family);
- if (ret)
- goto cancel_nest;
+ if (WARN_ON_ONCE(!nf_ft))
+ return 0;
- nla_nest_end(nlskb, nest2);
- nla_nest_end(nlskb, nest);
- return ret;
+ if (!nft_is_active(net, ft))
+ return 0;
-cancel_nest:
- nla_nest_cancel(nlskb, nest);
- return -EMSGSIZE;
+ nest = nfnl_start_info_type(nlskb, NFNL_HOOK_TYPE_NFT_FLOWTABLE);
+ if (!nest)
+ return -EMSGSIZE;
+
+ ret = nfnl_hook_put_nft_info_desc(nlskb, ft->table->name,
+ ft->name, ft->table->family);
+ if (ret) {
+ nla_nest_cancel(nlskb, nest);
+ return ret;
+ }
+
+ nla_nest_end(nlskb, nest);
+ return 0;
}
static int nfnl_hook_dump_one(struct sk_buff *nlskb,
@@ -220,6 +255,9 @@ static int nfnl_hook_dump_one(struct sk_buff *nlskb,
case NF_HOOK_OP_BPF:
ret = nfnl_hook_put_bpf_prog_info(nlskb, ctx, seq, ops->priv);
break;
+ case NF_HOOK_OP_NFT_FT:
+ ret = nfnl_hook_put_nft_ft_info(nlskb, ctx, seq, ops->priv);
+ break;
case NF_HOOK_OP_UNDEFINED:
break;
default:
diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
index 846d48ba8965..b16185e9a6dd 100644
--- a/net/netfilter/nft_chain_filter.c
+++ b/net/netfilter/nft_chain_filter.c
@@ -363,8 +363,6 @@ static int nft_netdev_event(unsigned long event, struct net_device *dev,
list_add_tail_rcu(&ops->list, &hook->ops_list);
break;
}
- nf_tables_chain_device_notify(&basechain->chain,
- hook, dev, event);
break;
}
return 0;
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 88922e0e8e83..7807d8129664 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -44,9 +44,9 @@ static int nft_dynset_expr_setup(const struct nft_dynset *priv,
return 0;
}
-static struct nft_elem_priv *nft_dynset_new(struct nft_set *set,
- const struct nft_expr *expr,
- struct nft_regs *regs)
+struct nft_elem_priv *nft_dynset_new(struct nft_set *set,
+ const struct nft_expr *expr,
+ struct nft_regs *regs)
{
const struct nft_dynset *priv = nft_expr_priv(expr);
struct nft_set_ext *ext;
@@ -91,8 +91,8 @@ void nft_dynset_eval(const struct nft_expr *expr,
return;
}
- if (set->ops->update(set, &regs->data[priv->sreg_key], nft_dynset_new,
- expr, regs, &ext)) {
+ ext = set->ops->update(set, &regs->data[priv->sreg_key], expr, regs);
+ if (ext) {
if (priv->op == NFT_DYNSET_OP_UPDATE &&
nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
READ_ONCE(nft_set_ext_timeout(ext)->timeout) != 0) {
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index c74012c99125..7eedf4e3ae9c 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -407,6 +407,7 @@ err:
regs->verdict.code = NFT_BREAK;
}
+#ifdef CONFIG_NFT_EXTHDR_DCCP
static void nft_exthdr_dccp_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
@@ -482,6 +483,7 @@ static void nft_exthdr_dccp_eval(const struct nft_expr *expr,
err:
*dest = 0;
}
+#endif
static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
[NFTA_EXTHDR_DREG] = { .type = NLA_U32 },
@@ -634,6 +636,7 @@ static int nft_exthdr_ipv4_init(const struct nft_ctx *ctx,
return 0;
}
+#ifdef CONFIG_NFT_EXTHDR_DCCP
static int nft_exthdr_dccp_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
@@ -649,6 +652,7 @@ static int nft_exthdr_dccp_init(const struct nft_ctx *ctx,
return 0;
}
+#endif
static int nft_exthdr_dump_common(struct sk_buff *skb, const struct nft_exthdr *priv)
{
@@ -779,6 +783,7 @@ static const struct nft_expr_ops nft_exthdr_sctp_ops = {
.reduce = nft_exthdr_reduce,
};
+#ifdef CONFIG_NFT_EXTHDR_DCCP
static const struct nft_expr_ops nft_exthdr_dccp_ops = {
.type = &nft_exthdr_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
@@ -787,6 +792,7 @@ static const struct nft_expr_ops nft_exthdr_dccp_ops = {
.dump = nft_exthdr_dump,
.reduce = nft_exthdr_reduce,
};
+#endif
static const struct nft_expr_ops *
nft_exthdr_select_ops(const struct nft_ctx *ctx,
@@ -822,10 +828,12 @@ nft_exthdr_select_ops(const struct nft_ctx *ctx,
if (tb[NFTA_EXTHDR_DREG])
return &nft_exthdr_sctp_ops;
break;
+#ifdef CONFIG_NFT_EXTHDR_DCCP
case NFT_EXTHDR_OP_DCCP:
if (tb[NFTA_EXTHDR_DREG])
return &nft_exthdr_dccp_ops;
break;
+#endif
}
return ERR_PTR(-EOPNOTSUPP);
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index 225ff293cd50..14dd1c0698c3 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -9,7 +9,7 @@
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_tables.h>
#include <net/ip.h>
-#include <net/inet_dscp.h>
+#include <net/flow.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_conntrack_core.h>
@@ -236,7 +236,7 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
fl.u.ip4.saddr = ct->tuplehash[!dir].tuple.src.u3.ip;
fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
fl.u.ip4.flowi4_iif = this_dst->dev->ifindex;
- fl.u.ip4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip_hdr(pkt->skb)));
+ fl.u.ip4.flowi4_dscp = ip4h_dscp(ip_hdr(pkt->skb));
fl.u.ip4.flowi4_mark = pkt->skb->mark;
fl.u.ip4.flowi4_flags = FLOWI_FLAG_ANYSRC;
break;
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 63ef832b8aa7..58c5b14889c4 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -24,36 +24,73 @@ struct nft_lookup {
struct nft_set_binding binding;
};
-#ifdef CONFIG_MITIGATION_RETPOLINE
-bool nft_set_do_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext)
+static const struct nft_set_ext *
+__nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key)
{
+#ifdef CONFIG_MITIGATION_RETPOLINE
if (set->ops == &nft_set_hash_fast_type.ops)
- return nft_hash_lookup_fast(net, set, key, ext);
+ return nft_hash_lookup_fast(net, set, key);
if (set->ops == &nft_set_hash_type.ops)
- return nft_hash_lookup(net, set, key, ext);
+ return nft_hash_lookup(net, set, key);
if (set->ops == &nft_set_rhash_type.ops)
- return nft_rhash_lookup(net, set, key, ext);
+ return nft_rhash_lookup(net, set, key);
if (set->ops == &nft_set_bitmap_type.ops)
- return nft_bitmap_lookup(net, set, key, ext);
+ return nft_bitmap_lookup(net, set, key);
if (set->ops == &nft_set_pipapo_type.ops)
- return nft_pipapo_lookup(net, set, key, ext);
+ return nft_pipapo_lookup(net, set, key);
#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
if (set->ops == &nft_set_pipapo_avx2_type.ops)
- return nft_pipapo_avx2_lookup(net, set, key, ext);
+ return nft_pipapo_avx2_lookup(net, set, key);
#endif
if (set->ops == &nft_set_rbtree_type.ops)
- return nft_rbtree_lookup(net, set, key, ext);
+ return nft_rbtree_lookup(net, set, key);
WARN_ON_ONCE(1);
- return set->ops->lookup(net, set, key, ext);
+#endif
+ return set->ops->lookup(net, set, key);
+}
+
+static unsigned int nft_base_seq(const struct net *net)
+{
+ /* pairs with smp_store_release() in nf_tables_commit() */
+ return smp_load_acquire(&net->nft.base_seq);
+}
+
+static bool nft_lookup_should_retry(const struct net *net, unsigned int seq)
+{
+ return unlikely(seq != nft_base_seq(net));
+}
+
+const struct nft_set_ext *
+nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key)
+{
+ const struct nft_set_ext *ext;
+ unsigned int base_seq;
+
+ do {
+ base_seq = nft_base_seq(net);
+
+ ext = __nft_set_do_lookup(net, set, key);
+ if (ext)
+ break;
+ /* No match? There is a small chance that lookup was
+ * performed in the old generation, but nf_tables_commit()
+ * already unlinked a (matching) element.
+ *
+ * We need to repeat the lookup to make sure that we didn't
+ * miss a matching element in the new generation.
+ */
+ } while (nft_lookup_should_retry(net, base_seq));
+
+ return ext;
}
EXPORT_SYMBOL_GPL(nft_set_do_lookup);
-#endif
void nft_lookup_eval(const struct nft_expr *expr,
struct nft_regs *regs,
@@ -61,12 +98,12 @@ void nft_lookup_eval(const struct nft_expr *expr,
{
const struct nft_lookup *priv = nft_expr_priv(expr);
const struct nft_set *set = priv->set;
- const struct nft_set_ext *ext = NULL;
const struct net *net = nft_net(pkt);
+ const struct nft_set_ext *ext;
bool found;
- found = nft_set_do_lookup(net, set, &regs->data[priv->sreg], &ext) ^
- priv->invert;
+ ext = nft_set_do_lookup(net, set, &regs->data[priv->sreg]);
+ found = !!ext ^ priv->invert;
if (!found) {
ext = nft_set_catchall_lookup(net, set);
if (!ext) {
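Read side of the same pairing: nft_set_do_lookup() above samples the generation with acquire semantics and retries a missed lookup if a commit raced with it. The control flow, condensed under the assumption that __nft_set_do_lookup() has no side effects and may simply be repeated:

	const struct nft_set_ext *ext;
	unsigned int seen;

	do {
		/* pairs with smp_store_release() in nf_tables_commit() */
		seen = smp_load_acquire(&net->nft.base_seq);
		ext = __nft_set_do_lookup(net, set, key);
		/* a hit is trusted; only a miss during a generation flip is
		 * retried, since the element may have become visible in the
		 * new generation while the lookup ran.
		 */
	} while (!ext && unlikely(seen != smp_load_acquire(&net->nft.base_seq)));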
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index 09da7a3f9f96..1a62e384766a 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -22,6 +22,35 @@ void nft_objref_eval(const struct nft_expr *expr,
obj->ops->eval(obj, regs, pkt);
}
+static int nft_objref_validate_obj_type(const struct nft_ctx *ctx, u32 type)
+{
+ unsigned int hooks;
+
+ switch (type) {
+ case NFT_OBJECT_SYNPROXY:
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET)
+ return -EOPNOTSUPP;
+
+ hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD);
+
+ return nft_chain_validate_hooks(ctx->chain, hooks);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int nft_objref_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_object *obj = nft_objref_priv(expr);
+
+ return nft_objref_validate_obj_type(ctx, obj->ops->type->type);
+}
+
static int nft_objref_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
@@ -93,6 +122,7 @@ static const struct nft_expr_ops nft_objref_ops = {
.activate = nft_objref_activate,
.deactivate = nft_objref_deactivate,
.dump = nft_objref_dump,
+ .validate = nft_objref_validate,
.reduce = NFT_REDUCE_READONLY,
};
@@ -111,10 +141,9 @@ void nft_objref_map_eval(const struct nft_expr *expr,
struct net *net = nft_net(pkt);
const struct nft_set_ext *ext;
struct nft_object *obj;
- bool found;
- found = nft_set_do_lookup(net, set, &regs->data[priv->sreg], &ext);
- if (!found) {
+ ext = nft_set_do_lookup(net, set, &regs->data[priv->sreg]);
+ if (!ext) {
ext = nft_set_catchall_lookup(net, set);
if (!ext) {
regs->verdict.code = NFT_BREAK;
@@ -198,6 +227,14 @@ static void nft_objref_map_destroy(const struct nft_ctx *ctx,
nf_tables_destroy_set(ctx, priv->set);
}
+static int nft_objref_map_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ const struct nft_objref_map *priv = nft_expr_priv(expr);
+
+ return nft_objref_validate_obj_type(ctx, priv->set->objtype);
+}
+
static const struct nft_expr_ops nft_objref_map_ops = {
.type = &nft_objref_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
@@ -207,6 +244,7 @@ static const struct nft_expr_ops nft_objref_map_ops = {
.deactivate = nft_objref_map_deactivate,
.destroy = nft_objref_map_destroy,
.dump = nft_objref_map_dump,
+ .validate = nft_objref_map_validate,
.reduce = NFT_REDUCE_READONLY,
};
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 7dfc5343dae4..b0214418f75a 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -40,7 +40,7 @@ static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
/* add vlan header into the user buffer for if tag was removed by offloads */
static bool
-nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
+nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u16 offset, u8 len)
{
int mac_off = skb_mac_header(skb) - skb->data;
u8 *vlanh, *dst_u8 = (u8 *) d;
@@ -212,7 +212,7 @@ static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
[NFTA_PAYLOAD_SREG] = { .type = NLA_U32 },
[NFTA_PAYLOAD_DREG] = { .type = NLA_U32 },
[NFTA_PAYLOAD_BASE] = { .type = NLA_U32 },
- [NFTA_PAYLOAD_OFFSET] = NLA_POLICY_MAX(NLA_BE32, 255),
+ [NFTA_PAYLOAD_OFFSET] = { .type = NLA_BE32 },
[NFTA_PAYLOAD_LEN] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_PAYLOAD_CSUM_TYPE] = { .type = NLA_U32 },
[NFTA_PAYLOAD_CSUM_OFFSET] = NLA_POLICY_MAX(NLA_BE32, 255),
@@ -684,7 +684,7 @@ static const struct nft_expr_ops nft_payload_inner_ops = {
static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
- *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
+ csum_replace4(sum, (__force __be32)fsum, (__force __be32)tsum);
if (*sum == 0)
*sum = CSUM_MANGLED_0;
}
@@ -797,7 +797,7 @@ static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
struct nft_payload_set {
enum nft_payload_bases base:8;
- u8 offset;
+ u16 offset;
u8 len;
u8 sreg;
u8 csum_type;
@@ -812,7 +812,7 @@ struct nft_payload_vlan_hdr {
};
static bool
-nft_payload_set_vlan(const u32 *src, struct sk_buff *skb, u8 offset, u8 len,
+nft_payload_set_vlan(const u32 *src, struct sk_buff *skb, u16 offset, u8 len,
int *vlan_hlen)
{
struct nft_payload_vlan_hdr *vlanh;
@@ -940,14 +940,18 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
+ u32 csum_offset, offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
struct nft_payload_set *priv = nft_expr_priv(expr);
- u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
int err;
priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
- priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
+ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U16_MAX, &offset);
+ if (err < 0)
+ return err;
+ priv->offset = offset;
+
if (tb[NFTA_PAYLOAD_CSUM_TYPE])
csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) {
@@ -1069,7 +1073,7 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
if (tb[NFTA_PAYLOAD_DREG] == NULL)
return ERR_PTR(-EINVAL);
- err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset);
+ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U16_MAX, &offset);
if (err < 0)
return ERR_PTR(err);
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
index 12390d2e994f..8d3f040a904a 100644
--- a/net/netfilter/nft_set_bitmap.c
+++ b/net/netfilter/nft_set_bitmap.c
@@ -75,16 +75,21 @@ nft_bitmap_active(const u8 *bitmap, u32 idx, u32 off, u8 genmask)
}
INDIRECT_CALLABLE_SCOPE
-bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext)
+const struct nft_set_ext *
+nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key)
{
const struct nft_bitmap *priv = nft_set_priv(set);
+ static const struct nft_set_ext found;
u8 genmask = nft_genmask_cur(net);
u32 idx, off;
nft_bitmap_location(set, key, &idx, &off);
- return nft_bitmap_active(priv->bitmap, idx, off, genmask);
+ if (nft_bitmap_active(priv->bitmap, idx, off, genmask))
+ return &found;
+
+ return NULL;
}
static struct nft_bitmap_elem *
@@ -221,7 +226,8 @@ static void nft_bitmap_walk(const struct nft_ctx *ctx,
const struct nft_bitmap *priv = nft_set_priv(set);
struct nft_bitmap_elem *be;
- list_for_each_entry_rcu(be, &priv->list, head) {
+ list_for_each_entry_rcu(be, &priv->list, head,
+ lockdep_is_held(&nft_pernet(ctx->net)->commit_mutex)) {
if (iter->count < iter->skip)
goto cont;
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index abb0c8ec6371..ba01ce75d6de 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -30,6 +30,7 @@ struct nft_rhash {
struct nft_rhash_elem {
struct nft_elem_priv priv;
struct rhash_head node;
+ struct llist_node walk_node;
u32 wq_gc_seq;
struct nft_set_ext ext;
};
@@ -81,8 +82,9 @@ static const struct rhashtable_params nft_rhash_params = {
};
INDIRECT_CALLABLE_SCOPE
-bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext)
+const struct nft_set_ext *
+nft_rhash_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key)
{
struct nft_rhash *priv = nft_set_priv(set);
const struct nft_rhash_elem *he;
@@ -95,9 +97,9 @@ bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
if (he != NULL)
- *ext = &he->ext;
+ return &he->ext;
- return !!he;
+ return NULL;
}
static struct nft_elem_priv *
@@ -120,14 +122,9 @@ nft_rhash_get(const struct net *net, const struct nft_set *set,
return ERR_PTR(-ENOENT);
}
-static bool nft_rhash_update(struct nft_set *set, const u32 *key,
- struct nft_elem_priv *
- (*new)(struct nft_set *,
- const struct nft_expr *,
- struct nft_regs *regs),
- const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_set_ext **ext)
+static const struct nft_set_ext *
+nft_rhash_update(struct nft_set *set, const u32 *key,
+ const struct nft_expr *expr, struct nft_regs *regs)
{
struct nft_rhash *priv = nft_set_priv(set);
struct nft_rhash_elem *he, *prev;
@@ -143,11 +140,12 @@ static bool nft_rhash_update(struct nft_set *set, const u32 *key,
if (he != NULL)
goto out;
- elem_priv = new(set, expr, regs);
+ elem_priv = nft_dynset_new(set, expr, regs);
if (!elem_priv)
goto err1;
he = nft_elem_priv_cast(elem_priv);
+ init_llist_node(&he->walk_node);
prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,
nft_rhash_params);
if (IS_ERR(prev))
@@ -161,14 +159,13 @@ static bool nft_rhash_update(struct nft_set *set, const u32 *key,
}
out:
- *ext = &he->ext;
- return true;
+ return &he->ext;
err2:
nft_set_elem_destroy(set, &he->priv, true);
atomic_dec(&set->nelems);
err1:
- return false;
+ return NULL;
}
static int nft_rhash_insert(const struct net *net, const struct nft_set *set,
@@ -185,6 +182,7 @@ static int nft_rhash_insert(const struct net *net, const struct nft_set *set,
};
struct nft_rhash_elem *prev;
+ init_llist_node(&he->walk_node);
prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,
nft_rhash_params);
if (IS_ERR(prev))
@@ -266,12 +264,12 @@ static bool nft_rhash_delete(const struct nft_set *set,
return true;
}
-static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
- struct nft_set_iter *iter)
+static void nft_rhash_walk_ro(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_iter *iter)
{
struct nft_rhash *priv = nft_set_priv(set);
- struct nft_rhash_elem *he;
struct rhashtable_iter hti;
+ struct nft_rhash_elem *he;
rhashtable_walk_enter(&priv->ht, &hti);
rhashtable_walk_start(&hti);
@@ -300,6 +298,97 @@ cont:
rhashtable_walk_exit(&hti);
}
+static void nft_rhash_walk_update(const struct nft_ctx *ctx,
+ struct nft_set *set,
+ struct nft_set_iter *iter)
+{
+ struct nft_rhash *priv = nft_set_priv(set);
+ struct nft_rhash_elem *he, *tmp;
+ struct llist_node *first_node;
+ struct rhashtable_iter hti;
+ LLIST_HEAD(walk_list);
+
+ lockdep_assert_held(&nft_pernet(ctx->net)->commit_mutex);
+
+ if (set->in_update_walk) {
+ /* This can happen with bogus rulesets during ruleset validation
+ * when a verdict map causes a jump back to the same map.
+ *
+ * Without this extra check the walk_next loop below will see
+ * elems on the caller's walk_list and skip (not validate) them.

+ */
+ iter->err = -EMLINK;
+ return;
+ }
+
+ /* walk happens under RCU.
+ *
+ * We create a snapshot list so ->iter callback can sleep.
+ * commit_mutex is held, elements can ...
+ * .. be added in parallel from dataplane (dynset)
+ * .. be marked as dead in parallel from dataplane (dynset).
+ * .. be queued for removal in parallel (gc timeout).
+ * .. not be freed: transaction mutex is held.
+ */
+ rhashtable_walk_enter(&priv->ht, &hti);
+ rhashtable_walk_start(&hti);
+
+ while ((he = rhashtable_walk_next(&hti))) {
+ if (IS_ERR(he)) {
+ if (PTR_ERR(he) != -EAGAIN) {
+ iter->err = PTR_ERR(he);
+ break;
+ }
+
+ continue;
+ }
+
+ /* rhashtable resized during walk, skip */
+ if (llist_on_list(&he->walk_node))
+ continue;
+
+ llist_add(&he->walk_node, &walk_list);
+ }
+ rhashtable_walk_stop(&hti);
+ rhashtable_walk_exit(&hti);
+
+ first_node = __llist_del_all(&walk_list);
+ set->in_update_walk = true;
+ llist_for_each_entry_safe(he, tmp, first_node, walk_node) {
+ if (iter->err == 0) {
+ iter->err = iter->fn(ctx, set, iter, &he->priv);
+ if (iter->err == 0)
+ iter->count++;
+ }
+
+ /* all entries must be cleared again, else next ->walk iteration
+ * will skip entries.
+ */
+ init_llist_node(&he->walk_node);
+ }
+ set->in_update_walk = false;
+}
+
+static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_iter *iter)
+{
+ switch (iter->type) {
+ case NFT_ITER_UPDATE:
+ /* only relevant for netlink dumps which use READ type */
+ WARN_ON_ONCE(iter->skip != 0);
+
+ nft_rhash_walk_update(ctx, set, iter);
+ break;
+ case NFT_ITER_READ:
+ nft_rhash_walk_ro(ctx, set, iter);
+ break;
+ default:
+ iter->err = -EINVAL;
+ WARN_ON_ONCE(1);
+ break;
+ }
+}
+
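nft_rhash_walk_update() above snapshots the set onto a private llist so that the ->iter callback may sleep while commit_mutex is held, then re-initialises every node so later walks do not skip it. Stripped of error handling, the shape of the pattern (locals as declared in the hunk; the rhashtable walk can restart on resize, which is why llist_on_list() guards against queueing an element twice):

	struct rhashtable_iter hti;
	struct nft_rhash_elem *he, *tmp;
	LLIST_HEAD(snapshot);

	rhashtable_walk_enter(&priv->ht, &hti);
	rhashtable_walk_start(&hti);
	while ((he = rhashtable_walk_next(&hti)) != NULL) {
		if (IS_ERR(he))
			continue;		/* resize in progress, keep going */
		if (!llist_on_list(&he->walk_node))
			llist_add(&he->walk_node, &snapshot);
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	llist_for_each_entry_safe(he, tmp, __llist_del_all(&snapshot), walk_node) {
		iter->err = iter->fn(ctx, set, iter, &he->priv);
		init_llist_node(&he->walk_node);	/* reusable on the next walk */
	}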
static bool nft_rhash_expr_needs_gc_run(const struct nft_set *set,
struct nft_set_ext *ext)
{
@@ -507,8 +596,9 @@ struct nft_hash_elem {
};
INDIRECT_CALLABLE_SCOPE
-bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext)
+const struct nft_set_ext *
+nft_hash_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key)
{
struct nft_hash *priv = nft_set_priv(set);
u8 genmask = nft_genmask_cur(net);
@@ -519,12 +609,10 @@ bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
hash = reciprocal_scale(hash, priv->buckets);
hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
if (!memcmp(nft_set_ext_key(&he->ext), key, set->klen) &&
- nft_set_elem_active(&he->ext, genmask)) {
- *ext = &he->ext;
- return true;
- }
+ nft_set_elem_active(&he->ext, genmask))
+ return &he->ext;
}
- return false;
+ return NULL;
}
static struct nft_elem_priv *
@@ -547,9 +635,9 @@ nft_hash_get(const struct net *net, const struct nft_set *set,
}
INDIRECT_CALLABLE_SCOPE
-bool nft_hash_lookup_fast(const struct net *net,
- const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext)
+const struct nft_set_ext *
+nft_hash_lookup_fast(const struct net *net, const struct nft_set *set,
+ const u32 *key)
{
struct nft_hash *priv = nft_set_priv(set);
u8 genmask = nft_genmask_cur(net);
@@ -562,12 +650,10 @@ bool nft_hash_lookup_fast(const struct net *net,
hlist_for_each_entry_rcu(he, &priv->table[hash], node) {
k2 = *(u32 *)nft_set_ext_key(&he->ext)->data;
if (k1 == k2 &&
- nft_set_elem_active(&he->ext, genmask)) {
- *ext = &he->ext;
- return true;
- }
+ nft_set_elem_active(&he->ext, genmask))
+ return &he->ext;
}
- return false;
+ return NULL;
}
static u32 nft_jhash(const struct nft_set *set, const struct nft_hash *priv,
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index c5855069bdab..112fe46788b6 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -397,42 +397,45 @@ int pipapo_refill(unsigned long *map, unsigned int len, unsigned int rules,
}
/**
- * nft_pipapo_lookup() - Lookup function
- * @net: Network namespace
- * @set: nftables API set representation
- * @key: nftables API element representation containing key data
- * @ext: nftables API extension pointer, filled with matching reference
+ * pipapo_get_slow() - Get matching element reference given key data
+ * @m: storage containing the set elements
+ * @data: Key data to be matched against existing elements
+ * @genmask: If set, check that element is active in given genmask
+ * @tstamp: timestamp to check for expired elements
*
* For more details, see DOC: Theory of Operation.
*
- * Return: true on match, false otherwise.
+ * This is the main lookup function. It matches key data against either
+ * the working match set or the uncommitted copy, depending on what the
+ * caller passed to us.
+ * nft_pipapo_get (lookup from userspace/control plane) and nft_pipapo_lookup
+ * (datapath lookup) pass the active copy.
+ * The insertion path will pass the uncommitted working copy.
+ *
+ * Return: pointer to &struct nft_pipapo_elem on match, NULL otherwise.
*/
-bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext)
+static struct nft_pipapo_elem *pipapo_get_slow(const struct nft_pipapo_match *m,
+ const u8 *data, u8 genmask,
+ u64 tstamp)
{
- struct nft_pipapo *priv = nft_set_priv(set);
+ unsigned long *res_map, *fill_map, *map;
struct nft_pipapo_scratch *scratch;
- unsigned long *res_map, *fill_map;
- u8 genmask = nft_genmask_cur(net);
- const struct nft_pipapo_match *m;
const struct nft_pipapo_field *f;
- const u8 *rp = (const u8 *)key;
bool map_index;
int i;
local_bh_disable();
- m = rcu_dereference(priv->match);
-
- if (unlikely(!m || !*raw_cpu_ptr(m->scratch)))
- goto out;
-
scratch = *raw_cpu_ptr(m->scratch);
+ if (unlikely(!scratch))
+ goto out;
+ __local_lock_nested_bh(&scratch->bh_lock);
map_index = scratch->map_index;
- res_map = scratch->map + (map_index ? m->bsize_max : 0);
- fill_map = scratch->map + (map_index ? 0 : m->bsize_max);
+ map = NFT_PIPAPO_LT_ALIGN(&scratch->__map[0]);
+ res_map = map + (map_index ? m->bsize_max : 0);
+ fill_map = map + (map_index ? 0 : m->bsize_max);
pipapo_resmap_init(m, res_map);
@@ -444,12 +447,12 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
* packet bytes value, then AND bucket value
*/
if (likely(f->bb == 8))
- pipapo_and_field_buckets_8bit(f, res_map, rp);
+ pipapo_and_field_buckets_8bit(f, res_map, data);
else
- pipapo_and_field_buckets_4bit(f, res_map, rp);
+ pipapo_and_field_buckets_4bit(f, res_map, data);
NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4;
- rp += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);
+ data += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);
/* Now populate the bitmap for the next field, unless this is
* the last field, in which case return the matched 'ext'
@@ -463,15 +466,18 @@ next_match:
last);
if (b < 0) {
scratch->map_index = map_index;
+ __local_unlock_nested_bh(&scratch->bh_lock);
local_bh_enable();
- return false;
+ return NULL;
}
if (last) {
- *ext = &f->mt[b].e->ext;
- if (unlikely(nft_set_elem_expired(*ext) ||
- !nft_set_elem_active(*ext, genmask)))
+ struct nft_pipapo_elem *e;
+
+ e = f->mt[b].e;
+ if (unlikely(__nft_set_elem_expired(&e->ext, tstamp) ||
+ !nft_set_elem_active(&e->ext, genmask)))
goto next_match;
/* Last field: we're just returning the key without
@@ -480,9 +486,9 @@ next_match:
* *next* bitmap (not initial) for the next packet.
*/
scratch->map_index = map_index;
+ __local_unlock_nested_bh(&scratch->bh_lock);
local_bh_enable();
-
- return true;
+ return e;
}
/* Swap bitmap indices: res_map is the initial bitmap for the
@@ -492,112 +498,88 @@ next_match:
map_index = !map_index;
swap(res_map, fill_map);
- rp += NFT_PIPAPO_GROUPS_PADDING(f);
+ data += NFT_PIPAPO_GROUPS_PADDING(f);
}
+ __local_unlock_nested_bh(&scratch->bh_lock);
out:
local_bh_enable();
- return false;
+ return NULL;
}
/**
* pipapo_get() - Get matching element reference given key data
- * @net: Network namespace
- * @set: nftables API set representation
- * @m: storage containing active/existing elements
+ * @m: Storage containing the set elements
* @data: Key data to be matched against existing elements
* @genmask: If set, check that element is active in given genmask
- * @tstamp: timestamp to check for expired elements
- * @gfp: the type of memory to allocate (see kmalloc).
+ * @tstamp: Timestamp to check for expired elements
*
- * This is essentially the same as the lookup function, except that it matches
- * key data against the uncommitted copy and doesn't use preallocated maps for
- * bitmap results.
+ * This is a dispatcher function, calling either the generic C
+ * implementation or, if available, the AVX2 one.
+ * This helper is only called from the control plane, with either RCU
+ * read lock or transaction mutex held.
*
- * Return: pointer to &struct nft_pipapo_elem on match, error pointer otherwise.
+ * Return: pointer to &struct nft_pipapo_elem on match, NULL otherwise.
*/
-static struct nft_pipapo_elem *pipapo_get(const struct net *net,
- const struct nft_set *set,
- const struct nft_pipapo_match *m,
+static struct nft_pipapo_elem *pipapo_get(const struct nft_pipapo_match *m,
const u8 *data, u8 genmask,
- u64 tstamp, gfp_t gfp)
+ u64 tstamp)
{
- struct nft_pipapo_elem *ret = ERR_PTR(-ENOENT);
- unsigned long *res_map, *fill_map = NULL;
- const struct nft_pipapo_field *f;
- int i;
-
- if (m->bsize_max == 0)
- return ret;
+ struct nft_pipapo_elem *e;
- res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), gfp);
- if (!res_map) {
- ret = ERR_PTR(-ENOMEM);
- goto out;
- }
+ local_bh_disable();
- fill_map = kcalloc(m->bsize_max, sizeof(*res_map), gfp);
- if (!fill_map) {
- ret = ERR_PTR(-ENOMEM);
- goto out;
+#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
+ if (boot_cpu_has(X86_FEATURE_AVX2) && irq_fpu_usable()) {
+ e = pipapo_get_avx2(m, data, genmask, tstamp);
+ local_bh_enable();
+ return e;
}
+#endif
+ e = pipapo_get_slow(m, data, genmask, tstamp);
+ local_bh_enable();
+ return e;
+}
- pipapo_resmap_init(m, res_map);
-
- nft_pipapo_for_each_field(f, i, m) {
- bool last = i == m->field_count - 1;
- int b;
-
- /* For each bit group: select lookup table bucket depending on
- * packet bytes value, then AND bucket value
- */
- if (f->bb == 8)
- pipapo_and_field_buckets_8bit(f, res_map, data);
- else if (f->bb == 4)
- pipapo_and_field_buckets_4bit(f, res_map, data);
- else
- BUG();
-
- data += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);
-
- /* Now populate the bitmap for the next field, unless this is
- * the last field, in which case return the matched 'ext'
- * pointer if any.
- *
- * Now res_map contains the matching bitmap, and fill_map is the
- * bitmap for the next field.
- */
-next_match:
- b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
- last);
- if (b < 0)
- goto out;
-
- if (last) {
- if (__nft_set_elem_expired(&f->mt[b].e->ext, tstamp))
- goto next_match;
- if ((genmask &&
- !nft_set_elem_active(&f->mt[b].e->ext, genmask)))
- goto next_match;
-
- ret = f->mt[b].e;
- goto out;
- }
-
- data += NFT_PIPAPO_GROUPS_PADDING(f);
+/**
+ * nft_pipapo_lookup() - Dataplane frontend for the main lookup function
+ * @net: Network namespace
+ * @set: nftables API set representation
+ * @key: pointer to nft registers containing key data
+ *
+ * This function is called from the data path. It will search for
+ * an element matching the given key in the current active copy.
+ * Unlike other set types, this uses 0 instead of nft_genmask_cur().
+ *
+ * This is because new (future) elements are not reachable from
+ * priv->match, they get added to priv->clone instead.
+ * When the commit phase flips the generation bitmask, the
+ * 'now old' entries are skipped but without the 'now current'
+ * elements becoming visible. Using nft_genmask_cur() thus creates
+ * inconsistent state: matching old entries get skipped but the
+ * newly matching entries are unreachable.
+ *
+ * GENMASK_ANY doesn't work for the same reason: old-gen entries get
+ * skipped, new-gen entries are only reachable from priv->clone.
+ *
+ * nft_pipapo_commit swaps ->clone and ->match shortly after the
+ * genbit flip. As ->clone doesn't contain the old entries in the first
+ * place, lookup will only find the now-current ones.
+ *
+ * Return: nftables API extension pointer or NULL if no match.
+ */
+const struct nft_set_ext *
+nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key)
+{
+ struct nft_pipapo *priv = nft_set_priv(set);
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_elem *e;
- /* Swap bitmap indices: fill_map will be the initial bitmap for
- * the next field (i.e. the new res_map), and res_map is
- * guaranteed to be all-zeroes at this point, ready to be filled
- * according to the next mapping table.
- */
- swap(res_map, fill_map);
- }
+ m = rcu_dereference(priv->match);
+ e = pipapo_get_slow(m, (const u8 *)key, 0, get_jiffies_64());
-out:
- kfree(fill_map);
- kfree(res_map);
- return ret;
+ return e ? &e->ext : NULL;
}
/**
@@ -606,6 +588,11 @@ out:
* @set: nftables API set representation
* @elem: nftables API element representation containing key data
* @flags: Unused
+ *
+ * This function is called from the control plane path under
+ * RCU read lock.
+ *
+ * Return: set element private pointer or ERR_PTR(-ENOENT).
*/
static struct nft_elem_priv *
nft_pipapo_get(const struct net *net, const struct nft_set *set,
@@ -615,11 +602,10 @@ nft_pipapo_get(const struct net *net, const struct nft_set *set,
struct nft_pipapo_match *m = rcu_dereference(priv->match);
struct nft_pipapo_elem *e;
- e = pipapo_get(net, set, m, (const u8 *)elem->key.val.data,
- nft_genmask_cur(net), get_jiffies_64(),
- GFP_ATOMIC);
- if (IS_ERR(e))
- return ERR_CAST(e);
+ e = pipapo_get(m, (const u8 *)elem->key.val.data,
+ nft_genmask_cur(net), get_jiffies_64());
+ if (!e)
+ return ERR_PTR(-ENOENT);
return &e->priv;
}
@@ -1204,22 +1190,17 @@ static void pipapo_map(struct nft_pipapo_match *m,
}
/**
- * pipapo_free_scratch() - Free per-CPU map at original (not aligned) address
+ * pipapo_free_scratch() - Free per-CPU map at original address
* @m: Matching data
* @cpu: CPU number
*/
static void pipapo_free_scratch(const struct nft_pipapo_match *m, unsigned int cpu)
{
struct nft_pipapo_scratch *s;
- void *mem;
s = *per_cpu_ptr(m->scratch, cpu);
- if (!s)
- return;
- mem = s;
- mem -= s->align_off;
- kfree(mem);
+ kvfree(s);
}
/**
@@ -1236,14 +1217,10 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
for_each_possible_cpu(i) {
struct nft_pipapo_scratch *scratch;
-#ifdef NFT_PIPAPO_ALIGN
- void *scratch_aligned;
- u32 align_off;
-#endif
- scratch = kzalloc_node(struct_size(scratch, map,
- bsize_max * 2) +
- NFT_PIPAPO_ALIGN_HEADROOM,
- GFP_KERNEL_ACCOUNT, cpu_to_node(i));
+
+ scratch = kvzalloc_node(struct_size(scratch, __map, bsize_max * 2) +
+ NFT_PIPAPO_ALIGN_HEADROOM,
+ GFP_KERNEL_ACCOUNT, cpu_to_node(i));
if (!scratch) {
/* On failure, there's no need to undo previous
* allocations: this means that some scratch maps have
@@ -1256,23 +1233,7 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
}
pipapo_free_scratch(clone, i);
-
-#ifdef NFT_PIPAPO_ALIGN
- /* Align &scratch->map (not the struct itself): the extra
- * %NFT_PIPAPO_ALIGN_HEADROOM bytes passed to kzalloc_node()
- * above guarantee we can waste up to those bytes in order
- * to align the map field regardless of its offset within
- * the struct.
- */
- BUILD_BUG_ON(offsetof(struct nft_pipapo_scratch, map) > NFT_PIPAPO_ALIGN_HEADROOM);
-
- scratch_aligned = NFT_PIPAPO_LT_ALIGN(&scratch->map);
- scratch_aligned -= offsetof(struct nft_pipapo_scratch, map);
- align_off = scratch_aligned - (void *)scratch;
-
- scratch = scratch_aligned;
- scratch->align_off = align_off;
-#endif
+ local_lock_init(&scratch->bh_lock);
*per_cpu_ptr(clone->scratch, i) = scratch;
}
@@ -1345,8 +1306,8 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
else
end = start;
- dup = pipapo_get(net, set, m, start, genmask, tstamp, GFP_KERNEL);
- if (!IS_ERR(dup)) {
+ dup = pipapo_get(m, start, genmask, tstamp);
+ if (dup) {
/* Check if we already have the same exact entry */
const struct nft_data *dup_key, *dup_end;
@@ -1365,15 +1326,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
return -ENOTEMPTY;
}
- if (PTR_ERR(dup) == -ENOENT) {
- /* Look for partially overlapping entries */
- dup = pipapo_get(net, set, m, end, nft_genmask_next(net), tstamp,
- GFP_KERNEL);
- }
-
- if (PTR_ERR(dup) != -ENOENT) {
- if (IS_ERR(dup))
- return PTR_ERR(dup);
+ /* Look for partially overlapping entries */
+ dup = pipapo_get(m, end, nft_genmask_next(net), tstamp);
+ if (dup) {
*elem_priv = &dup->priv;
return -ENOTEMPTY;
}
@@ -1914,9 +1869,9 @@ nft_pipapo_deactivate(const struct net *net, const struct nft_set *set,
if (!m)
return NULL;
- e = pipapo_get(net, set, m, (const u8 *)elem->key.val.data,
- nft_genmask_next(net), nft_net_tstamp(net), GFP_KERNEL);
- if (IS_ERR(e))
+ e = pipapo_get(m, (const u8 *)elem->key.val.data,
+ nft_genmask_next(net), nft_net_tstamp(net));
+ if (!e)
return NULL;
nft_set_elem_change_active(net, set, &e->ext);
diff --git a/net/netfilter/nft_set_pipapo.h b/net/netfilter/nft_set_pipapo.h
index 4a2ff85ce1c4..eaab422aa56a 100644
--- a/net/netfilter/nft_set_pipapo.h
+++ b/net/netfilter/nft_set_pipapo.h
@@ -124,14 +124,14 @@ struct nft_pipapo_field {
/**
* struct nft_pipapo_scratch - percpu data used for lookup and matching
+ * @bh_lock: PREEMPT_RT local spinlock
* @map_index: Current working bitmap index, toggled between field matches
- * @align_off: Offset to get the originally allocated address
- * @map: store partial matching results during lookup
+ * @__map: store partial matching results during lookup
*/
struct nft_pipapo_scratch {
+ local_lock_t bh_lock;
u8 map_index;
- u32 align_off;
- unsigned long map[];
+ unsigned long __map[];
};
/**
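With align_off gone, the scratch map is no longer aligned at allocation time; the lookup paths shown earlier align it at the point of use and serialise access through the new nested-BH local lock. The resulting access pattern, condensed from pipapo_get_slow() above:

	struct nft_pipapo_scratch *scratch = *raw_cpu_ptr(m->scratch);
	unsigned long *map, *res_map, *fill_map;

	__local_lock_nested_bh(&scratch->bh_lock);
	map      = NFT_PIPAPO_LT_ALIGN(&scratch->__map[0]);
	res_map  = map + (scratch->map_index ? m->bsize_max : 0);
	fill_map = map + (scratch->map_index ? 0 : m->bsize_max);
	/* ... per-field bucket matching as in pipapo_get_slow() ... */
	__local_unlock_nested_bh(&scratch->bh_lock);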
diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
index be7c16c79f71..7ff90325c97f 100644
--- a/net/netfilter/nft_set_pipapo_avx2.c
+++ b/net/netfilter/nft_set_pipapo_avx2.c
@@ -1099,7 +1099,7 @@ bool nft_pipapo_avx2_estimate(const struct nft_set_desc *desc, u32 features,
desc->field_count < NFT_PIPAPO_MIN_FIELDS)
return false;
- if (!boot_cpu_has(X86_FEATURE_AVX2) || !boot_cpu_has(X86_FEATURE_AVX))
+ if (!boot_cpu_has(X86_FEATURE_AVX2))
return false;
est->size = pipapo_estimate_size(desc);
@@ -1133,74 +1133,59 @@ static inline void pipapo_resmap_init_avx2(const struct nft_pipapo_match *m, uns
}
/**
- * nft_pipapo_avx2_lookup() - Lookup function for AVX2 implementation
- * @net: Network namespace
- * @set: nftables API set representation
- * @key: nftables API element representation containing key data
- * @ext: nftables API extension pointer, filled with matching reference
+ * pipapo_get_avx2() - Lookup function for AVX2 implementation
+ * @m: Storage containing the set elements
+ * @data: Key data to be matched against existing elements
+ * @genmask: If set, check that element is active in given genmask
+ * @tstamp: Timestamp to check for expired elements
*
* For more details, see DOC: Theory of Operation in nft_set_pipapo.c.
*
* This implementation exploits the repetitive characteristic of the algorithm
* to provide a fast, vectorised version using the AVX2 SIMD instruction set.
*
- * Return: true on match, false otherwise.
+ * The caller must check that the FPU is usable.
+ * This function must be called with BH disabled.
+ *
+ * Return: pointer to &struct nft_pipapo_elem on match, NULL otherwise.
*/
-bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext)
+struct nft_pipapo_elem *pipapo_get_avx2(const struct nft_pipapo_match *m,
+ const u8 *data, u8 genmask,
+ u64 tstamp)
{
- struct nft_pipapo *priv = nft_set_priv(set);
struct nft_pipapo_scratch *scratch;
- u8 genmask = nft_genmask_cur(net);
- const struct nft_pipapo_match *m;
const struct nft_pipapo_field *f;
- const u8 *rp = (const u8 *)key;
- unsigned long *res, *fill;
+ unsigned long *res, *fill, *map;
bool map_index;
- int i, ret = 0;
-
- local_bh_disable();
+ int i;
- if (unlikely(!irq_fpu_usable())) {
- bool fallback_res = nft_pipapo_lookup(net, set, key, ext);
+ scratch = *raw_cpu_ptr(m->scratch);
+ if (unlikely(!scratch))
+ return NULL;
- local_bh_enable();
- return fallback_res;
- }
+ __local_lock_nested_bh(&scratch->bh_lock);
+ map_index = scratch->map_index;
+ map = NFT_PIPAPO_LT_ALIGN(&scratch->__map[0]);
+ res = map + (map_index ? m->bsize_max : 0);
+ fill = map + (map_index ? 0 : m->bsize_max);
- m = rcu_dereference(priv->match);
+ pipapo_resmap_init_avx2(m, res);
- /* This also protects access to all data related to scratch maps.
- *
- * Note that we don't need a valid MXCSR state for any of the
+ /* Note that we don't need a valid MXCSR state for any of the
* operations we use here, so pass 0 as mask and spare a LDMXCSR
* instruction.
*/
kernel_fpu_begin_mask(0);
- scratch = *raw_cpu_ptr(m->scratch);
- if (unlikely(!scratch)) {
- kernel_fpu_end();
- local_bh_enable();
- return false;
- }
-
- map_index = scratch->map_index;
-
- res = scratch->map + (map_index ? m->bsize_max : 0);
- fill = scratch->map + (map_index ? 0 : m->bsize_max);
-
- pipapo_resmap_init_avx2(m, res);
-
nft_pipapo_avx2_prepare();
-next_match:
nft_pipapo_for_each_field(f, i, m) {
bool last = i == m->field_count - 1, first = !i;
+ int ret = 0;
#define NFT_SET_PIPAPO_AVX2_LOOKUP(b, n) \
(ret = nft_pipapo_avx2_lookup_##b##b_##n(res, fill, f, \
- ret, rp, \
+ ret, data, \
first, last))
if (likely(f->bb == 8)) {
@@ -1216,7 +1201,7 @@ next_match:
NFT_SET_PIPAPO_AVX2_LOOKUP(8, 16);
} else {
ret = nft_pipapo_avx2_lookup_slow(m, res, fill, f,
- ret, rp,
+ ret, data,
first, last);
}
} else {
@@ -1232,7 +1217,7 @@ next_match:
NFT_SET_PIPAPO_AVX2_LOOKUP(4, 32);
} else {
ret = nft_pipapo_avx2_lookup_slow(m, res, fill, f,
- ret, rp,
+ ret, data,
first, last);
}
}
@@ -1240,29 +1225,78 @@ next_match:
#undef NFT_SET_PIPAPO_AVX2_LOOKUP
- if (ret < 0)
- goto out;
+next_match:
+ if (ret < 0) {
+ scratch->map_index = map_index;
+ kernel_fpu_end();
+ __local_unlock_nested_bh(&scratch->bh_lock);
+ return NULL;
+ }
if (last) {
- *ext = &f->mt[ret].e->ext;
- if (unlikely(nft_set_elem_expired(*ext) ||
- !nft_set_elem_active(*ext, genmask))) {
- ret = 0;
+ struct nft_pipapo_elem *e;
+
+ e = f->mt[ret].e;
+ if (unlikely(__nft_set_elem_expired(&e->ext, tstamp) ||
+ !nft_set_elem_active(&e->ext, genmask))) {
+ ret = pipapo_refill(res, f->bsize, f->rules,
+ fill, f->mt, last);
goto next_match;
}
- goto out;
+ scratch->map_index = map_index;
+ kernel_fpu_end();
+ __local_unlock_nested_bh(&scratch->bh_lock);
+ return e;
}
+ map_index = !map_index;
swap(res, fill);
- rp += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
+ data += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
}
-out:
- if (i % 2)
- scratch->map_index = !map_index;
kernel_fpu_end();
+ __local_unlock_nested_bh(&scratch->bh_lock);
+ return NULL;
+}
+
+/**
+ * nft_pipapo_avx2_lookup() - Dataplane frontend for AVX2 implementation
+ * @net: Network namespace
+ * @set: nftables API set representation
+ * @key: nftables API element representation containing key data
+ *
+ * This function is called from the data path. It will search for
+ * an element matching the given key in the current active copy using
+ * the AVX2 routines if the FPU is usable or fall back to the generic
+ * implementation of the algorithm otherwise.
+ *
+ * Return: nftables API extension pointer or NULL if no match.
+ */
+const struct nft_set_ext *
+nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key)
+{
+ struct nft_pipapo *priv = nft_set_priv(set);
+ const struct nft_pipapo_match *m;
+ const u8 *rp = (const u8 *)key;
+ const struct nft_pipapo_elem *e;
+
+ local_bh_disable();
+
+ if (unlikely(!irq_fpu_usable())) {
+ const struct nft_set_ext *ext;
+
+ ext = nft_pipapo_lookup(net, set, key);
+
+ local_bh_enable();
+ return ext;
+ }
+
+ m = rcu_dereference(priv->match);
+
+ e = pipapo_get_avx2(m, rp, 0, get_jiffies_64());
local_bh_enable();
- return ret >= 0;
+ return e ? &e->ext : NULL;
}
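As the kdoc above notes, the frontend only dereferences the active match copy and calls the AVX2 helper when the FPU is usable in the current context; otherwise it shares the generic lookup path. A hedged sketch of that dispatch shape, where simd_usable(), lookup_generic() and lookup_simd() are invented stand-ins:

#include <stdbool.h>
#include <stdio.h>

static bool simd_usable(void)
{
	return false;   /* pretend the FPU/SIMD context is not usable here */
}

static const char *lookup_generic(const char *key) { return key; }
static const char *lookup_simd(const char *key)    { return key; }

static const char *lookup(const char *key)
{
	if (!simd_usable())
		return lookup_generic(key);   /* shared fallback path */
	return lookup_simd(key);              /* vectorised fast path */
}

int main(void)
{
	printf("%s\n", lookup("example-key"));
	return 0;
}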
diff --git a/net/netfilter/nft_set_pipapo_avx2.h b/net/netfilter/nft_set_pipapo_avx2.h
index dbb6aaca8a7a..c2999b63da3f 100644
--- a/net/netfilter/nft_set_pipapo_avx2.h
+++ b/net/netfilter/nft_set_pipapo_avx2.h
@@ -5,8 +5,12 @@
#include <asm/fpu/xstate.h>
#define NFT_PIPAPO_ALIGN (XSAVE_YMM_SIZE / BITS_PER_BYTE)
+struct nft_pipapo_match;
bool nft_pipapo_avx2_estimate(const struct nft_set_desc *desc, u32 features,
struct nft_set_estimate *est);
+struct nft_pipapo_elem *pipapo_get_avx2(const struct nft_pipapo_match *m,
+ const u8 *data, u8 genmask,
+ u64 tstamp);
#endif /* defined(CONFIG_X86_64) && !defined(CONFIG_UML) */
#endif /* _NFT_SET_PIPAPO_AVX2_H */
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 2e8ef16ff191..ca594161b840 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -52,9 +52,9 @@ static bool nft_rbtree_elem_expired(const struct nft_rbtree_elem *rbe)
return nft_set_elem_expired(&rbe->ext);
}
-static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext,
- unsigned int seq)
+static const struct nft_set_ext *
+__nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, unsigned int seq)
{
struct nft_rbtree *priv = nft_set_priv(set);
const struct nft_rbtree_elem *rbe, *interval = NULL;
@@ -65,7 +65,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
parent = rcu_dereference_raw(priv->root.rb_node);
while (parent != NULL) {
if (read_seqcount_retry(&priv->count, seq))
- return false;
+ return NULL;
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
@@ -77,7 +77,9 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
nft_rbtree_interval_end(rbe) &&
nft_rbtree_interval_start(interval))
continue;
- interval = rbe;
+ if (nft_set_elem_active(&rbe->ext, genmask) &&
+ !nft_rbtree_elem_expired(rbe))
+ interval = rbe;
} else if (d > 0)
parent = rcu_dereference_raw(parent->rb_right);
else {
@@ -87,50 +89,46 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
}
if (nft_rbtree_elem_expired(rbe))
- return false;
+ return NULL;
if (nft_rbtree_interval_end(rbe)) {
if (nft_set_is_anonymous(set))
- return false;
+ return NULL;
parent = rcu_dereference_raw(parent->rb_left);
interval = NULL;
continue;
}
- *ext = &rbe->ext;
- return true;
+ return &rbe->ext;
}
}
if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
- nft_set_elem_active(&interval->ext, genmask) &&
- !nft_rbtree_elem_expired(interval) &&
- nft_rbtree_interval_start(interval)) {
- *ext = &interval->ext;
- return true;
- }
+ nft_rbtree_interval_start(interval))
+ return &interval->ext;
- return false;
+ return NULL;
}
INDIRECT_CALLABLE_SCOPE
-bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext)
+const struct nft_set_ext *
+nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key)
{
struct nft_rbtree *priv = nft_set_priv(set);
unsigned int seq = read_seqcount_begin(&priv->count);
- bool ret;
+ const struct nft_set_ext *ext;
- ret = __nft_rbtree_lookup(net, set, key, ext, seq);
- if (ret || !read_seqcount_retry(&priv->count, seq))
- return ret;
+ ext = __nft_rbtree_lookup(net, set, key, seq);
+ if (ext || !read_seqcount_retry(&priv->count, seq))
+ return ext;
read_lock_bh(&priv->lock);
seq = read_seqcount_begin(&priv->count);
- ret = __nft_rbtree_lookup(net, set, key, ext, seq);
+ ext = __nft_rbtree_lookup(net, set, key, seq);
read_unlock_bh(&priv->lock);
- return ret;
+ return ext;
}
static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
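nft_rbtree_lookup() above first walks the tree locklessly under a sequence count and only takes the read lock when read_seqcount_retry() reports that a writer raced with the traversal. A userspace analog of that optimistic-read-then-retry pattern, built from C11 atomics rather than the kernel seqcount API, might look like this:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int seq;   /* even = stable, odd = write in progress */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int value;

static unsigned int read_begin(void)
{
	unsigned int s;

	do {
		s = atomic_load_explicit(&seq, memory_order_acquire);
	} while (s & 1);                  /* wait out an in-flight writer */
	return s;
}

static int read_retry(unsigned int s)
{
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&seq, memory_order_relaxed) != s;
}

static int read_value(void)
{
	unsigned int s = read_begin();
	int v = value;

	if (!read_retry(s))
		return v;                 /* lockless read was consistent */

	pthread_mutex_lock(&lock);        /* slow path: a writer raced with us */
	v = value;
	pthread_mutex_unlock(&lock);
	return v;
}

int main(void)
{
	value = 42;
	printf("%d\n", read_value());
	return 0;
}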
@@ -586,15 +584,14 @@ nft_rbtree_deactivate(const struct net *net, const struct nft_set *set,
return NULL;
}
-static void nft_rbtree_walk(const struct nft_ctx *ctx,
- struct nft_set *set,
- struct nft_set_iter *iter)
+static void nft_rbtree_do_walk(const struct nft_ctx *ctx,
+ struct nft_set *set,
+ struct nft_set_iter *iter)
{
struct nft_rbtree *priv = nft_set_priv(set);
struct nft_rbtree_elem *rbe;
struct rb_node *node;
- read_lock_bh(&priv->lock);
for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
rbe = rb_entry(node, struct nft_rbtree_elem, node);
@@ -602,14 +599,34 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
goto cont;
iter->err = iter->fn(ctx, set, iter, &rbe->priv);
- if (iter->err < 0) {
- read_unlock_bh(&priv->lock);
+ if (iter->err < 0)
return;
- }
cont:
iter->count++;
}
- read_unlock_bh(&priv->lock);
+}
+
+static void nft_rbtree_walk(const struct nft_ctx *ctx,
+ struct nft_set *set,
+ struct nft_set_iter *iter)
+{
+ struct nft_rbtree *priv = nft_set_priv(set);
+
+ switch (iter->type) {
+ case NFT_ITER_UPDATE:
+ lockdep_assert_held(&nft_pernet(ctx->net)->commit_mutex);
+ nft_rbtree_do_walk(ctx, set, iter);
+ break;
+ case NFT_ITER_READ:
+ read_lock_bh(&priv->lock);
+ nft_rbtree_do_walk(ctx, set, iter);
+ read_unlock_bh(&priv->lock);
+ break;
+ default:
+ iter->err = -EINVAL;
+ WARN_ON_ONCE(1);
+ break;
+ }
}
static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
index 35d0409b0095..36affbb697c2 100644
--- a/net/netfilter/nft_socket.c
+++ b/net/netfilter/nft_socket.c
@@ -217,7 +217,7 @@ static int nft_socket_init(const struct nft_ctx *ctx,
level += err;
/* Implies a giant cgroup tree */
- if (WARN_ON_ONCE(level > 255))
+ if (level > 255)
return -EOPNOTSUPP;
priv->level = level;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 709840612f0d..90b7630421c4 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1317,12 +1317,13 @@ void xt_compat_unlock(u_int8_t af)
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif
-DEFINE_PER_CPU(seqcount_t, xt_recseq);
-EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
-
struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);
+#ifdef CONFIG_NETFILTER_XTABLES_LEGACY
+DEFINE_PER_CPU(seqcount_t, xt_recseq);
+EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
+
static int xt_jumpstack_alloc(struct xt_table_info *i)
{
unsigned int size;
@@ -1514,6 +1515,7 @@ void *xt_unregister_table(struct xt_table *table)
return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);
+#endif
#ifdef CONFIG_PROC_FS
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
@@ -1897,6 +1899,7 @@ void xt_proto_fini(struct net *net, u_int8_t af)
}
EXPORT_SYMBOL_GPL(xt_proto_fini);
+#ifdef CONFIG_NETFILTER_XTABLES_LEGACY
/**
* xt_percpu_counter_alloc - allocate x_tables rule counter
*
@@ -1951,6 +1954,7 @@ void xt_percpu_counter_free(struct xt_counters *counters)
free_percpu((void __percpu *)pcnt);
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
+#endif
static int __net_init xt_net_init(struct net *net)
{
@@ -1983,8 +1987,10 @@ static int __init xt_init(void)
unsigned int i;
int rv;
- for_each_possible_cpu(i) {
- seqcount_init(&per_cpu(xt_recseq, i));
+ if (IS_ENABLED(CONFIG_NETFILTER_XTABLES_LEGACY)) {
+ for_each_possible_cpu(i) {
+ seqcount_init(&per_cpu(xt_recseq, i));
+ }
}
xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
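The per-CPU seqcount initialisation above is wrapped in IS_ENABLED(CONFIG_NETFILTER_XTABLES_LEGACY), so the loop compiles away entirely when legacy x_tables support is disabled. A minimal illustration of guarding init work behind a compile-time flag; FEATURE_LEGACY stands in for the Kconfig option:

#include <stdio.h>

#define FEATURE_LEGACY 0   /* flip to 1 to build the legacy path in */

static void legacy_init(void)
{
	puts("legacy counters initialised");
}

int main(void)
{
	if (FEATURE_LEGACY)    /* constant condition: dead code is eliminated */
		legacy_init();
	puts("common init done");
	return 0;
}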
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
index 7c6bf1c16813..0ca1cdfc4095 100644
--- a/net/netfilter/xt_nfacct.c
+++ b/net/netfilter/xt_nfacct.c
@@ -38,8 +38,8 @@ nfacct_mt_checkentry(const struct xt_mtchk_param *par)
nfacct = nfnl_acct_find_get(par->net, info->name);
if (nfacct == NULL) {
- pr_info_ratelimited("accounting object `%s' does not exists\n",
- info->name);
+ pr_info_ratelimited("accounting object `%.*s' does not exist\n",
+ NFACCT_NAME_MAX, info->name);
return -ENOENT;
}
info->nfacct = nfacct;
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c
index 0d04d23aafe7..0da652844dd6 100644
--- a/net/netlabel/netlabel_user.c
+++ b/net/netlabel/netlabel_user.c
@@ -84,7 +84,6 @@ struct audit_buffer *netlbl_audit_start_common(int type,
struct netlbl_audit *audit_info)
{
struct audit_buffer *audit_buf;
- struct lsm_context ctx;
if (audit_enabled == AUDIT_OFF)
return NULL;
@@ -96,12 +95,7 @@ struct audit_buffer *netlbl_audit_start_common(int type,
audit_log_format(audit_buf, "netlabel: auid=%u ses=%u",
from_kuid(&init_user_ns, audit_info->loginuid),
audit_info->sessionid);
-
- if (lsmprop_is_set(&audit_info->prop) &&
- security_lsmprop_to_secctx(&audit_info->prop, &ctx) > 0) {
- audit_log_format(audit_buf, " subj=%s", ctx.context);
- security_release_secctx(&ctx);
- }
+ audit_log_subj_ctx(audit_buf, &audit_info->prop);
return audit_buf;
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index e8972a857e51..2b46c0cd752a 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -356,7 +356,7 @@ static void netlink_overrun(struct sock *sk)
sk_error_report(sk);
}
}
- atomic_inc(&sk->sk_drops);
+ sk_drops_inc(sk);
}
static void netlink_rcv_wake(struct sock *sk)
@@ -387,7 +387,6 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
WARN_ON(skb->sk != NULL);
skb->sk = sk;
skb->destructor = netlink_skb_destructor;
- atomic_add(skb->truesize, &sk->sk_rmem_alloc);
sk_mem_charge(sk, skb->truesize);
}
@@ -1212,41 +1211,48 @@ struct sk_buff *netlink_alloc_large_skb(unsigned int size, int broadcast)
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
long *timeo, struct sock *ssk)
{
+ DECLARE_WAITQUEUE(wait, current);
struct netlink_sock *nlk;
+ unsigned int rmem;
nlk = nlk_sk(sk);
+ rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc);
- if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
- test_bit(NETLINK_S_CONGESTED, &nlk->state))) {
- DECLARE_WAITQUEUE(wait, current);
- if (!*timeo) {
- if (!ssk || netlink_is_kernel(ssk))
- netlink_overrun(sk);
- sock_put(sk);
- kfree_skb(skb);
- return -EAGAIN;
- }
-
- __set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&nlk->wait, &wait);
+ if ((rmem == skb->truesize || rmem <= READ_ONCE(sk->sk_rcvbuf)) &&
+ !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
+ netlink_skb_set_owner_r(skb, sk);
+ return 0;
+ }
- if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
- test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
- !sock_flag(sk, SOCK_DEAD))
- *timeo = schedule_timeout(*timeo);
+ atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&nlk->wait, &wait);
+ if (!*timeo) {
+ if (!ssk || netlink_is_kernel(ssk))
+ netlink_overrun(sk);
sock_put(sk);
+ kfree_skb(skb);
+ return -EAGAIN;
+ }
- if (signal_pending(current)) {
- kfree_skb(skb);
- return sock_intr_errno(*timeo);
- }
- return 1;
+ __set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&nlk->wait, &wait);
+ rmem = atomic_read(&sk->sk_rmem_alloc);
+
+ if (((rmem && rmem + skb->truesize > READ_ONCE(sk->sk_rcvbuf)) ||
+ test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
+ !sock_flag(sk, SOCK_DEAD))
+ *timeo = schedule_timeout(*timeo);
+
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&nlk->wait, &wait);
+ sock_put(sk);
+
+ if (signal_pending(current)) {
+ kfree_skb(skb);
+ return sock_intr_errno(*timeo);
}
- netlink_skb_set_owner_r(skb, sk);
- return 0;
+
+ return 1;
}
static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
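netlink_attachskb() now charges sk_rmem_alloc up front with atomic_add_return() and rolls the charge back with atomic_sub() when the new total would exceed the receive buffer (unless this skb is the only charge), instead of reading the counter first and charging later. A small sketch of that optimistic charge/rollback accounting, with an assumed limit:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned int rmem_alloc;
#define RCVBUF_LIMIT 4096u   /* assumed receive-buffer limit for the example */

static bool try_charge(unsigned int truesize)
{
	unsigned int rmem = atomic_fetch_add(&rmem_alloc, truesize) + truesize;

	if (rmem == truesize || rmem <= RCVBUF_LIMIT)
		return true;                        /* charge accepted */

	atomic_fetch_sub(&rmem_alloc, truesize);    /* roll back, caller waits or fails */
	return false;
}

int main(void)
{
	printf("first:  %d\n", try_charge(3000));
	printf("second: %d\n", try_charge(3000));   /* would exceed the limit */
	return 0;
}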
@@ -1307,6 +1313,7 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
ret = -ECONNREFUSED;
if (nlk->netlink_rcv != NULL) {
ret = skb->len;
+ atomic_add(skb->truesize, &sk->sk_rmem_alloc);
netlink_skb_set_owner_r(skb, sk);
NETLINK_CB(skb).sk = ssk;
netlink_deliver_tap_kernel(sk, ssk, skb);
@@ -1383,13 +1390,19 @@ EXPORT_SYMBOL_GPL(netlink_strict_get_check);
static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
struct netlink_sock *nlk = nlk_sk(sk);
+ unsigned int rmem, rcvbuf;
- if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+ rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc);
+ rcvbuf = READ_ONCE(sk->sk_rcvbuf);
+
+ if ((rmem == skb->truesize || rmem <= rcvbuf) &&
!test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
netlink_skb_set_owner_r(skb, sk);
__netlink_sendskb(sk, skb);
- return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
+ return rmem > (rcvbuf >> 1);
}
+
+ atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
return -1;
}
@@ -2245,6 +2258,7 @@ static int netlink_dump(struct sock *sk, bool lock_taken)
struct netlink_ext_ack extack = {};
struct netlink_callback *cb;
struct sk_buff *skb = NULL;
+ unsigned int rmem, rcvbuf;
size_t max_recvmsg_len;
struct module *module;
int err = -ENOBUFS;
@@ -2258,9 +2272,6 @@ static int netlink_dump(struct sock *sk, bool lock_taken)
goto errout_skb;
}
- if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
- goto errout_skb;
-
/* NLMSG_GOODSIZE is small to avoid high order allocations being
* required, but it makes sense to _attempt_ a 32KiB allocation
* to reduce number of system calls on dump operations, if user
@@ -2283,6 +2294,13 @@ static int netlink_dump(struct sock *sk, bool lock_taken)
if (!skb)
goto errout_skb;
+ rcvbuf = READ_ONCE(sk->sk_rcvbuf);
+ rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc);
+ if (rmem != skb->truesize && rmem >= rcvbuf) {
+ atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+ goto errout_skb;
+ }
+
/* Trim skb to allocated size. User is expected to provide buffer as
* large as max(min_dump_alloc, 32KiB (max_recvmsg_len capped at
* netlink_recvmsg())). dump will pack as many smaller messages as
@@ -2455,7 +2473,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
unsigned int flags = 0;
size_t tlvlen;
- /* Error messages get the original request appened, unless the user
+ /* Error messages get the original request appended, unless the user
* requests to cap the error message, and get extra error data if
* requested.
*/
@@ -2693,7 +2711,7 @@ static int netlink_native_seq_show(struct seq_file *seq, void *v)
sk_wmem_alloc_get(s),
READ_ONCE(nlk->cb_running),
refcount_read(&s->sk_refcnt),
- atomic_read(&s->sk_drops),
+ sk_drops_read(s),
sock_i_ino(s)
);
@@ -2869,8 +2887,7 @@ static const struct rhashtable_params netlink_rhashtable_params = {
};
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
-BTF_ID_LIST(btf_netlink_sock_id)
-BTF_ID(struct, netlink_sock)
+BTF_ID_LIST_SINGLE(btf_netlink_sock_id, struct, netlink_sock)
static const struct bpf_iter_seq_info netlink_seq_info = {
.seq_ops = &netlink_seq_ops,
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
index 61981e01fd6f..b8e58132e8af 100644
--- a/net/netlink/diag.c
+++ b/net/netlink/diag.c
@@ -168,7 +168,7 @@ mc_list:
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI,
- __sock_i_ino(sk)) < 0) {
+ sock_i_ino(sk)) < 0) {
ret = 1;
break;
}
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 104732d34543..978c129c6095 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1836,6 +1836,9 @@ static int genl_bind(struct net *net, int group)
!ns_capable(net->user_ns, CAP_SYS_ADMIN))
ret = -EPERM;
+ if (ret)
+ break;
+
if (family->bind)
family->bind(i);
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index a818eff27e6b..418b84e2b260 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -27,11 +27,16 @@
/* Handle NCI Notification packets */
-static void nci_core_reset_ntf_packet(struct nci_dev *ndev,
- const struct sk_buff *skb)
+static int nci_core_reset_ntf_packet(struct nci_dev *ndev,
+ const struct sk_buff *skb)
{
/* Handle NCI 2.x core reset notification */
- const struct nci_core_reset_ntf *ntf = (void *)skb->data;
+ const struct nci_core_reset_ntf *ntf;
+
+ if (skb->len < sizeof(struct nci_core_reset_ntf))
+ return -EINVAL;
+
+ ntf = (struct nci_core_reset_ntf *)skb->data;
ndev->nci_ver = ntf->nci_ver;
pr_debug("nci_ver 0x%x, config_status 0x%x\n",
@@ -42,15 +47,22 @@ static void nci_core_reset_ntf_packet(struct nci_dev *ndev,
__le32_to_cpu(ntf->manufact_specific_info);
nci_req_complete(ndev, NCI_STATUS_OK);
+
+ return 0;
}
-static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
- struct sk_buff *skb)
+static int nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
{
- struct nci_core_conn_credit_ntf *ntf = (void *) skb->data;
+ struct nci_core_conn_credit_ntf *ntf;
struct nci_conn_info *conn_info;
int i;
+ if (skb->len < sizeof(struct nci_core_conn_credit_ntf))
+ return -EINVAL;
+
+ ntf = (struct nci_core_conn_credit_ntf *)skb->data;
+
pr_debug("num_entries %d\n", ntf->num_entries);
if (ntf->num_entries > NCI_MAX_NUM_CONN)
@@ -68,7 +80,7 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
conn_info = nci_get_conn_info_by_conn_id(ndev,
ntf->conn_entries[i].conn_id);
if (!conn_info)
- return;
+ return 0;
atomic_add(ntf->conn_entries[i].credits,
&conn_info->credits_cnt);
@@ -77,12 +89,19 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
/* trigger the next tx */
if (!skb_queue_empty(&ndev->tx_q))
queue_work(ndev->tx_wq, &ndev->tx_work);
+
+ return 0;
}
-static void nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
- const struct sk_buff *skb)
+static int nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
+ const struct sk_buff *skb)
{
- __u8 status = skb->data[0];
+ __u8 status;
+
+ if (skb->len < 1)
+ return -EINVAL;
+
+ status = skb->data[0];
pr_debug("status 0x%x\n", status);
@@ -91,12 +110,19 @@ static void nci_core_generic_error_ntf_packet(struct nci_dev *ndev,
(the state remains the same) */
nci_req_complete(ndev, status);
}
+
+ return 0;
}
-static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
- struct sk_buff *skb)
+static int nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
{
- struct nci_core_intf_error_ntf *ntf = (void *) skb->data;
+ struct nci_core_intf_error_ntf *ntf;
+
+ if (skb->len < sizeof(struct nci_core_intf_error_ntf))
+ return -EINVAL;
+
+ ntf = (struct nci_core_intf_error_ntf *)skb->data;
ntf->conn_id = nci_conn_id(&ntf->conn_id);
@@ -105,6 +131,8 @@ static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev,
/* complete the data exchange transaction, if exists */
if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
nci_data_exchange_complete(ndev, NULL, ntf->conn_id, -EIO);
+
+ return 0;
}
static const __u8 *
@@ -329,13 +357,18 @@ void nci_clear_target_list(struct nci_dev *ndev)
ndev->n_targets = 0;
}
-static void nci_rf_discover_ntf_packet(struct nci_dev *ndev,
- const struct sk_buff *skb)
+static int nci_rf_discover_ntf_packet(struct nci_dev *ndev,
+ const struct sk_buff *skb)
{
struct nci_rf_discover_ntf ntf;
- const __u8 *data = skb->data;
+ const __u8 *data;
bool add_target = true;
+ if (skb->len < sizeof(struct nci_rf_discover_ntf))
+ return -EINVAL;
+
+ data = skb->data;
+
ntf.rf_discovery_id = *data++;
ntf.rf_protocol = *data++;
ntf.rf_tech_and_mode = *data++;
@@ -390,6 +423,8 @@ static void nci_rf_discover_ntf_packet(struct nci_dev *ndev,
nfc_targets_found(ndev->nfc_dev, ndev->targets,
ndev->n_targets);
}
+
+ return 0;
}
static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
@@ -553,14 +588,19 @@ static int nci_store_ats_nfc_iso_dep(struct nci_dev *ndev,
return NCI_STATUS_OK;
}
-static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
- const struct sk_buff *skb)
+static int nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
+ const struct sk_buff *skb)
{
struct nci_conn_info *conn_info;
struct nci_rf_intf_activated_ntf ntf;
- const __u8 *data = skb->data;
+ const __u8 *data;
int err = NCI_STATUS_OK;
+ if (skb->len < sizeof(struct nci_rf_intf_activated_ntf))
+ return -EINVAL;
+
+ data = skb->data;
+
ntf.rf_discovery_id = *data++;
ntf.rf_interface = *data++;
ntf.rf_protocol = *data++;
@@ -667,7 +707,7 @@ exit:
if (err == NCI_STATUS_OK) {
conn_info = ndev->rf_conn_info;
if (!conn_info)
- return;
+ return 0;
conn_info->max_pkt_payload_len = ntf.max_data_pkt_payload_size;
conn_info->initial_num_credits = ntf.initial_num_credits;
@@ -721,19 +761,26 @@ listen:
pr_err("error when signaling tm activation\n");
}
}
+
+ return 0;
}
-static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
- const struct sk_buff *skb)
+static int nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
+ const struct sk_buff *skb)
{
const struct nci_conn_info *conn_info;
- const struct nci_rf_deactivate_ntf *ntf = (void *)skb->data;
+ const struct nci_rf_deactivate_ntf *ntf;
+
+ if (skb->len < sizeof(struct nci_rf_deactivate_ntf))
+ return -EINVAL;
+
+ ntf = (struct nci_rf_deactivate_ntf *)skb->data;
pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason);
conn_info = ndev->rf_conn_info;
if (!conn_info)
- return;
+ return 0;
/* drop tx data queue */
skb_queue_purge(&ndev->tx_q);
@@ -765,14 +812,20 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
}
nci_req_complete(ndev, NCI_STATUS_OK);
+
+ return 0;
}
-static void nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
- const struct sk_buff *skb)
+static int nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
+ const struct sk_buff *skb)
{
u8 status = NCI_STATUS_OK;
- const struct nci_nfcee_discover_ntf *nfcee_ntf =
- (struct nci_nfcee_discover_ntf *)skb->data;
+ const struct nci_nfcee_discover_ntf *nfcee_ntf;
+
+ if (skb->len < sizeof(struct nci_nfcee_discover_ntf))
+ return -EINVAL;
+
+ nfcee_ntf = (struct nci_nfcee_discover_ntf *)skb->data;
/* NFCForum NCI 9.2.1 HCI Network Specific Handling
* If the NFCC supports the HCI Network, it SHALL return one,
@@ -783,6 +836,8 @@ static void nci_nfcee_discover_ntf_packet(struct nci_dev *ndev,
ndev->cur_params.id = nfcee_ntf->nfcee_id;
nci_req_complete(ndev, status);
+
+ return 0;
}
void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
@@ -809,35 +864,43 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
switch (ntf_opcode) {
case NCI_OP_CORE_RESET_NTF:
- nci_core_reset_ntf_packet(ndev, skb);
+ if (nci_core_reset_ntf_packet(ndev, skb))
+ goto end;
break;
case NCI_OP_CORE_CONN_CREDITS_NTF:
- nci_core_conn_credits_ntf_packet(ndev, skb);
+ if (nci_core_conn_credits_ntf_packet(ndev, skb))
+ goto end;
break;
case NCI_OP_CORE_GENERIC_ERROR_NTF:
- nci_core_generic_error_ntf_packet(ndev, skb);
+ if (nci_core_generic_error_ntf_packet(ndev, skb))
+ goto end;
break;
case NCI_OP_CORE_INTF_ERROR_NTF:
- nci_core_conn_intf_error_ntf_packet(ndev, skb);
+ if (nci_core_conn_intf_error_ntf_packet(ndev, skb))
+ goto end;
break;
case NCI_OP_RF_DISCOVER_NTF:
- nci_rf_discover_ntf_packet(ndev, skb);
+ if (nci_rf_discover_ntf_packet(ndev, skb))
+ goto end;
break;
case NCI_OP_RF_INTF_ACTIVATED_NTF:
- nci_rf_intf_activated_ntf_packet(ndev, skb);
+ if (nci_rf_intf_activated_ntf_packet(ndev, skb))
+ goto end;
break;
case NCI_OP_RF_DEACTIVATE_NTF:
- nci_rf_deactivate_ntf_packet(ndev, skb);
+ if (nci_rf_deactivate_ntf_packet(ndev, skb))
+ goto end;
break;
case NCI_OP_NFCEE_DISCOVER_NTF:
- nci_nfcee_discover_ntf_packet(ndev, skb);
+ if (nci_nfcee_discover_ntf_packet(ndev, skb))
+ goto end;
break;
case NCI_OP_RF_NFCEE_ACTION_NTF:
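The notification handlers above now verify that skb->len covers the fixed-size header before casting skb->data, and return -EINVAL so the dispatcher skips the packet instead of reading past the buffer. A userspace sketch of the same bounds-checked parse; the struct layout here is purely illustrative:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct example_ntf {                /* hypothetical notification layout */
	uint8_t version;
	uint8_t status;
	uint32_t info;
} __attribute__((packed));

static int parse_ntf(const uint8_t *data, size_t len, struct example_ntf *out)
{
	if (len < sizeof(*out))
		return -EINVAL;         /* too short: do not touch the fields */

	memcpy(out, data, sizeof(*out));   /* safe, bounded copy */
	return 0;
}

int main(void)
{
	uint8_t short_buf[2] = { 0x20, 0x00 };
	struct example_ntf ntf;

	printf("short buffer -> %d\n", parse_ntf(short_buf, sizeof(short_buf), &ntf));
	return 0;
}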
diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c
index ed1508a9e093..aab107727f18 100644
--- a/net/nfc/nci/uart.c
+++ b/net/nfc/nci/uart.c
@@ -119,22 +119,22 @@ static int nci_uart_set_driver(struct tty_struct *tty, unsigned int driver)
memcpy(nu, nci_uart_drivers[driver], sizeof(struct nci_uart));
nu->tty = tty;
- tty->disc_data = nu;
skb_queue_head_init(&nu->tx_q);
INIT_WORK(&nu->write_work, nci_uart_write_work);
spin_lock_init(&nu->rx_lock);
ret = nu->ops.open(nu);
if (ret) {
- tty->disc_data = NULL;
kfree(nu);
+ return ret;
} else if (!try_module_get(nu->owner)) {
nu->ops.close(nu);
- tty->disc_data = NULL;
kfree(nu);
return -ENOENT;
}
- return ret;
+ tty->disc_data = nu;
+
+ return 0;
}
/* ------ LDISC part ------ */
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 6a40b8d0350d..a18e2c503da6 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -1192,7 +1192,7 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
continue;
uri = nla_data(sdp_attrs[NFC_SDP_ATTR_URI]);
- if (uri == NULL || *uri == 0)
+ if (*uri == 0)
continue;
tid = local->sdreq_next_tid++;
@@ -1540,10 +1540,6 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info)
}
apdu = nla_data(info->attrs[NFC_ATTR_SE_APDU]);
- if (!apdu) {
- rc = -EINVAL;
- goto put_dev;
- }
ctx = kzalloc(sizeof(struct se_io_ctx), GFP_KERNEL);
if (!ctx) {
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index e7269a3eec79..2832e0794197 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -39,16 +39,14 @@
#include "flow_netlink.h"
#include "openvswitch_trace.h"
-DEFINE_PER_CPU(struct ovs_pcpu_storage, ovs_pcpu_storage) = {
- .bh_lock = INIT_LOCAL_LOCK(bh_lock),
-};
+struct ovs_pcpu_storage __percpu *ovs_pcpu_storage;
/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
* space. Return NULL if out of key spaces.
*/
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
- struct ovs_pcpu_storage *ovs_pcpu = this_cpu_ptr(&ovs_pcpu_storage);
+ struct ovs_pcpu_storage *ovs_pcpu = this_cpu_ptr(ovs_pcpu_storage);
struct action_flow_keys *keys = &ovs_pcpu->flow_keys;
int level = ovs_pcpu->exec_level;
struct sw_flow_key *key = NULL;
@@ -94,7 +92,7 @@ static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
const struct nlattr *actions,
const int actions_len)
{
- struct action_fifo *fifo = this_cpu_ptr(&ovs_pcpu_storage.action_fifos);
+ struct action_fifo *fifo = this_cpu_ptr(&ovs_pcpu_storage->action_fifos);
struct deferred_action *da;
da = action_fifo_put(fifo);
@@ -755,7 +753,7 @@ static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
static int ovs_vport_output(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
- struct ovs_frag_data *data = this_cpu_ptr(&ovs_pcpu_storage.frag_data);
+ struct ovs_frag_data *data = this_cpu_ptr(&ovs_pcpu_storage->frag_data);
struct vport *vport = data->vport;
if (skb_cow_head(skb, data->l2_len) < 0) {
@@ -807,7 +805,7 @@ static void prepare_frag(struct vport *vport, struct sk_buff *skb,
unsigned int hlen = skb_network_offset(skb);
struct ovs_frag_data *data;
- data = this_cpu_ptr(&ovs_pcpu_storage.frag_data);
+ data = this_cpu_ptr(&ovs_pcpu_storage->frag_data);
data->dst = skb->_skb_refdst;
data->vport = vport;
data->cb = *OVS_CB(skb);
@@ -943,8 +941,10 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
break;
case OVS_USERSPACE_ATTR_PID:
- if (dp->user_features &
- OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
+ if (OVS_CB(skb)->upcall_pid)
+ upcall.portid = OVS_CB(skb)->upcall_pid;
+ else if (dp->user_features &
+ OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
upcall.portid =
ovs_dp_get_upcall_portid(dp,
smp_processor_id());
@@ -1566,16 +1566,15 @@ static int clone_execute(struct datapath *dp, struct sk_buff *skb,
clone = clone_flow_key ? clone_key(key) : key;
if (clone) {
int err = 0;
-
if (actions) { /* Sample action */
if (clone_flow_key)
- __this_cpu_inc(ovs_pcpu_storage.exec_level);
+ __this_cpu_inc(ovs_pcpu_storage->exec_level);
err = do_execute_actions(dp, skb, clone,
actions, len);
if (clone_flow_key)
- __this_cpu_dec(ovs_pcpu_storage.exec_level);
+ __this_cpu_dec(ovs_pcpu_storage->exec_level);
} else { /* Recirc action */
clone->recirc_id = recirc_id;
ovs_dp_process_packet(skb, clone);
@@ -1611,7 +1610,7 @@ static int clone_execute(struct datapath *dp, struct sk_buff *skb,
static void process_deferred_actions(struct datapath *dp)
{
- struct action_fifo *fifo = this_cpu_ptr(&ovs_pcpu_storage.action_fifos);
+ struct action_fifo *fifo = this_cpu_ptr(&ovs_pcpu_storage->action_fifos);
/* Do not touch the FIFO in case there is no deferred actions. */
if (action_fifo_is_empty(fifo))
@@ -1642,7 +1641,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
{
int err, level;
- level = __this_cpu_inc_return(ovs_pcpu_storage.exec_level);
+ level = __this_cpu_inc_return(ovs_pcpu_storage->exec_level);
if (unlikely(level > OVS_RECURSION_LIMIT)) {
net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
ovs_dp_name(dp));
@@ -1659,6 +1658,6 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
process_deferred_actions(dp);
out:
- __this_cpu_dec(ovs_pcpu_storage.exec_level);
+ __this_cpu_dec(ovs_pcpu_storage->exec_level);
return err;
}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 6a304ae2d959..d5b6e2002bc1 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -244,7 +244,7 @@ void ovs_dp_detach_port(struct vport *p)
/* Must be called with rcu_read_lock. */
void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
{
- struct ovs_pcpu_storage *ovs_pcpu = this_cpu_ptr(&ovs_pcpu_storage);
+ struct ovs_pcpu_storage *ovs_pcpu = this_cpu_ptr(ovs_pcpu_storage);
const struct vport *p = OVS_CB(skb)->input_vport;
struct datapath *dp = p->dp;
struct sw_flow *flow;
@@ -267,7 +267,9 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
memset(&upcall, 0, sizeof(upcall));
upcall.cmd = OVS_PACKET_CMD_MISS;
- if (dp->user_features & OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
+ if (OVS_CB(skb)->upcall_pid)
+ upcall.portid = OVS_CB(skb)->upcall_pid;
+ else if (dp->user_features & OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
upcall.portid =
ovs_dp_get_upcall_portid(dp, smp_processor_id());
else
@@ -299,7 +301,7 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
* avoided.
*/
if (IS_ENABLED(CONFIG_PREEMPT_RT) && ovs_pcpu->owner != current) {
- local_lock_nested_bh(&ovs_pcpu_storage.bh_lock);
+ local_lock_nested_bh(&ovs_pcpu_storage->bh_lock);
ovs_pcpu->owner = current;
ovs_pcpu_locked = true;
}
@@ -310,7 +312,7 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
ovs_dp_name(dp), error);
if (ovs_pcpu_locked) {
ovs_pcpu->owner = NULL;
- local_unlock_nested_bh(&ovs_pcpu_storage.bh_lock);
+ local_unlock_nested_bh(&ovs_pcpu_storage->bh_lock);
}
stats_counter = &stats->n_hit;
@@ -651,6 +653,9 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
!!(hash & OVS_PACKET_HASH_L4_BIT));
}
+ OVS_CB(packet)->upcall_pid =
+ nla_get_u32_default(a[OVS_PACKET_ATTR_UPCALL_PID], 0);
+
/* Build an sw_flow for sending this packet. */
flow = ovs_flow_alloc();
err = PTR_ERR(flow);
@@ -689,13 +694,13 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
sf_acts = rcu_dereference(flow->sf_acts);
local_bh_disable();
- local_lock_nested_bh(&ovs_pcpu_storage.bh_lock);
+ local_lock_nested_bh(&ovs_pcpu_storage->bh_lock);
if (IS_ENABLED(CONFIG_PREEMPT_RT))
- this_cpu_write(ovs_pcpu_storage.owner, current);
+ this_cpu_write(ovs_pcpu_storage->owner, current);
err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
if (IS_ENABLED(CONFIG_PREEMPT_RT))
- this_cpu_write(ovs_pcpu_storage.owner, NULL);
- local_unlock_nested_bh(&ovs_pcpu_storage.bh_lock);
+ this_cpu_write(ovs_pcpu_storage->owner, NULL);
+ local_unlock_nested_bh(&ovs_pcpu_storage->bh_lock);
local_bh_enable();
rcu_read_unlock();
@@ -719,6 +724,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
[OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
[OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
[OVS_PACKET_ATTR_HASH] = { .type = NLA_U64 },
+ [OVS_PACKET_ATTR_UPCALL_PID] = { .type = NLA_U32 },
};
static const struct genl_small_ops dp_packet_genl_ops[] = {
@@ -2744,6 +2750,28 @@ static struct drop_reason_list drop_reason_list_ovs = {
.n_reasons = ARRAY_SIZE(ovs_drop_reasons),
};
+static int __init ovs_alloc_percpu_storage(void)
+{
+ unsigned int cpu;
+
+ ovs_pcpu_storage = alloc_percpu(*ovs_pcpu_storage);
+ if (!ovs_pcpu_storage)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ struct ovs_pcpu_storage *ovs_pcpu;
+
+ ovs_pcpu = per_cpu_ptr(ovs_pcpu_storage, cpu);
+ local_lock_init(&ovs_pcpu->bh_lock);
+ }
+ return 0;
+}
+
+static void ovs_free_percpu_storage(void)
+{
+ free_percpu(ovs_pcpu_storage);
+}
+
static int __init dp_init(void)
{
int err;
@@ -2753,6 +2781,10 @@ static int __init dp_init(void)
pr_info("Open vSwitch switching datapath\n");
+ err = ovs_alloc_percpu_storage();
+ if (err)
+ goto error;
+
err = ovs_internal_dev_rtnl_link_register();
if (err)
goto error;
@@ -2799,6 +2831,7 @@ error_flow_exit:
error_unreg_rtnl_link:
ovs_internal_dev_rtnl_link_unregister();
error:
+ ovs_free_percpu_storage();
return err;
}
@@ -2813,6 +2846,7 @@ static void dp_cleanup(void)
ovs_vport_exit();
ovs_flow_exit();
ovs_internal_dev_rtnl_link_unregister();
+ ovs_free_percpu_storage();
}
module_init(dp_init);
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 1b5348b0f559..db0c3e69d66c 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -121,6 +121,8 @@ struct datapath {
* @cutlen: The number of bytes from the packet end to be removed.
* @probability: The sampling probability that was applied to this skb; 0 means
* no sampling has occurred; U32_MAX means 100% probability.
+ * @upcall_pid: Netlink socket PID to use for sending this packet to userspace;
+ * 0 means "not set" and default per-CPU or per-vport dispatch should be used.
*/
struct ovs_skb_cb {
struct vport *input_vport;
@@ -128,6 +130,7 @@ struct ovs_skb_cb {
u16 acts_origlen;
u32 cutlen;
u32 probability;
+ u32 upcall_pid;
};
#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
@@ -220,7 +223,8 @@ struct ovs_pcpu_storage {
struct task_struct *owner;
local_lock_t bh_lock;
};
-DECLARE_PER_CPU(struct ovs_pcpu_storage, ovs_pcpu_storage);
+
+extern struct ovs_pcpu_storage __percpu *ovs_pcpu_storage;
/**
* enum ovs_pkt_hash_types - hash info to include with a packet
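With the new @upcall_pid field, the upcall destination is chosen in order: the per-packet portid if set, then the per-CPU dispatch portid when that datapath feature is enabled, and finally the input vport's portid. A sketch of that precedence; the names and values are illustrative:

#include <stdint.h>
#include <stdio.h>

#define DP_F_DISPATCH_PER_CPU 0x1u

static uint32_t pick_upcall_portid(uint32_t pkt_upcall_pid,
				   uint32_t dp_features,
				   uint32_t percpu_portid,
				   uint32_t vport_portid)
{
	if (pkt_upcall_pid)
		return pkt_upcall_pid;           /* per-packet override wins */
	if (dp_features & DP_F_DISPATCH_PER_CPU)
		return percpu_portid;            /* per-CPU dispatch */
	return vport_portid;                     /* default per-vport dispatch */
}

int main(void)
{
	printf("%u\n", pick_upcall_portid(0, DP_F_DISPATCH_PER_CPU, 1234, 42));
	printf("%u\n", pick_upcall_portid(777, DP_F_DISPATCH_PER_CPU, 1234, 42));
	return 0;
}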
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
index 7af0cde8b293..a2af90ee99af 100644
--- a/net/openvswitch/dp_notify.c
+++ b/net/openvswitch/dp_notify.c
@@ -75,7 +75,7 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event,
/* schedule vport destroy, dev_put and genl notification */
ovs_net = net_generic(dev_net(dev), ovs_net_id);
- queue_work(system_wq, &ovs_net->dp_notify_work);
+ queue_work(system_percpu_wq, &ovs_net->dp_notify_work);
}
return NOTIFY_DONE;
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index b80bd3a90773..66366982f604 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -129,15 +129,13 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
struct ovs_flow_stats *ovs_stats,
unsigned long *used, __be16 *tcp_flags)
{
- int cpu;
+ unsigned int cpu;
*used = 0;
*tcp_flags = 0;
memset(ovs_stats, 0, sizeof(*ovs_stats));
- /* We open code this to make sure cpu 0 is always considered */
- for (cpu = 0; cpu < nr_cpu_ids;
- cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
+ for_each_cpu(cpu, flow->cpu_used_mask) {
struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
if (stats) {
@@ -158,11 +156,9 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
/* Called with ovs_mutex. */
void ovs_flow_stats_clear(struct sw_flow *flow)
{
- int cpu;
+ unsigned int cpu;
- /* We open code this to make sure cpu 0 is always considered */
- for (cpu = 0; cpu < nr_cpu_ids;
- cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
+ for_each_cpu(cpu, flow->cpu_used_mask) {
struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
if (stats) {
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index d108ae0bd0ee..ffc72a741a50 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -107,16 +107,15 @@ int ovs_flow_tbl_count(const struct flow_table *table)
static void flow_free(struct sw_flow *flow)
{
- int cpu;
+ unsigned int cpu;
if (ovs_identifier_is_key(&flow->id))
kfree(flow->id.unmasked_key);
if (flow->sf_acts)
ovs_nla_free_flow_actions((struct sw_flow_actions __force *)
flow->sf_acts);
- /* We open code this to make sure cpu 0 is always considered */
- for (cpu = 0; cpu < nr_cpu_ids;
- cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
+
+ for_each_cpu(cpu, flow->cpu_used_mask) {
if (flow->stats[cpu])
kmem_cache_free(flow_stats_cache,
(struct sw_flow_stats __force *)flow->stats[cpu]);
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 8732f6e51ae5..6bbbc16ab778 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -501,6 +501,7 @@ int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
OVS_CB(skb)->mru = 0;
OVS_CB(skb)->cutlen = 0;
OVS_CB(skb)->probability = 0;
+ OVS_CB(skb)->upcall_pid = 0;
if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
u32 mark;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 3d43f3eae759..173e6edda08f 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -203,8 +203,7 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *,
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
struct tpacket_block_desc *);
-static void prb_retire_rx_blk_timer_expired(struct timer_list *);
-static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
+static enum hrtimer_restart prb_retire_rx_blk_timer_expired(struct hrtimer *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
struct tpacket3_hdr *);
@@ -579,33 +578,13 @@ static __be16 vlan_get_protocol_dgram(const struct sk_buff *skb)
return proto;
}
-static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
-{
- timer_delete_sync(&pkc->retire_blk_timer);
-}
-
static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
struct sk_buff_head *rb_queue)
{
struct tpacket_kbdq_core *pkc;
pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
-
- spin_lock_bh(&rb_queue->lock);
- pkc->delete_blk_timer = 1;
- spin_unlock_bh(&rb_queue->lock);
-
- prb_del_retire_blk_timer(pkc);
-}
-
-static void prb_setup_retire_blk_timer(struct packet_sock *po)
-{
- struct tpacket_kbdq_core *pkc;
-
- pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
- timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
- 0);
- pkc->retire_blk_timer.expires = jiffies;
+ hrtimer_cancel(&pkc->retire_blk_timer);
}
static int prb_calc_retire_blk_tmo(struct packet_sock *po,
@@ -669,57 +648,36 @@ static void init_prb_bdqc(struct packet_sock *po,
p1->knum_blocks = req_u->req3.tp_block_nr;
p1->hdrlen = po->tp_hdrlen;
p1->version = po->tp_version;
- p1->last_kactive_blk_num = 0;
po->stats.stats3.tp_freeze_q_cnt = 0;
if (req_u->req3.tp_retire_blk_tov)
- p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
+ p1->interval_ktime = ms_to_ktime(req_u->req3.tp_retire_blk_tov);
else
- p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
- req_u->req3.tp_block_size);
- p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
+ p1->interval_ktime = ms_to_ktime(prb_calc_retire_blk_tmo(po,
+ req_u->req3.tp_block_size));
p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
rwlock_init(&p1->blk_fill_in_prog_lock);
p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
prb_init_ft_ops(p1, req_u);
- prb_setup_retire_blk_timer(po);
+ hrtimer_setup(&p1->retire_blk_timer, prb_retire_rx_blk_timer_expired,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
+ hrtimer_start(&p1->retire_blk_timer, p1->interval_ktime,
+ HRTIMER_MODE_REL_SOFT);
prb_open_block(p1, pbd);
}
-/* Do NOT update the last_blk_num first.
- * Assumes sk_buff_head lock is held.
- */
-static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
-{
- mod_timer(&pkc->retire_blk_timer,
- jiffies + pkc->tov_in_jiffies);
- pkc->last_kactive_blk_num = pkc->kactive_blk_num;
-}
-
/*
- * Timer logic:
- * 1) We refresh the timer only when we open a block.
- * By doing this we don't waste cycles refreshing the timer
- * on packet-by-packet basis.
- *
* With a 1MB block-size, on a 1Gbps line, it will take
* i) ~8 ms to fill a block + ii) memcpy etc.
* In this cut we are not accounting for the memcpy time.
*
- * So, if the user sets the 'tmo' to 10ms then the timer
- * will never fire while the block is still getting filled
- * (which is what we want). However, the user could choose
- * to close a block early and that's fine.
- *
- * But when the timer does fire, we check whether or not to refresh it.
* Since the tmo granularity is in msecs, it is not too expensive
* to refresh the timer, lets say every '8' msecs.
* Either the user can set the 'tmo' or we can derive it based on
* a) line-speed and b) block-size.
* prb_calc_retire_blk_tmo() calculates the tmo.
- *
*/
-static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
+static enum hrtimer_restart prb_retire_rx_blk_timer_expired(struct hrtimer *t)
{
struct packet_sock *po =
timer_container_of(po, t, rx_ring.prb_bdqc.retire_blk_timer);
@@ -732,9 +690,6 @@ static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
frozen = prb_queue_frozen(pkc);
pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
- if (unlikely(pkc->delete_blk_timer))
- goto out;
-
/* We only need to plug the race when the block is partially filled.
* tpacket_rcv:
* lock(); increment BLOCK_NUM_PKTS; unlock()
@@ -750,46 +705,31 @@ static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
write_unlock(&pkc->blk_fill_in_prog_lock);
}
- if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
- if (!frozen) {
- if (!BLOCK_NUM_PKTS(pbd)) {
- /* An empty block. Just refresh the timer. */
- goto refresh_timer;
- }
+ if (!frozen) {
+ if (BLOCK_NUM_PKTS(pbd)) {
+ /* Not an empty block. Need to retire the block. */
prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
- if (!prb_dispatch_next_block(pkc, po))
- goto refresh_timer;
- else
- goto out;
- } else {
- /* Case 1. Queue was frozen because user-space was
- * lagging behind.
+ prb_dispatch_next_block(pkc, po);
+ }
+ } else {
+ /* Case 1. Queue was frozen because user-space was
+ * lagging behind.
+ */
+ if (!prb_curr_blk_in_use(pbd)) {
+ /* Case 2. queue was frozen,user-space caught up,
+ * now the link went idle && the timer fired.
+ * We don't have a block to close.So we open this
+ * block and restart the timer.
+ * opening a block thaws the queue,restarts timer
+ * Thawing/timer-refresh is a side effect.
*/
- if (prb_curr_blk_in_use(pbd)) {
- /*
- * Ok, user-space is still behind.
- * So just refresh the timer.
- */
- goto refresh_timer;
- } else {
- /* Case 2. queue was frozen,user-space caught up,
- * now the link went idle && the timer fired.
- * We don't have a block to close.So we open this
- * block and restart the timer.
- * opening a block thaws the queue,restarts timer
- * Thawing/timer-refresh is a side effect.
- */
- prb_open_block(pkc, pbd);
- goto out;
- }
+ prb_open_block(pkc, pbd);
}
}
-refresh_timer:
- _prb_refresh_rx_retire_blk_timer(pkc);
-
-out:
+ hrtimer_forward_now(&pkc->retire_blk_timer, pkc->interval_ktime);
spin_unlock(&po->sk.sk_receive_queue.lock);
+ return HRTIMER_RESTART;
}
static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
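The retire timer is now periodic: the callback retires or reopens a block, forwards the expiry by the fixed interval with hrtimer_forward_now(), and returns HRTIMER_RESTART, so the receive path no longer has to re-arm it per packet. A userspace analog uses a timerfd armed once with a repeating interval; the 8 ms value mirrors the default tov but is otherwise arbitrary:

#include <stdint.h>
#include <stdio.h>
#include <sys/timerfd.h>
#include <unistd.h>

int main(void)
{
	struct itimerspec its = {
		.it_value    = { .tv_sec = 0, .tv_nsec = 8 * 1000 * 1000 },
		.it_interval = { .tv_sec = 0, .tv_nsec = 8 * 1000 * 1000 },
	};
	int fd = timerfd_create(CLOCK_MONOTONIC, 0);
	uint64_t expirations;
	int i;

	if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0)
		return 1;

	for (i = 0; i < 3; i++) {
		/* each read blocks until the next 8 ms tick, like the callback
		 * that retires a block and keeps the timer running */
		if (read(fd, &expirations, sizeof(expirations)) !=
		    (ssize_t)sizeof(expirations))
			return 1;
		printf("tick %d (%llu expirations)\n", i,
		       (unsigned long long)expirations);
	}
	close(fd);
	return 0;
}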
@@ -883,11 +823,18 @@ static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
}
/*
- * Side effect of opening a block:
+ * prb_open_block is called by tpacket_rcv or timer callback.
*
- * 1) prb_queue is thawed.
- * 2) retire_blk_timer is refreshed.
+ * Reasons why we do NOT update the hrtimer in prb_open_block:
+ * 1) It would add complexity to distinguish between the two caller scenarios.
+ * 2) hrtimer_cancel and hrtimer_start would need to be called to update the
+ *    TMO of an already enqueued hrtimer, leading to complex shutdown logic.
*
+ * One side effect of NOT updating the hrtimer when called by tpacket_rcv is
+ * that a newly opened block triggered by tpacket_rcv may be retired earlier
+ * than expected. On the other hand, if the timeout were updated in
+ * prb_open_block, frequent reception of network packets that leads to
+ * prb_open_block being called may cause the hrtimer to be removed and
+ * enqueued repeatedly.
*/
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
struct tpacket_block_desc *pbd1)
@@ -921,7 +868,6 @@ static void prb_open_block(struct tpacket_kbdq_core *pkc1,
pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
prb_thaw_queue(pkc1);
- _prb_refresh_rx_retire_blk_timer(pkc1);
smp_wmb();
}
@@ -2265,7 +2211,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
drop_n_acct:
atomic_inc(&po->tp_drops);
- atomic_inc(&sk->sk_drops);
+ sk_drops_inc(sk);
drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR;
drop_n_restore:
@@ -2785,7 +2731,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
int len_sum = 0;
int status = TP_STATUS_AVAILABLE;
int hlen, tlen, copylen = 0;
- long timeo = 0;
+ long timeo;
mutex_lock(&po->pg_vec_lock);
@@ -2839,22 +2785,28 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !vnet_hdr_sz)
size_max = dev->mtu + reserve + VLAN_HLEN;
+ timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
reinit_completion(&po->skb_completion);
do {
ph = packet_current_frame(po, &po->tx_ring,
TP_STATUS_SEND_REQUEST);
if (unlikely(ph == NULL)) {
- if (need_wait && skb) {
- timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
+ /* Note: packet_read_pending() might be slow if we
+ * have to call it, as it reads a per-CPU variable, but
+ * on the fast path we don't have to call it; only when
+ * ph is NULL do we need to check the pending_refcnt.
+ */
+ if (need_wait && packet_read_pending(&po->tx_ring)) {
timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
if (timeo <= 0) {
err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
goto out_put;
}
- }
- /* check for additional frames */
- continue;
+ /* check for additional frames */
+ continue;
+ } else
+ break;
}
skb = NULL;
@@ -2943,14 +2895,7 @@ tpacket_error:
}
packet_increment_head(&po->tx_ring);
len_sum += tp_len;
- } while (likely((ph != NULL) ||
- /* Note: packet_read_pending() might be slow if we have
- * to call it as it's per_cpu variable, but in fast-path
- * we already short-circuit the loop with the first
- * condition, and luckily don't have to go that path
- * anyway.
- */
- (need_wait && packet_read_pending(&po->tx_ring))));
+ } while (1);
err = len_sum;
goto out_put;
@@ -4574,10 +4519,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
spin_lock(&po->bind_lock);
was_running = packet_sock_flag(po, PACKET_SOCK_RUNNING);
num = po->num;
- if (was_running) {
- WRITE_ONCE(po->num, 0);
+ WRITE_ONCE(po->num, 0);
+ if (was_running)
__unregister_prot_hook(sk, false);
- }
+
spin_unlock(&po->bind_lock);
synchronize_net();
@@ -4609,10 +4554,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
mutex_unlock(&po->pg_vec_lock);
spin_lock(&po->bind_lock);
- if (was_running) {
- WRITE_ONCE(po->num, num);
+ WRITE_ONCE(po->num, num);
+ if (was_running)
register_prot_hook(sk);
- }
+
spin_unlock(&po->bind_lock);
if (pg_vec && (po->tp_version > TPACKET_V2)) {
/* Because we don't support block-based V3 on tx-ring */
@@ -4783,7 +4728,7 @@ static int packet_seq_show(struct seq_file *seq, void *v)
READ_ONCE(po->ifindex),
packet_sock_flag(po, PACKET_SOCK_RUNNING),
atomic_read(&s->sk_rmem_alloc),
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
+ from_kuid_munged(seq_user_ns(seq), sk_uid(s)),
sock_i_ino(s));
}
diff --git a/net/packet/diag.c b/net/packet/diag.c
index 47f69f3dbf73..c8f43e0c1925 100644
--- a/net/packet/diag.c
+++ b/net/packet/diag.c
@@ -83,7 +83,7 @@ static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type,
pdr.pdr_frame_nr = ring->frame_max + 1;
if (ver > TPACKET_V2) {
- pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov;
+ pdr.pdr_retire_tmo = ktime_to_ms(ring->prb_bdqc.interval_ktime);
pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv;
pdr.pdr_features = ring->prb_bdqc.feature_req_word;
} else {
@@ -153,7 +153,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
if ((req->pdiag_show & PACKET_SHOW_INFO) &&
nla_put_u32(skb, PACKET_DIAG_UID,
- from_kuid_munged(user_ns, sock_i_uid(sk))))
+ from_kuid_munged(user_ns, sk_uid(sk))))
goto out_nlmsg_trim;
if ((req->pdiag_show & PACKET_SHOW_MCLIST) &&
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 1e743d0316fd..b76e645cd78d 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -20,15 +20,10 @@ struct tpacket_kbdq_core {
unsigned int feature_req_word;
unsigned int hdrlen;
unsigned char reset_pending_on_curr_blk;
- unsigned char delete_blk_timer;
unsigned short kactive_blk_num;
unsigned short blk_sizeof_priv;
- /* last_kactive_blk_num:
- * trick to see if user-space has caught up
- * in order to avoid refreshing timer when every single pkt arrives.
- */
- unsigned short last_kactive_blk_num;
+ unsigned short version;
char *pkblk_start;
char *pkblk_end;
@@ -38,6 +33,7 @@ struct tpacket_kbdq_core {
uint64_t knxt_seq_num;
char *prev;
char *nxt_offset;
+
struct sk_buff *skb;
rwlock_t blk_fill_in_prog_lock;
@@ -45,12 +41,10 @@ struct tpacket_kbdq_core {
/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV (8)
- unsigned short retire_blk_tov;
- unsigned short version;
- unsigned long tov_in_jiffies;
+ ktime_t interval_ktime;
/* timer to retire an outstanding block */
- struct timer_list retire_blk_timer;
+ struct hrtimer retire_blk_timer;
};
struct pgv {
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index a27efa4faa4e..238a9638d2b0 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -22,7 +22,7 @@
#include <net/phonet/pn_dev.h>
/* Transport protocol registration */
-static const struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
+static const struct phonet_protocol __rcu *proto_tab[PHONET_NPROTO] __read_mostly;
static const struct phonet_protocol *phonet_proto_get(unsigned int protocol)
{
@@ -482,7 +482,7 @@ void phonet_proto_unregister(unsigned int protocol,
const struct phonet_protocol *pp)
{
mutex_lock(&proto_tab_lock);
- BUG_ON(proto_tab[protocol] != pp);
+ BUG_ON(rcu_access_pointer(proto_tab[protocol]) != pp);
RCU_INIT_POINTER(proto_tab[protocol], NULL);
mutex_unlock(&proto_tab_lock);
synchronize_rcu();
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 53a858478e22..4db564d9d522 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -376,7 +376,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
case PNS_PEP_CTRL_REQ:
if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
- atomic_inc(&sk->sk_drops);
+ sk_drops_inc(sk);
break;
}
__skb_pull(skb, 4);
@@ -397,7 +397,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
}
if (pn->rx_credits == 0) {
- atomic_inc(&sk->sk_drops);
+ sk_drops_inc(sk);
err = -ENOBUFS;
break;
}
@@ -567,7 +567,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
}
if (pn->rx_credits == 0) {
- atomic_inc(&sk->sk_drops);
+ sk_drops_inc(sk);
err = NET_RX_DROP;
break;
}
@@ -826,6 +826,7 @@ static struct sock *pep_sock_accept(struct sock *sk,
}
/* Check for duplicate pipe handle */
+ pn_skb_get_dst_sockaddr(skb, &dst);
newsk = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
if (unlikely(newsk)) {
__sock_put(newsk);
@@ -850,7 +851,6 @@ static struct sock *pep_sock_accept(struct sock *sk,
newsk->sk_destruct = pipe_destruct;
newpn = pep_sk(newsk);
- pn_skb_get_dst_sockaddr(skb, &dst);
pn_skb_get_src_sockaddr(skb, &src);
newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
newpn->pn_sk.dobject = pn_sockaddr_get_object(&src);
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 5ce0b3ee5def..db2d552e9b32 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -584,10 +584,10 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
sk->sk_protocol, pn->sobject, pn->dobject,
pn->resource, sk->sk_state,
sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+ from_kuid_munged(seq_user_ns(seq), sk_uid(sk)),
sock_i_ino(sk),
refcount_read(&sk->sk_refcnt), sk,
- atomic_read(&sk->sk_drops));
+ sk_drops_read(sk));
}
seq_pad(seq, '\n');
return 0;
@@ -602,7 +602,7 @@ const struct seq_operations pn_sock_seq_ops = {
#endif
static struct {
- struct sock *sk[256];
+ struct sock __rcu *sk[256];
} pnres;
/*
@@ -654,7 +654,7 @@ int pn_sock_unbind_res(struct sock *sk, u8 res)
return -EPERM;
mutex_lock(&resource_mutex);
- if (pnres.sk[res] == sk) {
+ if (rcu_access_pointer(pnres.sk[res]) == sk) {
RCU_INIT_POINTER(pnres.sk[res], NULL);
ret = 0;
}
@@ -673,7 +673,7 @@ void pn_sock_unbind_all_res(struct sock *sk)
mutex_lock(&resource_mutex);
for (res = 0; res < 256; res++) {
- if (pnres.sk[res] == sk) {
+ if (rcu_access_pointer(pnres.sk[res]) == sk) {
RCU_INIT_POINTER(pnres.sk[res], NULL);
match++;
}
@@ -688,7 +688,7 @@ void pn_sock_unbind_all_res(struct sock *sk)
}
#ifdef CONFIG_PROC_FS
-static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
+static struct sock __rcu **pn_res_get_idx(struct seq_file *seq, loff_t pos)
{
struct net *net = seq_file_net(seq);
unsigned int i;
@@ -697,7 +697,7 @@ static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
return NULL;
for (i = 0; i < 256; i++) {
- if (pnres.sk[i] == NULL)
+ if (rcu_access_pointer(pnres.sk[i]) == NULL)
continue;
if (!pos)
return pnres.sk + i;
@@ -706,7 +706,7 @@ static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
return NULL;
}
-static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
+static struct sock __rcu **pn_res_get_next(struct seq_file *seq, struct sock __rcu **sk)
{
struct net *net = seq_file_net(seq);
unsigned int i;
@@ -728,7 +728,7 @@ static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos)
static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct sock **sk;
+ struct sock __rcu **sk;
if (v == SEQ_START_TOKEN)
sk = pn_res_get_idx(seq, 0);
@@ -747,15 +747,16 @@ static void pn_res_seq_stop(struct seq_file *seq, void *v)
static int pn_res_seq_show(struct seq_file *seq, void *v)
{
seq_setwidth(seq, 63);
- if (v == SEQ_START_TOKEN)
+ if (v == SEQ_START_TOKEN) {
seq_puts(seq, "rs uid inode");
- else {
- struct sock **psk = v;
- struct sock *sk = *psk;
+ } else {
+ struct sock __rcu **psk = v;
+ struct sock *sk = rcu_dereference_protected(*psk,
+ lockdep_is_held(&resource_mutex));
seq_printf(seq, "%02X %5u %lu",
(int) (psk - pnres.sk),
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+ from_kuid_munged(seq_user_ns(seq), sk_uid(sk)),
sock_i_ino(sk));
}
seq_pad(seq, '\n');
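The phonet conversions above follow the usual pattern for pointers annotated __rcu: readers dereference with rcu_dereference() under rcu_read_lock(), writers publish or clear with RCU_INIT_POINTER()/rcu_assign_pointer(), and code that only compares the pointer value under the writer-side mutex uses rcu_access_pointer(). A minimal sketch of that pattern, with illustrative names (struct foo, my_tab, my_mutex) that are not taken from the patch:

struct foo {
	refcount_t ref;
};

static struct foo __rcu *my_tab[16];
static DEFINE_MUTEX(my_mutex);

/* reader: full dereference under RCU, take a reference before returning */
static struct foo *my_get(unsigned int i)
{
	struct foo *f;

	rcu_read_lock();
	f = rcu_dereference(my_tab[i]);
	if (f && !refcount_inc_not_zero(&f->ref))
		f = NULL;
	rcu_read_unlock();
	return f;
}

/* writer: the pointer value is only compared, never dereferenced */
static void my_unpublish(unsigned int i, struct foo *f)
{
	mutex_lock(&my_mutex);
	if (rcu_access_pointer(my_tab[i]) == f)
		RCU_INIT_POINTER(my_tab[i], NULL);
	mutex_unlock(&my_mutex);
	synchronize_rcu();	/* wait for readers before freeing f */
}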
diff --git a/net/psp/Kconfig b/net/psp/Kconfig
new file mode 100644
index 000000000000..371e8771f3bd
--- /dev/null
+++ b/net/psp/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# PSP configuration
+#
+config INET_PSP
+ bool "PSP Security Protocol support"
+ depends on INET
+ select SKB_DECRYPTED
+ select SOCK_VALIDATE_XMIT
+ help
+ Enable kernel support for the PSP Security Protocol (PSP).
+ For more information see:
+ https://raw.githubusercontent.com/google/psp/main/doc/PSP_Arch_Spec.pdf
+
+ If unsure, say N.
diff --git a/net/psp/Makefile b/net/psp/Makefile
new file mode 100644
index 000000000000..eb5ff3c5bfb2
--- /dev/null
+++ b/net/psp/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_INET_PSP) += psp.o
+
+psp-y := psp_main.o psp_nl.o psp_sock.o psp-nl-gen.o
diff --git a/net/psp/psp-nl-gen.c b/net/psp/psp-nl-gen.c
new file mode 100644
index 000000000000..9fdd6f831803
--- /dev/null
+++ b/net/psp/psp-nl-gen.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/psp.yaml */
+/* YNL-GEN kernel source */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "psp-nl-gen.h"
+
+#include <uapi/linux/psp.h>
+
+/* Common nested types */
+const struct nla_policy psp_keys_nl_policy[PSP_A_KEYS_SPI + 1] = {
+ [PSP_A_KEYS_KEY] = { .type = NLA_BINARY, },
+ [PSP_A_KEYS_SPI] = { .type = NLA_U32, },
+};
+
+/* PSP_CMD_DEV_GET - do */
+static const struct nla_policy psp_dev_get_nl_policy[PSP_A_DEV_ID + 1] = {
+ [PSP_A_DEV_ID] = NLA_POLICY_MIN(NLA_U32, 1),
+};
+
+/* PSP_CMD_DEV_SET - do */
+static const struct nla_policy psp_dev_set_nl_policy[PSP_A_DEV_PSP_VERSIONS_ENA + 1] = {
+ [PSP_A_DEV_ID] = NLA_POLICY_MIN(NLA_U32, 1),
+ [PSP_A_DEV_PSP_VERSIONS_ENA] = NLA_POLICY_MASK(NLA_U32, 0xf),
+};
+
+/* PSP_CMD_KEY_ROTATE - do */
+static const struct nla_policy psp_key_rotate_nl_policy[PSP_A_DEV_ID + 1] = {
+ [PSP_A_DEV_ID] = NLA_POLICY_MIN(NLA_U32, 1),
+};
+
+/* PSP_CMD_RX_ASSOC - do */
+static const struct nla_policy psp_rx_assoc_nl_policy[PSP_A_ASSOC_SOCK_FD + 1] = {
+ [PSP_A_ASSOC_DEV_ID] = NLA_POLICY_MIN(NLA_U32, 1),
+ [PSP_A_ASSOC_VERSION] = NLA_POLICY_MAX(NLA_U32, 3),
+ [PSP_A_ASSOC_SOCK_FD] = { .type = NLA_U32, },
+};
+
+/* PSP_CMD_TX_ASSOC - do */
+static const struct nla_policy psp_tx_assoc_nl_policy[PSP_A_ASSOC_SOCK_FD + 1] = {
+ [PSP_A_ASSOC_DEV_ID] = NLA_POLICY_MIN(NLA_U32, 1),
+ [PSP_A_ASSOC_VERSION] = NLA_POLICY_MAX(NLA_U32, 3),
+ [PSP_A_ASSOC_TX_KEY] = NLA_POLICY_NESTED(psp_keys_nl_policy),
+ [PSP_A_ASSOC_SOCK_FD] = { .type = NLA_U32, },
+};
+
+/* Ops table for psp */
+static const struct genl_split_ops psp_nl_ops[] = {
+ {
+ .cmd = PSP_CMD_DEV_GET,
+ .pre_doit = psp_device_get_locked,
+ .doit = psp_nl_dev_get_doit,
+ .post_doit = psp_device_unlock,
+ .policy = psp_dev_get_nl_policy,
+ .maxattr = PSP_A_DEV_ID,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = PSP_CMD_DEV_GET,
+ .dumpit = psp_nl_dev_get_dumpit,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = PSP_CMD_DEV_SET,
+ .pre_doit = psp_device_get_locked,
+ .doit = psp_nl_dev_set_doit,
+ .post_doit = psp_device_unlock,
+ .policy = psp_dev_set_nl_policy,
+ .maxattr = PSP_A_DEV_PSP_VERSIONS_ENA,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = PSP_CMD_KEY_ROTATE,
+ .pre_doit = psp_device_get_locked,
+ .doit = psp_nl_key_rotate_doit,
+ .post_doit = psp_device_unlock,
+ .policy = psp_key_rotate_nl_policy,
+ .maxattr = PSP_A_DEV_ID,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = PSP_CMD_RX_ASSOC,
+ .pre_doit = psp_assoc_device_get_locked,
+ .doit = psp_nl_rx_assoc_doit,
+ .post_doit = psp_device_unlock,
+ .policy = psp_rx_assoc_nl_policy,
+ .maxattr = PSP_A_ASSOC_SOCK_FD,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = PSP_CMD_TX_ASSOC,
+ .pre_doit = psp_assoc_device_get_locked,
+ .doit = psp_nl_tx_assoc_doit,
+ .post_doit = psp_device_unlock,
+ .policy = psp_tx_assoc_nl_policy,
+ .maxattr = PSP_A_ASSOC_SOCK_FD,
+ .flags = GENL_CMD_CAP_DO,
+ },
+};
+
+static const struct genl_multicast_group psp_nl_mcgrps[] = {
+ [PSP_NLGRP_MGMT] = { "mgmt", },
+ [PSP_NLGRP_USE] = { "use", },
+};
+
+struct genl_family psp_nl_family __ro_after_init = {
+ .name = PSP_FAMILY_NAME,
+ .version = PSP_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = psp_nl_ops,
+ .n_split_ops = ARRAY_SIZE(psp_nl_ops),
+ .mcgrps = psp_nl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(psp_nl_mcgrps),
+};
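One possible way to exercise the generated family from userspace is the in-tree YNL CLI, driven by the same spec this file is generated from. The attribute name "id" and the exact path of the CLI script are assumptions here (they depend on the spec and kernel tree layout), so treat this as a sketch:

./tools/net/ynl/cli.py --spec Documentation/netlink/specs/psp.yaml \
        --do dev-get --json '{"id": 1}'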
diff --git a/net/psp/psp-nl-gen.h b/net/psp/psp-nl-gen.h
new file mode 100644
index 000000000000..25268ed11fb5
--- /dev/null
+++ b/net/psp/psp-nl-gen.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/psp.yaml */
+/* YNL-GEN kernel header */
+
+#ifndef _LINUX_PSP_GEN_H
+#define _LINUX_PSP_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/psp.h>
+
+/* Common nested types */
+extern const struct nla_policy psp_keys_nl_policy[PSP_A_KEYS_SPI + 1];
+
+int psp_device_get_locked(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info);
+int psp_assoc_device_get_locked(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info);
+void
+psp_device_unlock(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info);
+
+int psp_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info);
+int psp_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int psp_nl_dev_set_doit(struct sk_buff *skb, struct genl_info *info);
+int psp_nl_key_rotate_doit(struct sk_buff *skb, struct genl_info *info);
+int psp_nl_rx_assoc_doit(struct sk_buff *skb, struct genl_info *info);
+int psp_nl_tx_assoc_doit(struct sk_buff *skb, struct genl_info *info);
+
+enum {
+ PSP_NLGRP_MGMT,
+ PSP_NLGRP_USE,
+};
+
+extern struct genl_family psp_nl_family;
+
+#endif /* _LINUX_PSP_GEN_H */
diff --git a/net/psp/psp.h b/net/psp/psp.h
new file mode 100644
index 000000000000..9f19137593a0
--- /dev/null
+++ b/net/psp/psp.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __PSP_PSP_H
+#define __PSP_PSP_H
+
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <net/netns/generic.h>
+#include <net/psp.h>
+#include <net/sock.h>
+
+extern struct xarray psp_devs;
+extern struct mutex psp_devs_lock;
+
+void psp_dev_free(struct psp_dev *psd);
+int psp_dev_check_access(struct psp_dev *psd, struct net *net);
+
+void psp_nl_notify_dev(struct psp_dev *psd, u32 cmd);
+
+struct psp_assoc *psp_assoc_create(struct psp_dev *psd);
+struct psp_dev *psp_dev_get_for_sock(struct sock *sk);
+void psp_dev_tx_key_del(struct psp_dev *psd, struct psp_assoc *pas);
+int psp_sock_assoc_set_rx(struct sock *sk, struct psp_assoc *pas,
+ struct psp_key_parsed *key,
+ struct netlink_ext_ack *extack);
+int psp_sock_assoc_set_tx(struct sock *sk, struct psp_dev *psd,
+ u32 version, struct psp_key_parsed *key,
+ struct netlink_ext_ack *extack);
+void psp_assocs_key_rotated(struct psp_dev *psd);
+
+static inline void psp_dev_get(struct psp_dev *psd)
+{
+ refcount_inc(&psd->refcnt);
+}
+
+static inline bool psp_dev_tryget(struct psp_dev *psd)
+{
+ return refcount_inc_not_zero(&psd->refcnt);
+}
+
+static inline void psp_dev_put(struct psp_dev *psd)
+{
+ if (refcount_dec_and_test(&psd->refcnt))
+ psp_dev_free(psd);
+}
+
+static inline bool psp_dev_is_registered(struct psp_dev *psd)
+{
+ lockdep_assert_held(&psd->lock);
+ return !!psd->ops;
+}
+
+#endif /* __PSP_PSP_H */
diff --git a/net/psp/psp_main.c b/net/psp/psp_main.c
new file mode 100644
index 000000000000..481aaf0fc9fc
--- /dev/null
+++ b/net/psp/psp_main.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitfield.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/xarray.h>
+#include <net/net_namespace.h>
+#include <net/psp.h>
+#include <net/udp.h>
+
+#include "psp.h"
+#include "psp-nl-gen.h"
+
+DEFINE_XARRAY_ALLOC1(psp_devs);
+struct mutex psp_devs_lock;
+
+/**
+ * DOC: PSP locking
+ *
+ * psp_devs_lock protects the psp_devs xarray.
+ * The lock ordering is to take psp_devs_lock first, then the instance lock.
+ * Each instance is protected by RCU, and has a refcount.
+ * When the driver unregisters, the instance is flushed, but the struct sticks around.
+ */
+
+/**
+ * psp_dev_check_access() - check if user in a given net ns can access PSP dev
+ * @psd: PSP device structure user is trying to access
+ * @net: net namespace user is in
+ *
+ * Return: 0 if PSP device should be visible in @net, errno otherwise.
+ */
+int psp_dev_check_access(struct psp_dev *psd, struct net *net)
+{
+ if (dev_net(psd->main_netdev) == net)
+ return 0;
+ return -ENOENT;
+}
+
+/**
+ * psp_dev_create() - create and register PSP device
+ * @netdev: main netdevice
+ * @psd_ops: driver callbacks
+ * @psd_caps: device capabilities
+ * @priv_ptr: back-pointer to driver private data
+ *
+ * Return: pointer to allocated PSP device, or ERR_PTR.
+ */
+struct psp_dev *
+psp_dev_create(struct net_device *netdev,
+ struct psp_dev_ops *psd_ops, struct psp_dev_caps *psd_caps,
+ void *priv_ptr)
+{
+ struct psp_dev *psd;
+ static u32 last_id;
+ int err;
+
+ if (WARN_ON(!psd_caps->versions ||
+ !psd_ops->set_config ||
+ !psd_ops->key_rotate ||
+ !psd_ops->rx_spi_alloc ||
+ !psd_ops->tx_key_add ||
+ !psd_ops->tx_key_del))
+ return ERR_PTR(-EINVAL);
+
+ psd = kzalloc(sizeof(*psd), GFP_KERNEL);
+ if (!psd)
+ return ERR_PTR(-ENOMEM);
+
+ psd->main_netdev = netdev;
+ psd->ops = psd_ops;
+ psd->caps = psd_caps;
+ psd->drv_priv = priv_ptr;
+
+ mutex_init(&psd->lock);
+ INIT_LIST_HEAD(&psd->active_assocs);
+ INIT_LIST_HEAD(&psd->prev_assocs);
+ INIT_LIST_HEAD(&psd->stale_assocs);
+ refcount_set(&psd->refcnt, 1);
+
+ mutex_lock(&psp_devs_lock);
+ err = xa_alloc_cyclic(&psp_devs, &psd->id, psd, xa_limit_16b,
+ &last_id, GFP_KERNEL);
+ if (err) {
+ mutex_unlock(&psp_devs_lock);
+ kfree(psd);
+ return ERR_PTR(err);
+ }
+ mutex_lock(&psd->lock);
+ mutex_unlock(&psp_devs_lock);
+
+ psp_nl_notify_dev(psd, PSP_CMD_DEV_ADD_NTF);
+
+ rcu_assign_pointer(netdev->psp_dev, psd);
+
+ mutex_unlock(&psd->lock);
+
+ return psd;
+}
+EXPORT_SYMBOL(psp_dev_create);
+
+void psp_dev_free(struct psp_dev *psd)
+{
+ mutex_lock(&psp_devs_lock);
+ xa_erase(&psp_devs, psd->id);
+ mutex_unlock(&psp_devs_lock);
+
+ mutex_destroy(&psd->lock);
+ kfree_rcu(psd, rcu);
+}
+
+/**
+ * psp_dev_unregister() - unregister PSP device
+ * @psd: PSP device structure
+ */
+void psp_dev_unregister(struct psp_dev *psd)
+{
+ struct psp_assoc *pas, *next;
+
+ mutex_lock(&psp_devs_lock);
+ mutex_lock(&psd->lock);
+
+ psp_nl_notify_dev(psd, PSP_CMD_DEV_DEL_NTF);
+
+ /* Defer the xa_erase() call to psp_dev_free() to prevent a
+ * different psd from being added to the xarray with this id while
+ * there are still references to this psd being held.
+ */
+ xa_store(&psp_devs, psd->id, NULL, GFP_KERNEL);
+ mutex_unlock(&psp_devs_lock);
+
+ list_splice_init(&psd->active_assocs, &psd->prev_assocs);
+ list_splice_init(&psd->prev_assocs, &psd->stale_assocs);
+ list_for_each_entry_safe(pas, next, &psd->stale_assocs, assocs_list)
+ psp_dev_tx_key_del(psd, pas);
+
+ rcu_assign_pointer(psd->main_netdev->psp_dev, NULL);
+
+ psd->ops = NULL;
+ psd->drv_priv = NULL;
+
+ mutex_unlock(&psd->lock);
+
+ psp_dev_put(psd);
+}
+EXPORT_SYMBOL(psp_dev_unregister);
+
+unsigned int psp_key_size(u32 version)
+{
+ switch (version) {
+ case PSP_VERSION_HDR0_AES_GCM_128:
+ case PSP_VERSION_HDR0_AES_GMAC_128:
+ return 16;
+ case PSP_VERSION_HDR0_AES_GCM_256:
+ case PSP_VERSION_HDR0_AES_GMAC_256:
+ return 32;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(psp_key_size);
+
+static void psp_write_headers(struct net *net, struct sk_buff *skb, __be32 spi,
+ u8 ver, unsigned int udp_len, __be16 sport)
+{
+ struct udphdr *uh = udp_hdr(skb);
+ struct psphdr *psph = (struct psphdr *)(uh + 1);
+
+ uh->dest = htons(PSP_DEFAULT_UDP_PORT);
+ uh->source = udp_flow_src_port(net, skb, 0, 0, false);
+ uh->check = 0;
+ uh->len = htons(udp_len);
+
+ psph->nexthdr = IPPROTO_TCP;
+ psph->hdrlen = PSP_HDRLEN_NOOPT;
+ psph->crypt_offset = 0;
+ psph->verfl = FIELD_PREP(PSPHDR_VERFL_VERSION, ver) |
+ FIELD_PREP(PSPHDR_VERFL_ONE, 1);
+ psph->spi = spi;
+ memset(&psph->iv, 0, sizeof(psph->iv));
+}
+
+/* Encapsulate a TCP packet with PSP by adding the UDP+PSP headers and filling
+ * them in.
+ */
+bool psp_dev_encapsulate(struct net *net, struct sk_buff *skb, __be32 spi,
+ u8 ver, __be16 sport)
+{
+ u32 network_len = skb_network_header_len(skb);
+ u32 ethr_len = skb_mac_header_len(skb);
+ u32 bufflen = ethr_len + network_len;
+
+ if (skb_cow_head(skb, PSP_ENCAP_HLEN))
+ return false;
+
+ skb_push(skb, PSP_ENCAP_HLEN);
+ skb->mac_header -= PSP_ENCAP_HLEN;
+ skb->network_header -= PSP_ENCAP_HLEN;
+ skb->transport_header -= PSP_ENCAP_HLEN;
+ memmove(skb->data, skb->data + PSP_ENCAP_HLEN, bufflen);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ip_hdr(skb)->protocol = IPPROTO_UDP;
+ be16_add_cpu(&ip_hdr(skb)->tot_len, PSP_ENCAP_HLEN);
+ ip_hdr(skb)->check = 0;
+ ip_hdr(skb)->check =
+ ip_fast_csum((u8 *)ip_hdr(skb), ip_hdr(skb)->ihl);
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ ipv6_hdr(skb)->nexthdr = IPPROTO_UDP;
+ be16_add_cpu(&ipv6_hdr(skb)->payload_len, PSP_ENCAP_HLEN);
+ } else {
+ return false;
+ }
+
+ skb_set_inner_ipproto(skb, IPPROTO_TCP);
+ skb_set_inner_transport_header(skb, skb_transport_offset(skb) +
+ PSP_ENCAP_HLEN);
+ skb->encapsulation = 1;
+ psp_write_headers(net, skb, spi, ver,
+ skb->len - skb_transport_offset(skb), sport);
+
+ return true;
+}
+EXPORT_SYMBOL(psp_dev_encapsulate);
+
+/* Receive handler for PSP packets.
+ *
+ * Presently it accepts only already-authenticated packets and does not
+ * support optional fields, such as virtualization cookies. The caller should
+ * ensure that skb->data is pointing to the mac header, and that skb->mac_len
+ * is set. This function does not currently adjust skb->csum (CHECKSUM_COMPLETE
+ * is not supported).
+ */
+int psp_dev_rcv(struct sk_buff *skb, u16 dev_id, u8 generation, bool strip_icv)
+{
+ int l2_hlen = 0, l3_hlen, encap;
+ struct psp_skb_ext *pse;
+ struct psphdr *psph;
+ struct ethhdr *eth;
+ struct udphdr *uh;
+ __be16 proto;
+ bool is_udp;
+
+ eth = (struct ethhdr *)skb->data;
+ proto = __vlan_get_protocol(skb, eth->h_proto, &l2_hlen);
+ if (proto == htons(ETH_P_IP))
+ l3_hlen = sizeof(struct iphdr);
+ else if (proto == htons(ETH_P_IPV6))
+ l3_hlen = sizeof(struct ipv6hdr);
+ else
+ return -EINVAL;
+
+ if (unlikely(!pskb_may_pull(skb, l2_hlen + l3_hlen + PSP_ENCAP_HLEN)))
+ return -EINVAL;
+
+ if (proto == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)(skb->data + l2_hlen);
+
+ is_udp = iph->protocol == IPPROTO_UDP;
+ l3_hlen = iph->ihl * 4;
+ if (l3_hlen != sizeof(struct iphdr) &&
+ !pskb_may_pull(skb, l2_hlen + l3_hlen + PSP_ENCAP_HLEN))
+ return -EINVAL;
+ } else {
+ struct ipv6hdr *ipv6h = (struct ipv6hdr *)(skb->data + l2_hlen);
+
+ is_udp = ipv6h->nexthdr == IPPROTO_UDP;
+ }
+
+ if (unlikely(!is_udp))
+ return -EINVAL;
+
+ uh = (struct udphdr *)(skb->data + l2_hlen + l3_hlen);
+ if (unlikely(uh->dest != htons(PSP_DEFAULT_UDP_PORT)))
+ return -EINVAL;
+
+ pse = skb_ext_add(skb, SKB_EXT_PSP);
+ if (!pse)
+ return -EINVAL;
+
+ psph = (struct psphdr *)(skb->data + l2_hlen + l3_hlen +
+ sizeof(struct udphdr));
+ pse->spi = psph->spi;
+ pse->dev_id = dev_id;
+ pse->generation = generation;
+ pse->version = FIELD_GET(PSPHDR_VERFL_VERSION, psph->verfl);
+
+ encap = PSP_ENCAP_HLEN;
+ encap += strip_icv ? PSP_TRL_SIZE : 0;
+
+ if (proto == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)(skb->data + l2_hlen);
+
+ iph->protocol = psph->nexthdr;
+ iph->tot_len = htons(ntohs(iph->tot_len) - encap);
+ iph->check = 0;
+ iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
+ } else {
+ struct ipv6hdr *ipv6h = (struct ipv6hdr *)(skb->data + l2_hlen);
+
+ ipv6h->nexthdr = psph->nexthdr;
+ ipv6h->payload_len = htons(ntohs(ipv6h->payload_len) - encap);
+ }
+
+ memmove(skb->data + PSP_ENCAP_HLEN, skb->data, l2_hlen + l3_hlen);
+ skb_pull(skb, PSP_ENCAP_HLEN);
+
+ if (strip_icv)
+ pskb_trim(skb, skb->len - PSP_TRL_SIZE);
+
+ return 0;
+}
+EXPORT_SYMBOL(psp_dev_rcv);
+
+static int __init psp_init(void)
+{
+ mutex_init(&psp_devs_lock);
+
+ return genl_register_family(&psp_nl_family);
+}
+
+subsys_initcall(psp_init);
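For reference, a minimal sketch of how a driver might register with this API, inferred from the callbacks psp_dev_create() requires above. The driver function names, the capability values, and the init helper are hypothetical; the ops signatures are inferred from how they are invoked in this series.

static int mydrv_set_config(struct psp_dev *psd, struct psp_dev_config *conf,
			    struct netlink_ext_ack *extack)
{
	/* push conf->versions to the device/firmware */
	return 0;
}

static int mydrv_key_rotate(struct psp_dev *psd, struct netlink_ext_ack *extack)
{
	/* rotate the device master key; may adjust psd->generation */
	return 0;
}

static int mydrv_rx_spi_alloc(struct psp_dev *psd, u32 version,
			      struct psp_key_parsed *key,
			      struct netlink_ext_ack *extack)
{
	/* ask the device for a fresh SPI and Rx key, fill *key */
	return 0;
}

static int mydrv_tx_key_add(struct psp_dev *psd, struct psp_assoc *pas,
			    struct netlink_ext_ack *extack)
{
	/* program pas->tx into the device; per-assoc state in pas->drv_data */
	return 0;
}

static void mydrv_tx_key_del(struct psp_dev *psd, struct psp_assoc *pas)
{
	/* remove the Tx key programmed for this association */
}

static struct psp_dev_ops mydrv_psp_ops = {
	.set_config	= mydrv_set_config,
	.key_rotate	= mydrv_key_rotate,
	.rx_spi_alloc	= mydrv_rx_spi_alloc,
	.tx_key_add	= mydrv_tx_key_add,
	.tx_key_del	= mydrv_tx_key_del,
};

static struct psp_dev_caps mydrv_psp_caps = {
	.versions	= 1 << PSP_VERSION_HDR0_AES_GCM_128,	/* example value */
	.assoc_drv_spc	= 0,
};

static int mydrv_psp_init(struct net_device *netdev, void *priv)
{
	struct psp_dev *psd;

	psd = psp_dev_create(netdev, &mydrv_psp_ops, &mydrv_psp_caps, priv);
	if (IS_ERR(psd))
		return PTR_ERR(psd);
	/* stash psd in driver private data; on teardown call
	 * psp_dev_unregister(psd), which drops the create-time reference.
	 */
	return 0;
}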
diff --git a/net/psp/psp_nl.c b/net/psp/psp_nl.c
new file mode 100644
index 000000000000..8aaca62744c3
--- /dev/null
+++ b/net/psp/psp_nl.c
@@ -0,0 +1,505 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/skbuff.h>
+#include <linux/xarray.h>
+#include <net/genetlink.h>
+#include <net/psp.h>
+#include <net/sock.h>
+
+#include "psp-nl-gen.h"
+#include "psp.h"
+
+/* Netlink helpers */
+
+static struct sk_buff *psp_nl_reply_new(struct genl_info *info)
+{
+ struct sk_buff *rsp;
+ void *hdr;
+
+ rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!rsp)
+ return NULL;
+
+ hdr = genlmsg_iput(rsp, info);
+ if (!hdr) {
+ nlmsg_free(rsp);
+ return NULL;
+ }
+
+ return rsp;
+}
+
+static int psp_nl_reply_send(struct sk_buff *rsp, struct genl_info *info)
+{
+ /* Note that this *only* works with a single message per skb! */
+ nlmsg_end(rsp, (struct nlmsghdr *)rsp->data);
+
+ return genlmsg_reply(rsp, info);
+}
+
+/* Device stuff */
+
+static struct psp_dev *
+psp_device_get_and_lock(struct net *net, struct nlattr *dev_id)
+{
+ struct psp_dev *psd;
+ int err;
+
+ mutex_lock(&psp_devs_lock);
+ psd = xa_load(&psp_devs, nla_get_u32(dev_id));
+ if (!psd) {
+ mutex_unlock(&psp_devs_lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_lock(&psd->lock);
+ mutex_unlock(&psp_devs_lock);
+
+ err = psp_dev_check_access(psd, net);
+ if (err) {
+ mutex_unlock(&psd->lock);
+ return ERR_PTR(err);
+ }
+
+ return psd;
+}
+
+int psp_device_get_locked(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info)
+{
+ if (GENL_REQ_ATTR_CHECK(info, PSP_A_DEV_ID))
+ return -EINVAL;
+
+ info->user_ptr[0] = psp_device_get_and_lock(genl_info_net(info),
+ info->attrs[PSP_A_DEV_ID]);
+ return PTR_ERR_OR_ZERO(info->user_ptr[0]);
+}
+
+void
+psp_device_unlock(const struct genl_split_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct socket *socket = info->user_ptr[1];
+ struct psp_dev *psd = info->user_ptr[0];
+
+ mutex_unlock(&psd->lock);
+ if (socket)
+ sockfd_put(socket);
+}
+
+static int
+psp_nl_dev_fill(struct psp_dev *psd, struct sk_buff *rsp,
+ const struct genl_info *info)
+{
+ void *hdr;
+
+ hdr = genlmsg_iput(rsp, info);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(rsp, PSP_A_DEV_ID, psd->id) ||
+ nla_put_u32(rsp, PSP_A_DEV_IFINDEX, psd->main_netdev->ifindex) ||
+ nla_put_u32(rsp, PSP_A_DEV_PSP_VERSIONS_CAP, psd->caps->versions) ||
+ nla_put_u32(rsp, PSP_A_DEV_PSP_VERSIONS_ENA, psd->config.versions))
+ goto err_cancel_msg;
+
+ genlmsg_end(rsp, hdr);
+ return 0;
+
+err_cancel_msg:
+ genlmsg_cancel(rsp, hdr);
+ return -EMSGSIZE;
+}
+
+void psp_nl_notify_dev(struct psp_dev *psd, u32 cmd)
+{
+ struct genl_info info;
+ struct sk_buff *ntf;
+
+ if (!genl_has_listeners(&psp_nl_family, dev_net(psd->main_netdev),
+ PSP_NLGRP_MGMT))
+ return;
+
+ ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!ntf)
+ return;
+
+ genl_info_init_ntf(&info, &psp_nl_family, cmd);
+ if (psp_nl_dev_fill(psd, ntf, &info)) {
+ nlmsg_free(ntf);
+ return;
+ }
+
+ genlmsg_multicast_netns(&psp_nl_family, dev_net(psd->main_netdev), ntf,
+ 0, PSP_NLGRP_MGMT, GFP_KERNEL);
+}
+
+int psp_nl_dev_get_doit(struct sk_buff *req, struct genl_info *info)
+{
+ struct psp_dev *psd = info->user_ptr[0];
+ struct sk_buff *rsp;
+ int err;
+
+ rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!rsp)
+ return -ENOMEM;
+
+ err = psp_nl_dev_fill(psd, rsp, info);
+ if (err)
+ goto err_free_msg;
+
+ return genlmsg_reply(rsp, info);
+
+err_free_msg:
+ nlmsg_free(rsp);
+ return err;
+}
+
+static int
+psp_nl_dev_get_dumpit_one(struct sk_buff *rsp, struct netlink_callback *cb,
+ struct psp_dev *psd)
+{
+ if (psp_dev_check_access(psd, sock_net(rsp->sk)))
+ return 0;
+
+ return psp_nl_dev_fill(psd, rsp, genl_info_dump(cb));
+}
+
+int psp_nl_dev_get_dumpit(struct sk_buff *rsp, struct netlink_callback *cb)
+{
+ struct psp_dev *psd;
+ int err = 0;
+
+ mutex_lock(&psp_devs_lock);
+ xa_for_each_start(&psp_devs, cb->args[0], psd, cb->args[0]) {
+ mutex_lock(&psd->lock);
+ err = psp_nl_dev_get_dumpit_one(rsp, cb, psd);
+ mutex_unlock(&psd->lock);
+ if (err)
+ break;
+ }
+ mutex_unlock(&psp_devs_lock);
+
+ return err;
+}
+
+int psp_nl_dev_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct psp_dev *psd = info->user_ptr[0];
+ struct psp_dev_config new_config;
+ struct sk_buff *rsp;
+ int err;
+
+ memcpy(&new_config, &psd->config, sizeof(new_config));
+
+ if (info->attrs[PSP_A_DEV_PSP_VERSIONS_ENA]) {
+ new_config.versions =
+ nla_get_u32(info->attrs[PSP_A_DEV_PSP_VERSIONS_ENA]);
+ if (new_config.versions & ~psd->caps->versions) {
+ NL_SET_ERR_MSG(info->extack, "Requested PSP versions not supported by the device");
+ return -EINVAL;
+ }
+ } else {
+ NL_SET_ERR_MSG(info->extack, "No settings present");
+ return -EINVAL;
+ }
+
+ rsp = psp_nl_reply_new(info);
+ if (!rsp)
+ return -ENOMEM;
+
+ if (memcmp(&new_config, &psd->config, sizeof(new_config))) {
+ err = psd->ops->set_config(psd, &new_config, info->extack);
+ if (err)
+ goto err_free_rsp;
+
+ memcpy(&psd->config, &new_config, sizeof(new_config));
+ }
+
+ psp_nl_notify_dev(psd, PSP_CMD_DEV_CHANGE_NTF);
+
+ return psp_nl_reply_send(rsp, info);
+
+err_free_rsp:
+ nlmsg_free(rsp);
+ return err;
+}
+
+int psp_nl_key_rotate_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct psp_dev *psd = info->user_ptr[0];
+ struct genl_info ntf_info;
+ struct sk_buff *ntf, *rsp;
+ u8 prev_gen;
+ int err;
+
+ rsp = psp_nl_reply_new(info);
+ if (!rsp)
+ return -ENOMEM;
+
+ genl_info_init_ntf(&ntf_info, &psp_nl_family, PSP_CMD_KEY_ROTATE_NTF);
+ ntf = psp_nl_reply_new(&ntf_info);
+ if (!ntf) {
+ err = -ENOMEM;
+ goto err_free_rsp;
+ }
+
+ if (nla_put_u32(rsp, PSP_A_DEV_ID, psd->id) ||
+ nla_put_u32(ntf, PSP_A_DEV_ID, psd->id)) {
+ err = -EMSGSIZE;
+ goto err_free_ntf;
+ }
+
+ /* suggest the next gen number, driver can override */
+ prev_gen = psd->generation;
+ psd->generation = (prev_gen + 1) & PSP_GEN_VALID_MASK;
+
+ err = psd->ops->key_rotate(psd, info->extack);
+ if (err)
+ goto err_free_ntf;
+
+ WARN_ON_ONCE((psd->generation && psd->generation == prev_gen) ||
+ psd->generation & ~PSP_GEN_VALID_MASK);
+
+ psp_assocs_key_rotated(psd);
+
+ nlmsg_end(ntf, (struct nlmsghdr *)ntf->data);
+ genlmsg_multicast_netns(&psp_nl_family, dev_net(psd->main_netdev), ntf,
+ 0, PSP_NLGRP_USE, GFP_KERNEL);
+ return psp_nl_reply_send(rsp, info);
+
+err_free_ntf:
+ nlmsg_free(ntf);
+err_free_rsp:
+ nlmsg_free(rsp);
+ return err;
+}
+
+/* Key etc. */
+
+int psp_assoc_device_get_locked(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info)
+{
+ struct socket *socket;
+ struct psp_dev *psd;
+ struct nlattr *id;
+ int fd, err;
+
+ if (GENL_REQ_ATTR_CHECK(info, PSP_A_ASSOC_SOCK_FD))
+ return -EINVAL;
+
+ fd = nla_get_u32(info->attrs[PSP_A_ASSOC_SOCK_FD]);
+ socket = sockfd_lookup(fd, &err);
+ if (!socket)
+ return err;
+
+ if (!sk_is_tcp(socket->sk)) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ info->attrs[PSP_A_ASSOC_SOCK_FD],
+ "Unsupported socket family and type");
+ err = -EOPNOTSUPP;
+ goto err_sock_put;
+ }
+
+ psd = psp_dev_get_for_sock(socket->sk);
+ if (psd) {
+ err = psp_dev_check_access(psd, genl_info_net(info));
+ if (err) {
+ psp_dev_put(psd);
+ psd = NULL;
+ }
+ }
+
+ if (!psd && GENL_REQ_ATTR_CHECK(info, PSP_A_ASSOC_DEV_ID)) {
+ err = -EINVAL;
+ goto err_sock_put;
+ }
+
+ id = info->attrs[PSP_A_ASSOC_DEV_ID];
+ if (psd) {
+ mutex_lock(&psd->lock);
+ if (id && psd->id != nla_get_u32(id)) {
+ mutex_unlock(&psd->lock);
+ NL_SET_ERR_MSG_ATTR(info->extack, id,
+ "Device id vs socket mismatch");
+ err = -EINVAL;
+ goto err_psd_put;
+ }
+
+ psp_dev_put(psd);
+ } else {
+ psd = psp_device_get_and_lock(genl_info_net(info), id);
+ if (IS_ERR(psd)) {
+ err = PTR_ERR(psd);
+ goto err_sock_put;
+ }
+ }
+
+ info->user_ptr[0] = psd;
+ info->user_ptr[1] = socket;
+
+ return 0;
+
+err_psd_put:
+ psp_dev_put(psd);
+err_sock_put:
+ sockfd_put(socket);
+ return err;
+}
+
+static int
+psp_nl_parse_key(struct genl_info *info, u32 attr, struct psp_key_parsed *key,
+ unsigned int key_sz)
+{
+ struct nlattr *nest = info->attrs[attr];
+ struct nlattr *tb[PSP_A_KEYS_SPI + 1];
+ u32 spi;
+ int err;
+
+ err = nla_parse_nested(tb, ARRAY_SIZE(tb) - 1, nest,
+ psp_keys_nl_policy, info->extack);
+ if (err)
+ return err;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, nest, tb, PSP_A_KEYS_KEY) ||
+ NL_REQ_ATTR_CHECK(info->extack, nest, tb, PSP_A_KEYS_SPI))
+ return -EINVAL;
+
+ if (nla_len(tb[PSP_A_KEYS_KEY]) != key_sz) {
+ NL_SET_ERR_MSG_ATTR(info->extack, tb[PSP_A_KEYS_KEY],
+ "incorrect key length");
+ return -EINVAL;
+ }
+
+ spi = nla_get_u32(tb[PSP_A_KEYS_SPI]);
+ if (!(spi & PSP_SPI_KEY_ID)) {
+ NL_SET_ERR_MSG_ATTR(info->extack, tb[PSP_A_KEYS_KEY],
+ "invalid SPI: lower 31b must be non-zero");
+ return -EINVAL;
+ }
+
+ key->spi = cpu_to_be32(spi);
+ memcpy(key->key, nla_data(tb[PSP_A_KEYS_KEY]), key_sz);
+
+ return 0;
+}
+
+static int
+psp_nl_put_key(struct sk_buff *skb, u32 attr, u32 version,
+ struct psp_key_parsed *key)
+{
+ int key_sz = psp_key_size(version);
+ void *nest;
+
+ nest = nla_nest_start(skb, attr);
+
+ if (nla_put_u32(skb, PSP_A_KEYS_SPI, be32_to_cpu(key->spi)) ||
+ nla_put(skb, PSP_A_KEYS_KEY, key_sz, key->key)) {
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+ }
+
+ nla_nest_end(skb, nest);
+
+ return 0;
+}
+
+int psp_nl_rx_assoc_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct socket *socket = info->user_ptr[1];
+ struct psp_dev *psd = info->user_ptr[0];
+ struct psp_key_parsed key;
+ struct psp_assoc *pas;
+ struct sk_buff *rsp;
+ u32 version;
+ int err;
+
+ if (GENL_REQ_ATTR_CHECK(info, PSP_A_ASSOC_VERSION))
+ return -EINVAL;
+
+ version = nla_get_u32(info->attrs[PSP_A_ASSOC_VERSION]);
+ if (!(psd->caps->versions & (1 << version))) {
+ NL_SET_BAD_ATTR(info->extack, info->attrs[PSP_A_ASSOC_VERSION]);
+ return -EOPNOTSUPP;
+ }
+
+ rsp = psp_nl_reply_new(info);
+ if (!rsp)
+ return -ENOMEM;
+
+ pas = psp_assoc_create(psd);
+ if (!pas) {
+ err = -ENOMEM;
+ goto err_free_rsp;
+ }
+ pas->version = version;
+
+ err = psd->ops->rx_spi_alloc(psd, version, &key, info->extack);
+ if (err)
+ goto err_free_pas;
+
+ if (nla_put_u32(rsp, PSP_A_ASSOC_DEV_ID, psd->id) ||
+ psp_nl_put_key(rsp, PSP_A_ASSOC_RX_KEY, version, &key)) {
+ err = -EMSGSIZE;
+ goto err_free_pas;
+ }
+
+ err = psp_sock_assoc_set_rx(socket->sk, pas, &key, info->extack);
+ if (err) {
+ NL_SET_BAD_ATTR(info->extack, info->attrs[PSP_A_ASSOC_SOCK_FD]);
+ goto err_free_pas;
+ }
+ psp_assoc_put(pas);
+
+ return psp_nl_reply_send(rsp, info);
+
+err_free_pas:
+ psp_assoc_put(pas);
+err_free_rsp:
+ nlmsg_free(rsp);
+ return err;
+}
+
+int psp_nl_tx_assoc_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct socket *socket = info->user_ptr[1];
+ struct psp_dev *psd = info->user_ptr[0];
+ struct psp_key_parsed key;
+ struct sk_buff *rsp;
+ unsigned int key_sz;
+ u32 version;
+ int err;
+
+ if (GENL_REQ_ATTR_CHECK(info, PSP_A_ASSOC_VERSION) ||
+ GENL_REQ_ATTR_CHECK(info, PSP_A_ASSOC_TX_KEY))
+ return -EINVAL;
+
+ version = nla_get_u32(info->attrs[PSP_A_ASSOC_VERSION]);
+ if (!(psd->caps->versions & (1 << version))) {
+ NL_SET_BAD_ATTR(info->extack, info->attrs[PSP_A_ASSOC_VERSION]);
+ return -EOPNOTSUPP;
+ }
+
+ key_sz = psp_key_size(version);
+ if (!key_sz)
+ return -EINVAL;
+
+ err = psp_nl_parse_key(info, PSP_A_ASSOC_TX_KEY, &key, key_sz);
+ if (err < 0)
+ return err;
+
+ rsp = psp_nl_reply_new(info);
+ if (!rsp)
+ return -ENOMEM;
+
+ err = psp_sock_assoc_set_tx(socket->sk, psd, version, &key,
+ info->extack);
+ if (err)
+ goto err_free_msg;
+
+ return psp_nl_reply_send(rsp, info);
+
+err_free_msg:
+ nlmsg_free(rsp);
+ return err;
+}
diff --git a/net/psp/psp_sock.c b/net/psp/psp_sock.c
new file mode 100644
index 000000000000..a931d825d1cc
--- /dev/null
+++ b/net/psp/psp_sock.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/file.h>
+#include <linux/net.h>
+#include <linux/rcupdate.h>
+#include <linux/tcp.h>
+
+#include <net/ip.h>
+#include <net/psp.h>
+#include "psp.h"
+
+struct psp_dev *psp_dev_get_for_sock(struct sock *sk)
+{
+ struct psp_dev *psd = NULL;
+ struct dst_entry *dst;
+
+ rcu_read_lock();
+ dst = __sk_dst_get(sk);
+ if (dst) {
+ psd = rcu_dereference(dst_dev_rcu(dst)->psp_dev);
+ if (psd && !psp_dev_tryget(psd))
+ psd = NULL;
+ }
+ rcu_read_unlock();
+
+ return psd;
+}
+
+static struct sk_buff *
+psp_validate_xmit(struct sock *sk, struct net_device *dev, struct sk_buff *skb)
+{
+ struct psp_assoc *pas;
+ bool good;
+
+ rcu_read_lock();
+ pas = psp_skb_get_assoc_rcu(skb);
+ good = !pas || rcu_access_pointer(dev->psp_dev) == pas->psd;
+ rcu_read_unlock();
+ if (!good) {
+ sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_PSP_OUTPUT);
+ return NULL;
+ }
+
+ return skb;
+}
+
+struct psp_assoc *psp_assoc_create(struct psp_dev *psd)
+{
+ struct psp_assoc *pas;
+
+ lockdep_assert_held(&psd->lock);
+
+ pas = kzalloc(struct_size(pas, drv_data, psd->caps->assoc_drv_spc),
+ GFP_KERNEL_ACCOUNT);
+ if (!pas)
+ return NULL;
+
+ pas->psd = psd;
+ pas->dev_id = psd->id;
+ pas->generation = psd->generation;
+ psp_dev_get(psd);
+ refcount_set(&pas->refcnt, 1);
+
+ list_add_tail(&pas->assocs_list, &psd->active_assocs);
+
+ return pas;
+}
+
+static struct psp_assoc *psp_assoc_dummy(struct psp_assoc *pas)
+{
+ struct psp_dev *psd = pas->psd;
+ size_t sz;
+
+ lockdep_assert_held(&psd->lock);
+
+ sz = struct_size(pas, drv_data, psd->caps->assoc_drv_spc);
+ return kmemdup(pas, sz, GFP_KERNEL);
+}
+
+static int psp_dev_tx_key_add(struct psp_dev *psd, struct psp_assoc *pas,
+ struct netlink_ext_ack *extack)
+{
+ return psd->ops->tx_key_add(psd, pas, extack);
+}
+
+void psp_dev_tx_key_del(struct psp_dev *psd, struct psp_assoc *pas)
+{
+ if (pas->tx.spi)
+ psd->ops->tx_key_del(psd, pas);
+ list_del(&pas->assocs_list);
+}
+
+static void psp_assoc_free(struct work_struct *work)
+{
+ struct psp_assoc *pas = container_of(work, struct psp_assoc, work);
+ struct psp_dev *psd = pas->psd;
+
+ mutex_lock(&psd->lock);
+ if (psd->ops)
+ psp_dev_tx_key_del(psd, pas);
+ mutex_unlock(&psd->lock);
+ psp_dev_put(psd);
+ kfree(pas);
+}
+
+static void psp_assoc_free_queue(struct rcu_head *head)
+{
+ struct psp_assoc *pas = container_of(head, struct psp_assoc, rcu);
+
+ INIT_WORK(&pas->work, psp_assoc_free);
+ schedule_work(&pas->work);
+}
+
+/**
+ * psp_assoc_put() - release a reference on a PSP association
+ * @pas: association to release
+ */
+void psp_assoc_put(struct psp_assoc *pas)
+{
+ if (pas && refcount_dec_and_test(&pas->refcnt))
+ call_rcu(&pas->rcu, psp_assoc_free_queue);
+}
+
+void psp_sk_assoc_free(struct sock *sk)
+{
+ struct psp_assoc *pas = rcu_dereference_protected(sk->psp_assoc, 1);
+
+ rcu_assign_pointer(sk->psp_assoc, NULL);
+ psp_assoc_put(pas);
+}
+
+int psp_sock_assoc_set_rx(struct sock *sk, struct psp_assoc *pas,
+ struct psp_key_parsed *key,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ memcpy(&pas->rx, key, sizeof(*key));
+
+ lock_sock(sk);
+
+ if (psp_sk_assoc(sk)) {
+ NL_SET_ERR_MSG(extack, "Socket already has PSP state");
+ err = -EBUSY;
+ goto exit_unlock;
+ }
+
+ refcount_inc(&pas->refcnt);
+ rcu_assign_pointer(sk->psp_assoc, pas);
+ err = 0;
+
+exit_unlock:
+ release_sock(sk);
+
+ return err;
+}
+
+static int psp_sock_recv_queue_check(struct sock *sk, struct psp_assoc *pas)
+{
+ struct psp_skb_ext *pse;
+ struct sk_buff *skb;
+
+ skb_rbtree_walk(skb, &tcp_sk(sk)->out_of_order_queue) {
+ pse = skb_ext_find(skb, SKB_EXT_PSP);
+ if (!psp_pse_matches_pas(pse, pas))
+ return -EBUSY;
+ }
+
+ skb_queue_walk(&sk->sk_receive_queue, skb) {
+ pse = skb_ext_find(skb, SKB_EXT_PSP);
+ if (!psp_pse_matches_pas(pse, pas))
+ return -EBUSY;
+ }
+ return 0;
+}
+
+int psp_sock_assoc_set_tx(struct sock *sk, struct psp_dev *psd,
+ u32 version, struct psp_key_parsed *key,
+ struct netlink_ext_ack *extack)
+{
+ struct inet_connection_sock *icsk;
+ struct psp_assoc *pas, *dummy;
+ int err;
+
+ lock_sock(sk);
+
+ pas = psp_sk_assoc(sk);
+ if (!pas) {
+ NL_SET_ERR_MSG(extack, "Socket has no Rx key");
+ err = -EINVAL;
+ goto exit_unlock;
+ }
+ if (pas->psd != psd) {
+ NL_SET_ERR_MSG(extack, "Rx key from different device");
+ err = -EINVAL;
+ goto exit_unlock;
+ }
+ if (pas->version != version) {
+ NL_SET_ERR_MSG(extack,
+ "PSP version mismatch with existing state");
+ err = -EINVAL;
+ goto exit_unlock;
+ }
+ if (pas->tx.spi) {
+ NL_SET_ERR_MSG(extack, "Tx key already set");
+ err = -EBUSY;
+ goto exit_unlock;
+ }
+
+ err = psp_sock_recv_queue_check(sk, pas);
+ if (err) {
+ NL_SET_ERR_MSG(extack, "Socket has incompatible segments already in the recv queue");
+ goto exit_unlock;
+ }
+
+ /* Pass a fake association to drivers to make sure they don't
+ * try to store pointers to it. For re-keying we'll need to
+ * re-allocate the assoc structures.
+ */
+ dummy = psp_assoc_dummy(pas);
+ if (!dummy) {
+ err = -ENOMEM;
+ goto exit_unlock;
+ }
+
+ memcpy(&dummy->tx, key, sizeof(*key));
+ err = psp_dev_tx_key_add(psd, dummy, extack);
+ if (err)
+ goto exit_free_dummy;
+
+ memcpy(pas->drv_data, dummy->drv_data, psd->caps->assoc_drv_spc);
+ memcpy(&pas->tx, key, sizeof(*key));
+
+ WRITE_ONCE(sk->sk_validate_xmit_skb, psp_validate_xmit);
+ tcp_write_collapse_fence(sk);
+ pas->upgrade_seq = tcp_sk(sk)->rcv_nxt;
+
+ icsk = inet_csk(sk);
+ icsk->icsk_ext_hdr_len += psp_sk_overhead(sk);
+ icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
+
+exit_free_dummy:
+ kfree(dummy);
+exit_unlock:
+ release_sock(sk);
+ return err;
+}
+
+void psp_assocs_key_rotated(struct psp_dev *psd)
+{
+ struct psp_assoc *pas, *next;
+
+ /* Mark the stale associations as invalid; they will no longer
+ * be able to Rx any traffic.
+ */
+ list_for_each_entry_safe(pas, next, &psd->prev_assocs, assocs_list)
+ pas->generation |= ~PSP_GEN_VALID_MASK;
+ list_splice_init(&psd->prev_assocs, &psd->stale_assocs);
+ list_splice_init(&psd->active_assocs, &psd->prev_assocs);
+
+ /* TODO: we should inform the sockets that got shut down */
+}
+
+void psp_twsk_init(struct inet_timewait_sock *tw, const struct sock *sk)
+{
+ struct psp_assoc *pas = psp_sk_assoc(sk);
+
+ if (pas)
+ refcount_inc(&pas->refcnt);
+ rcu_assign_pointer(tw->psp_assoc, pas);
+ tw->tw_validate_xmit_skb = psp_validate_xmit;
+}
+
+void psp_twsk_assoc_free(struct inet_timewait_sock *tw)
+{
+ struct psp_assoc *pas = rcu_dereference_protected(tw->psp_assoc, 1);
+
+ rcu_assign_pointer(tw->psp_assoc, NULL);
+ psp_assoc_put(pas);
+}
+
+void psp_reply_set_decrypted(const struct sock *sk, struct sk_buff *skb)
+{
+ struct psp_assoc *pas;
+
+ rcu_read_lock();
+ pas = psp_sk_get_assoc_rcu(sk);
+ if (pas && pas->tx.spi)
+ skb->decrypted = 1;
+ rcu_read_unlock();
+}
+EXPORT_IPV6_MOD_GPL(psp_reply_set_decrypted);
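psp_assoc_put() above frees in two stages because the final teardown needs to sleep (it takes psd->lock and may call into the driver), which is not allowed in the softirq context where RCU callbacks run; the RCU callback therefore only hands the object off to a workqueue. A generic sketch of this deferred-free pattern, using illustrative names (struct obj, obj_put) rather than the PSP types:

struct obj {
	refcount_t refcnt;
	struct rcu_head rcu;
	struct work_struct work;
};

static void obj_free_work(struct work_struct *work)
{
	struct obj *o = container_of(work, struct obj, work);

	/* process context: sleeping teardown (mutexes, driver calls) is fine */
	kfree(o);
}

static void obj_free_rcu(struct rcu_head *head)
{
	struct obj *o = container_of(head, struct obj, rcu);

	/* softirq context: only hand off to process context */
	INIT_WORK(&o->work, obj_free_work);
	schedule_work(&o->work);
}

static void obj_put(struct obj *o)
{
	if (o && refcount_dec_and_test(&o->refcnt))
		call_rcu(&o->rcu, obj_free_rcu);
}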
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 8435a20968ef..4a7217fbeab6 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -242,7 +242,7 @@ static __poll_t rds_poll(struct file *file, struct socket *sock,
if (rs->rs_snd_bytes < rds_sk_sndbuf(rs))
mask |= (EPOLLOUT | EPOLLWRNORM);
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
- mask |= POLLERR;
+ mask |= EPOLLERR;
read_unlock_irqrestore(&rs->rs_recv_lock, flags);
/* clear state any time we wake a seen-congested socket */
@@ -598,7 +598,7 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
}
if (addr_type & IPV6_ADDR_LINKLOCAL) {
- /* If socket is arleady bound to a link local address,
+ /* If socket is already bound to a link local address,
* the peer address must be on the same link.
*/
if (sin6->sin6_scope_id == 0 ||
diff --git a/net/rds/connection.c b/net/rds/connection.c
index d62f486ab29f..68bc88cce84e 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -57,16 +57,17 @@ static struct hlist_head *rds_conn_bucket(const struct in6_addr *laddr,
static u32 rds6_hash_secret __read_mostly;
static u32 rds_hash_secret __read_mostly;
- u32 lhash, fhash, hash;
+ __be32 lhash, fhash;
+ u32 hash;
net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));
net_get_random_once(&rds6_hash_secret, sizeof(rds6_hash_secret));
- lhash = (__force u32)laddr->s6_addr32[3];
+ lhash = laddr->s6_addr32[3];
#if IS_ENABLED(CONFIG_IPV6)
- fhash = __ipv6_addr_jhash(faddr, rds6_hash_secret);
+ fhash = (__force __be32)__ipv6_addr_jhash(faddr, rds6_hash_secret);
#else
- fhash = (__force u32)faddr->s6_addr32[3];
+ fhash = faddr->s6_addr32[3];
#endif
hash = __inet_ehashfn(lhash, 0, fhash, 0, rds_hash_secret);
diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
index 28c1b0022178..bd861191157b 100644
--- a/net/rds/ib_frmr.c
+++ b/net/rds/ib_frmr.c
@@ -133,12 +133,15 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
&off, PAGE_SIZE);
- if (unlikely(ret != ibmr->sg_dma_len))
- return ret < 0 ? ret : -EINVAL;
+ if (unlikely(ret != ibmr->sg_dma_len)) {
+ ret = ret < 0 ? ret : -EINVAL;
+ goto out_inc;
+ }
- if (cmpxchg(&frmr->fr_state,
- FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE)
- return -EBUSY;
+ if (cmpxchg(&frmr->fr_state, FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE) {
+ ret = -EBUSY;
+ goto out_inc;
+ }
atomic_inc(&ibmr->ic->i_fastreg_inuse_count);
@@ -166,11 +169,10 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
/* Failure here can be because of -ENOMEM as well */
rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
- atomic_inc(&ibmr->ic->i_fastreg_wrs);
if (printk_ratelimit())
pr_warn("RDS/IB: %s returned error(%d)\n",
__func__, ret);
- goto out;
+ goto out_inc;
}
/* Wait for the registration to complete in order to prevent an invalid
@@ -179,8 +181,10 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
*/
wait_event(frmr->fr_reg_done, !frmr->fr_reg);
-out:
+ return ret;
+out_inc:
+ atomic_inc(&ibmr->ic->i_fastreg_wrs);
return ret;
}
diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h
index ea5e9aee4959..5884de8c6f45 100644
--- a/net/rds/ib_mr.h
+++ b/net/rds/ib_mr.h
@@ -108,7 +108,6 @@ struct rds_ib_mr_pool {
};
extern struct workqueue_struct *rds_ib_mr_wq;
-extern bool prefer_frmr;
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_dev,
int npages);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index d1cfceeff133..6585164c7059 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -672,7 +672,8 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
int rds_ib_mr_init(void)
{
- rds_ib_mr_wq = alloc_workqueue("rds_mr_flushd", WQ_MEM_RECLAIM, 0);
+ rds_ib_mr_wq = alloc_workqueue("rds_mr_flushd",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!rds_ib_mr_wq)
return -ENOMEM;
return 0;
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index e53b7f266bd7..4248dfa816eb 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -1034,7 +1034,7 @@ void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
rds_ib_stats_inc(s_ib_rx_ring_empty);
if (rds_ib_ring_low(&ic->i_recv_ring)) {
- rds_ib_recv_refill(conn, 0, GFP_NOWAIT | __GFP_NOWARN);
+ rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
rds_ib_stats_inc(s_ib_rx_refill_from_cq);
}
}
diff --git a/net/rds/message.c b/net/rds/message.c
index 7af59d2443e5..199a899a43e9 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -44,8 +44,8 @@ static unsigned int rds_exthdr_size[__RDS_EXTHDR_MAX] = {
[RDS_EXTHDR_VERSION] = sizeof(struct rds_ext_header_version),
[RDS_EXTHDR_RDMA] = sizeof(struct rds_ext_header_rdma),
[RDS_EXTHDR_RDMA_DEST] = sizeof(struct rds_ext_header_rdma_dest),
-[RDS_EXTHDR_NPATHS] = sizeof(u16),
-[RDS_EXTHDR_GEN_NUM] = sizeof(u32),
+[RDS_EXTHDR_NPATHS] = sizeof(__be16),
+[RDS_EXTHDR_GEN_NUM] = sizeof(__be32),
};
void rds_message_addref(struct rds_message *rm)
diff --git a/net/rds/rds.h b/net/rds/rds.h
index dc360252c515..5b1c072e2e7f 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -93,7 +93,7 @@ enum {
/* Max number of multipaths per RDS connection. Must be a power of 2 */
#define RDS_MPATH_WORKERS 8
-#define RDS_MPATH_HASH(rs, n) (jhash_1word((rs)->rs_bound_port, \
+#define RDS_MPATH_HASH(rs, n) (jhash_1word(ntohs((rs)->rs_bound_port), \
(rs)->rs_hash_initval) & ((n) - 1))
#define IS_CANONICAL(laddr, faddr) (htonl(laddr) < htonl(faddr))
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 5627f80013f8..66205d6924bf 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -202,8 +202,8 @@ static void rds_recv_hs_exthdrs(struct rds_header *hdr,
unsigned int pos = 0, type, len;
union {
struct rds_ext_header_version version;
- u16 rds_npaths;
- u32 rds_gen_num;
+ __be16 rds_npaths;
+ __be32 rds_gen_num;
} buffer;
u32 new_peer_gen_num = 0;
diff --git a/net/rds/send.c b/net/rds/send.c
index 09a280110654..0b3d0ef2f008 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -232,7 +232,7 @@ restart:
* If not already working on one, grab the next message.
*
* cp_xmit_rm holds a ref while we're sending this message down
- * the connction. We can use this ref while holding the
+ * the connection. We can use this ref while holding the
* send_sem.. rds_send_reset() is serialized with it.
*/
if (!rm) {
@@ -1454,8 +1454,8 @@ rds_send_probe(struct rds_conn_path *cp, __be16 sport,
if (RDS_HS_PROBE(be16_to_cpu(sport), be16_to_cpu(dport)) &&
cp->cp_conn->c_trans->t_mp_capable) {
- u16 npaths = cpu_to_be16(RDS_MPATH_WORKERS);
- u32 my_gen_num = cpu_to_be32(cp->cp_conn->c_my_gen_num);
+ __be16 npaths = cpu_to_be16(RDS_MPATH_WORKERS);
+ __be32 my_gen_num = cpu_to_be32(cp->cp_conn->c_my_gen_num);
rds_message_add_extension(&rm->m_inc.i_hdr,
RDS_EXTHDR_NPATHS, &npaths,
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index d89bd8d0c354..91e34af3fe5d 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -105,10 +105,6 @@ int rds_tcp_accept_one(struct socket *sock)
int conn_state;
struct rds_conn_path *cp;
struct in6_addr *my_addr, *peer_addr;
- struct proto_accept_arg arg = {
- .flags = O_NONBLOCK,
- .kern = true,
- };
#if !IS_ENABLED(CONFIG_IPV6)
struct in6_addr saddr, daddr;
#endif
@@ -117,25 +113,9 @@ int rds_tcp_accept_one(struct socket *sock)
if (!sock) /* module unload or netns delete in progress */
return -ENETUNREACH;
- ret = sock_create_lite(sock->sk->sk_family,
- sock->sk->sk_type, sock->sk->sk_protocol,
- &new_sock);
+ ret = kernel_accept(sock, &new_sock, O_NONBLOCK);
if (ret)
- goto out;
-
- ret = sock->ops->accept(sock, new_sock, &arg);
- if (ret < 0)
- goto out;
-
- /* sock_create_lite() does not get a hold on the owner module so we
- * need to do it here. Note that sock_release() uses sock->ops to
- * determine if it needs to decrement the reference count. So set
- * sock->ops after calling accept() in case that fails. And there's
- * no need to do try_module_get() as the listener should have a hold
- * already.
- */
- new_sock->ops = sock->ops;
- __module_get(new_sock->ops->owner);
+ return ret;
rds_tcp_keepalive(new_sock);
if (!rds_tcp_tune(new_sock)) {
@@ -298,15 +278,15 @@ struct socket *rds_tcp_listen_init(struct net *net, bool isv6)
sin6 = (struct sockaddr_in6 *)&ss;
sin6->sin6_family = PF_INET6;
sin6->sin6_addr = in6addr_any;
- sin6->sin6_port = (__force u16)htons(RDS_TCP_PORT);
+ sin6->sin6_port = htons(RDS_TCP_PORT);
sin6->sin6_scope_id = 0;
sin6->sin6_flowinfo = 0;
addr_len = sizeof(*sin6);
} else {
sin = (struct sockaddr_in *)&ss;
sin->sin_family = PF_INET;
- sin->sin_addr.s_addr = INADDR_ANY;
- sin->sin_port = (__force u16)htons(RDS_TCP_PORT);
+ sin->sin_addr.s_addr = htonl(INADDR_ANY);
+ sin->sin_port = htons(RDS_TCP_PORT);
addr_len = sizeof(*sin);
}
diff --git a/net/rfkill/input.c b/net/rfkill/input.c
index 598d0a61bda7..53d286b10843 100644
--- a/net/rfkill/input.c
+++ b/net/rfkill/input.c
@@ -159,7 +159,7 @@ static void rfkill_schedule_global_op(enum rfkill_sched_op op)
rfkill_op_pending = true;
if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
/* bypass the limiter for EPO */
- mod_delayed_work(system_wq, &rfkill_op_work, 0);
+ mod_delayed_work(system_percpu_wq, &rfkill_op_work, 0);
rfkill_last_scheduled = jiffies;
} else
rfkill_schedule_ratelimited();
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 41e657e97761..cf2dcec6ce5a 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -94,10 +94,10 @@ static const struct dmi_system_id rfkill_gpio_deny_table[] = {
static int rfkill_gpio_probe(struct platform_device *pdev)
{
struct rfkill_gpio_data *rfkill;
- struct gpio_desc *gpio;
+ const char *type_name = NULL;
const char *name_property;
const char *type_property;
- const char *type_name;
+ struct gpio_desc *gpio;
int ret;
if (dmi_check_system(rfkill_gpio_deny_table))
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 4e72b636a46a..543f9e8ebb69 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -170,7 +170,7 @@ void rose_kill_by_neigh(struct rose_neigh *neigh)
if (rose->neighbour == neigh) {
rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
- rose->neighbour->use--;
+ rose_neigh_put(rose->neighbour);
rose->neighbour = NULL;
}
}
@@ -212,7 +212,7 @@ start:
if (rose->device == dev) {
rose_disconnect(sk, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
if (rose->neighbour)
- rose->neighbour->use--;
+ rose_neigh_put(rose->neighbour);
netdev_put(rose->device, &rose->dev_tracker);
rose->device = NULL;
}
@@ -655,7 +655,7 @@ static int rose_release(struct socket *sock)
break;
case ROSE_STATE_2:
- rose->neighbour->use--;
+ rose_neigh_put(rose->neighbour);
release_sock(sk);
rose_disconnect(sk, 0, -1, -1);
lock_sock(sk);
@@ -823,6 +823,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
rose->lci = rose_new_lci(rose->neighbour);
if (!rose->lci) {
err = -ENETUNREACH;
+ rose_neigh_put(rose->neighbour);
goto out_release;
}
@@ -834,12 +835,14 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
dev = rose_dev_first();
if (!dev) {
err = -ENETUNREACH;
+ rose_neigh_put(rose->neighbour);
goto out_release;
}
user = ax25_findbyuid(current_euid());
if (!user) {
err = -EINVAL;
+ rose_neigh_put(rose->neighbour);
dev_put(dev);
goto out_release;
}
@@ -874,8 +877,6 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
rose->state = ROSE_STATE_1;
- rose->neighbour->use++;
-
rose_write_internal(sk, ROSE_CALL_REQUEST);
rose_start_heartbeat(sk);
rose_start_t1timer(sk);
@@ -1077,7 +1078,7 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros
GFP_ATOMIC);
make_rose->facilities = facilities;
- make_rose->neighbour->use++;
+ rose_neigh_hold(make_rose->neighbour);
if (rose_sk(sk)->defer) {
make_rose->state = ROSE_STATE_5;
diff --git a/net/rose/rose_in.c b/net/rose/rose_in.c
index 4d67f36dce1b..0276b393f0e5 100644
--- a/net/rose/rose_in.c
+++ b/net/rose/rose_in.c
@@ -56,7 +56,7 @@ static int rose_state1_machine(struct sock *sk, struct sk_buff *skb, int framety
case ROSE_CLEAR_REQUEST:
rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
rose_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
- rose->neighbour->use--;
+ rose_neigh_put(rose->neighbour);
break;
default:
@@ -79,12 +79,12 @@ static int rose_state2_machine(struct sock *sk, struct sk_buff *skb, int framety
case ROSE_CLEAR_REQUEST:
rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
- rose->neighbour->use--;
+ rose_neigh_put(rose->neighbour);
break;
case ROSE_CLEAR_CONFIRMATION:
rose_disconnect(sk, 0, -1, -1);
- rose->neighbour->use--;
+ rose_neigh_put(rose->neighbour);
break;
default:
@@ -101,6 +101,7 @@ static int rose_state2_machine(struct sock *sk, struct sk_buff *skb, int framety
*/
static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
{
+ enum skb_drop_reason dr; /* ignored */
struct rose_sock *rose = rose_sk(sk);
int queued = 0;
@@ -120,7 +121,7 @@ static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int framety
case ROSE_CLEAR_REQUEST:
rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
- rose->neighbour->use--;
+ rose_neigh_put(rose->neighbour);
break;
case ROSE_RR:
@@ -162,7 +163,7 @@ static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int framety
rose_frames_acked(sk, nr);
if (ns == rose->vr) {
rose_start_idletimer(sk);
- if (sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN) == 0 &&
+ if (!sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN, &dr) &&
__sock_queue_rcv_skb(sk, skb) == 0) {
rose->vr = (rose->vr + 1) % ROSE_MODULUS;
queued = 1;
@@ -233,7 +234,7 @@ static int rose_state4_machine(struct sock *sk, struct sk_buff *skb, int framety
case ROSE_CLEAR_REQUEST:
rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
- rose->neighbour->use--;
+ rose_neigh_put(rose->neighbour);
break;
default:
@@ -253,7 +254,7 @@ static int rose_state5_machine(struct sock *sk, struct sk_buff *skb, int framety
if (frametype == ROSE_CLEAR_REQUEST) {
rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
- rose_sk(sk)->neighbour->use--;
+ rose_neigh_put(rose_sk(sk)->neighbour);
}
return 0;
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 2dd6bd3a3011..a1e9b05ef6f5 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -93,11 +93,11 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route,
rose_neigh->ax25 = NULL;
rose_neigh->dev = dev;
rose_neigh->count = 0;
- rose_neigh->use = 0;
rose_neigh->dce_mode = 0;
rose_neigh->loopback = 0;
rose_neigh->number = rose_neigh_no++;
rose_neigh->restarted = 0;
+ refcount_set(&rose_neigh->use, 1);
skb_queue_head_init(&rose_neigh->queue);
@@ -178,6 +178,7 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route,
}
}
rose_neigh->count++;
+ rose_neigh_hold(rose_neigh);
goto out;
}
@@ -187,6 +188,7 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route,
rose_node->neighbour[rose_node->count] = rose_neigh;
rose_node->count++;
rose_neigh->count++;
+ rose_neigh_hold(rose_neigh);
}
out:
@@ -234,20 +236,12 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
if ((s = rose_neigh_list) == rose_neigh) {
rose_neigh_list = rose_neigh->next;
- if (rose_neigh->ax25)
- ax25_cb_put(rose_neigh->ax25);
- kfree(rose_neigh->digipeat);
- kfree(rose_neigh);
return;
}
while (s != NULL && s->next != NULL) {
if (s->next == rose_neigh) {
s->next = rose_neigh->next;
- if (rose_neigh->ax25)
- ax25_cb_put(rose_neigh->ax25);
- kfree(rose_neigh->digipeat);
- kfree(rose_neigh);
return;
}
@@ -263,10 +257,10 @@ static void rose_remove_route(struct rose_route *rose_route)
struct rose_route *s;
if (rose_route->neigh1 != NULL)
- rose_route->neigh1->use--;
+ rose_neigh_put(rose_route->neigh1);
if (rose_route->neigh2 != NULL)
- rose_route->neigh2->use--;
+ rose_neigh_put(rose_route->neigh2);
if ((s = rose_route_list) == rose_route) {
rose_route_list = rose_route->next;
@@ -330,9 +324,12 @@ static int rose_del_node(struct rose_route_struct *rose_route,
for (i = 0; i < rose_node->count; i++) {
if (rose_node->neighbour[i] == rose_neigh) {
rose_neigh->count--;
+ rose_neigh_put(rose_neigh);
- if (rose_neigh->count == 0 && rose_neigh->use == 0)
+ if (rose_neigh->count == 0) {
rose_remove_neigh(rose_neigh);
+ rose_neigh_put(rose_neigh);
+ }
rose_node->count--;
@@ -381,11 +378,11 @@ void rose_add_loopback_neigh(void)
sn->ax25 = NULL;
sn->dev = NULL;
sn->count = 0;
- sn->use = 0;
sn->dce_mode = 1;
sn->loopback = 1;
sn->number = rose_neigh_no++;
sn->restarted = 1;
+ refcount_set(&sn->use, 1);
skb_queue_head_init(&sn->queue);
@@ -436,6 +433,7 @@ int rose_add_loopback_node(const rose_address *address)
rose_node_list = rose_node;
rose_loopback_neigh->count++;
+ rose_neigh_hold(rose_loopback_neigh);
out:
spin_unlock_bh(&rose_node_list_lock);
@@ -467,6 +465,7 @@ void rose_del_loopback_node(const rose_address *address)
rose_remove_node(rose_node);
rose_loopback_neigh->count--;
+ rose_neigh_put(rose_loopback_neigh);
out:
spin_unlock_bh(&rose_node_list_lock);
@@ -497,22 +496,16 @@ void rose_rt_device_down(struct net_device *dev)
t = rose_node;
rose_node = rose_node->next;
- for (i = 0; i < t->count; i++) {
+ for (i = t->count - 1; i >= 0; i--) {
if (t->neighbour[i] != s)
continue;
t->count--;
- switch (i) {
- case 0:
- t->neighbour[0] = t->neighbour[1];
- fallthrough;
- case 1:
- t->neighbour[1] = t->neighbour[2];
- break;
- case 2:
- break;
- }
+ memmove(&t->neighbour[i], &t->neighbour[i + 1],
+ sizeof(t->neighbour[0]) *
+ (t->count - i));
+ rose_neigh_put(s);
}
if (t->count <= 0)
@@ -520,6 +513,7 @@ void rose_rt_device_down(struct net_device *dev)
}
rose_remove_neigh(s);
+ rose_neigh_put(s);
}
spin_unlock_bh(&rose_neigh_list_lock);
spin_unlock_bh(&rose_node_list_lock);
@@ -555,6 +549,7 @@ static int rose_clear_routes(void)
{
struct rose_neigh *s, *rose_neigh;
struct rose_node *t, *rose_node;
+ int i;
spin_lock_bh(&rose_node_list_lock);
spin_lock_bh(&rose_neigh_list_lock);
@@ -565,17 +560,21 @@ static int rose_clear_routes(void)
while (rose_node != NULL) {
t = rose_node;
rose_node = rose_node->next;
- if (!t->loopback)
+
+ if (!t->loopback) {
+ for (i = 0; i < t->count; i++)
+ rose_neigh_put(t->neighbour[i]);
rose_remove_node(t);
+ }
}
while (rose_neigh != NULL) {
s = rose_neigh;
rose_neigh = rose_neigh->next;
- if (s->use == 0 && !s->loopback) {
- s->count = 0;
+ if (!s->loopback) {
rose_remove_neigh(s);
+ rose_neigh_put(s);
}
}
@@ -691,6 +690,7 @@ struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
for (i = 0; i < node->count; i++) {
if (node->neighbour[i]->restarted) {
res = node->neighbour[i];
+ rose_neigh_hold(node->neighbour[i]);
goto out;
}
}
@@ -702,6 +702,7 @@ struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
for (i = 0; i < node->count; i++) {
if (!rose_ftimer_running(node->neighbour[i])) {
res = node->neighbour[i];
+ rose_neigh_hold(node->neighbour[i]);
goto out;
}
failed = 1;
@@ -791,13 +792,13 @@ static void rose_del_route_by_neigh(struct rose_neigh *rose_neigh)
}
if (rose_route->neigh1 == rose_neigh) {
- rose_route->neigh1->use--;
+ rose_neigh_put(rose_route->neigh1);
rose_route->neigh1 = NULL;
rose_transmit_clear_request(rose_route->neigh2, rose_route->lci2, ROSE_OUT_OF_ORDER, 0);
}
if (rose_route->neigh2 == rose_neigh) {
- rose_route->neigh2->use--;
+ rose_neigh_put(rose_route->neigh2);
rose_route->neigh2 = NULL;
rose_transmit_clear_request(rose_route->neigh1, rose_route->lci1, ROSE_OUT_OF_ORDER, 0);
}
@@ -926,7 +927,7 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
rose_clear_queues(sk);
rose->cause = ROSE_NETWORK_CONGESTION;
rose->diagnostic = 0;
- rose->neighbour->use--;
+ rose_neigh_put(rose->neighbour);
rose->neighbour = NULL;
rose->lci = 0;
rose->state = ROSE_STATE_0;
@@ -1051,12 +1052,12 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
if ((new_lci = rose_new_lci(new_neigh)) == 0) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_NETWORK_CONGESTION, 71);
- goto out;
+ goto put_neigh;
}
if ((rose_route = kmalloc(sizeof(*rose_route), GFP_ATOMIC)) == NULL) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_NETWORK_CONGESTION, 120);
- goto out;
+ goto put_neigh;
}
rose_route->lci1 = lci;
@@ -1069,8 +1070,8 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
rose_route->lci2 = new_lci;
rose_route->neigh2 = new_neigh;
- rose_route->neigh1->use++;
- rose_route->neigh2->use++;
+ rose_neigh_hold(rose_route->neigh1);
+ rose_neigh_hold(rose_route->neigh2);
rose_route->next = rose_route_list;
rose_route_list = rose_route;
@@ -1082,6 +1083,8 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
rose_transmit_link(skb, rose_route->neigh2);
res = 1;
+put_neigh:
+ rose_neigh_put(new_neigh);
out:
spin_unlock_bh(&rose_route_list_lock);
spin_unlock_bh(&rose_neigh_list_lock);
@@ -1197,7 +1200,7 @@ static int rose_neigh_show(struct seq_file *seq, void *v)
(rose_neigh->loopback) ? "RSLOOP-0" : ax2asc(buf, &rose_neigh->callsign),
rose_neigh->dev ? rose_neigh->dev->name : "???",
rose_neigh->count,
- rose_neigh->use,
+ refcount_read(&rose_neigh->use) - rose_neigh->count - 1,
(rose_neigh->dce_mode) ? "DCE" : "DTE",
(rose_neigh->restarted) ? "yes" : "no",
ax25_display_timer(&rose_neigh->t0timer) / HZ,
@@ -1302,18 +1305,22 @@ void __exit rose_rt_free(void)
struct rose_neigh *s, *rose_neigh = rose_neigh_list;
struct rose_node *t, *rose_node = rose_node_list;
struct rose_route *u, *rose_route = rose_route_list;
+ int i;
while (rose_neigh != NULL) {
s = rose_neigh;
rose_neigh = rose_neigh->next;
rose_remove_neigh(s);
+ rose_neigh_put(s);
}
while (rose_node != NULL) {
t = rose_node;
rose_node = rose_node->next;
+ for (i = 0; i < t->count; i++)
+ rose_neigh_put(t->neighbour[i]);
rose_remove_node(t);
}
diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c
index 020369c49587..bb60a1654d61 100644
--- a/net/rose/rose_timer.c
+++ b/net/rose/rose_timer.c
@@ -180,7 +180,7 @@ static void rose_timer_expiry(struct timer_list *t)
break;
case ROSE_STATE_2: /* T3 */
- rose->neighbour->use--;
+ rose_neigh_put(rose->neighbour);
rose_disconnect(sk, ETIMEDOUT, -1, -1);
break;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 5bd3922c310d..5b7342d43486 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -44,6 +44,7 @@ enum rxrpc_skb_mark {
RXRPC_SKB_MARK_SERVICE_CONN_SECURED, /* Service connection response has been verified */
RXRPC_SKB_MARK_REJECT_BUSY, /* Reject with BUSY */
RXRPC_SKB_MARK_REJECT_ABORT, /* Reject with ABORT (code in skb->priority) */
+ RXRPC_SKB_MARK_REJECT_CONN_ABORT, /* Reject with connection ABORT (code in skb->priority) */
};
/*
@@ -361,12 +362,15 @@ struct rxrpc_local {
struct list_head new_client_calls; /* Newly created client calls need connection */
spinlock_t client_call_lock; /* Lock for ->new_client_calls */
struct sockaddr_rxrpc srx; /* local address */
- /* Provide a kvec table sufficiently large to manage either a DATA
- * packet with a maximum set of jumbo subpackets or a PING ACK padded
- * out to 64K with zeropages for PMTUD.
- */
- struct kvec kvec[1 + RXRPC_MAX_NR_JUMBO > 3 + 16 ?
- 1 + RXRPC_MAX_NR_JUMBO : 3 + 16];
+ union {
+ /* Provide a kvec table sufficiently large to manage either a
+ * DATA packet with a maximum set of jumbo subpackets or a PING
+ * ACK padded out to 64K with zeropages for PMTUD.
+ */
+ struct kvec kvec[1 + RXRPC_MAX_NR_JUMBO > 3 + 16 ?
+ 1 + RXRPC_MAX_NR_JUMBO : 3 + 16];
+ struct bio_vec bvec[3 + 16];
+ };
};
/*
@@ -1250,6 +1254,8 @@ int rxrpc_encap_rcv(struct sock *, struct sk_buff *);
void rxrpc_error_report(struct sock *);
bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
s32 abort_code, int err);
+bool rxrpc_direct_conn_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
+ s32 abort_code, int err);
int rxrpc_io_thread(void *data);
void rxrpc_post_response(struct rxrpc_connection *conn, struct sk_buff *skb);
static inline void rxrpc_wake_up_io_thread(struct rxrpc_local *local)
@@ -1380,6 +1386,7 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
const struct sockaddr_rxrpc *);
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
struct sockaddr_rxrpc *srx, gfp_t gfp);
+void rxrpc_assess_MTU_size(struct rxrpc_local *local, struct rxrpc_peer *peer);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t,
enum rxrpc_peer_trace);
void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer);
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index a4b363b47cca..00982a030744 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -149,6 +149,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
id_in_use:
write_unlock(&rx->call_lock);
+ rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, -EBADSLT);
rxrpc_cleanup_call(call);
_leave(" = -EBADSLT");
return -EBADSLT;
@@ -218,6 +219,7 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
tail = b->call_backlog_tail;
while (CIRC_CNT(head, tail, size) > 0) {
struct rxrpc_call *call = b->call_backlog[tail];
+ rxrpc_see_call(call, rxrpc_call_see_discard);
rcu_assign_pointer(call->socket, rx);
if (rx->app_ops &&
rx->app_ops->discard_new_call) {
@@ -254,6 +256,9 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
unsigned short call_tail, conn_tail, peer_tail;
unsigned short call_count, conn_count;
+ if (!b)
+ return NULL;
+
/* #calls >= #conns >= #peers must hold true. */
call_head = smp_load_acquire(&b->call_backlog_head);
call_tail = b->call_backlog_tail;
@@ -369,8 +374,8 @@ bool rxrpc_new_incoming_call(struct rxrpc_local *local,
spin_lock(&rx->incoming_lock);
if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
rx->sk.sk_state == RXRPC_CLOSE) {
- rxrpc_direct_abort(skb, rxrpc_abort_shut_down,
- RX_INVALID_OPERATION, -ESHUTDOWN);
+ rxrpc_direct_conn_abort(skb, rxrpc_abort_shut_down,
+ RX_INVALID_OPERATION, -ESHUTDOWN);
goto no_call;
}
@@ -402,6 +407,7 @@ bool rxrpc_new_incoming_call(struct rxrpc_local *local,
spin_unlock(&rx->incoming_lock);
read_unlock_irq(&local->services_lock);
+ rxrpc_assess_MTU_size(local, call->peer);
if (hlist_unhashed(&call->error_link)) {
spin_lock_irq(&call->peer->lock);
@@ -416,12 +422,12 @@ bool rxrpc_new_incoming_call(struct rxrpc_local *local,
unsupported_service:
read_unlock_irq(&local->services_lock);
- return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
- RX_INVALID_OPERATION, -EOPNOTSUPP);
+ return rxrpc_direct_conn_abort(skb, rxrpc_abort_service_not_offered,
+ RX_INVALID_OPERATION, -EOPNOTSUPP);
unsupported_security:
read_unlock_irq(&local->services_lock);
- return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
- RX_INVALID_OPERATION, -EKEYREJECTED);
+ return rxrpc_direct_conn_abort(skb, rxrpc_abort_service_not_offered,
+ RX_INVALID_OPERATION, -EKEYREJECTED);
no_call:
spin_unlock(&rx->incoming_lock);
read_unlock_irq(&local->services_lock);
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 15067ff7b1f2..918f41d97a2f 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -561,7 +561,7 @@ static void rxrpc_cleanup_rx_buffers(struct rxrpc_call *call)
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
struct rxrpc_connection *conn = call->conn;
- bool put = false, putu = false;
+ bool putu = false;
_enter("{%d,%d}", call->debug_id, refcount_read(&call->ref));
@@ -573,23 +573,13 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
rxrpc_put_call_slot(call);
- /* Make sure we don't get any more notifications */
+ /* Note that at this point, the call may still be on or may have been
+ * added back on to the socket receive queue. recvmsg() must discard
+ * released calls. The CALL_RELEASED flag should prevent further
+ * notifications.
+ */
spin_lock_irq(&rx->recvmsg_lock);
-
- if (!list_empty(&call->recvmsg_link)) {
- _debug("unlinking once-pending call %p { e=%lx f=%lx }",
- call, call->events, call->flags);
- list_del(&call->recvmsg_link);
- put = true;
- }
-
- /* list_empty() must return false in rxrpc_notify_socket() */
- call->recvmsg_link.next = NULL;
- call->recvmsg_link.prev = NULL;
-
spin_unlock_irq(&rx->recvmsg_lock);
- if (put)
- rxrpc_put_call(call, rxrpc_call_put_unnotify);
write_lock(&rx->call_lock);
@@ -638,6 +628,12 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
rxrpc_put_call(call, rxrpc_call_put_release_sock);
}
+ while ((call = list_first_entry_or_null(&rx->recvmsg_q,
+ struct rxrpc_call, recvmsg_link))) {
+ list_del_init(&call->recvmsg_link);
+ rxrpc_put_call(call, rxrpc_call_put_release_recvmsg_q);
+ }
+
_leave("");
}
diff --git a/net/rxrpc/io_thread.c b/net/rxrpc/io_thread.c
index 27b650d30f4d..e939ecf417c4 100644
--- a/net/rxrpc/io_thread.c
+++ b/net/rxrpc/io_thread.c
@@ -97,6 +97,20 @@ bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
return false;
}
+/*
+ * Directly produce a connection abort from a packet.
+ */
+bool rxrpc_direct_conn_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
+ s32 abort_code, int err)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+ trace_rxrpc_abort(0, why, sp->hdr.cid, 0, sp->hdr.seq, abort_code, err);
+ skb->mark = RXRPC_SKB_MARK_REJECT_CONN_ABORT;
+ skb->priority = abort_code;
+ return false;
+}
+
static bool rxrpc_bad_message(struct sk_buff *skb, enum rxrpc_abort_reason why)
{
return rxrpc_direct_abort(skb, why, RX_PROTOCOL_ERROR, -EBADMSG);
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 0af19bcdc80a..8b5903b6e481 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -814,6 +814,9 @@ void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
__be32 code;
int ret, ioc;
+ if (sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
+ return; /* Never abort an abort. */
+
rxrpc_see_skb(skb, rxrpc_skb_see_reject);
iov[0].iov_base = &whdr;
@@ -826,7 +829,13 @@ void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
msg.msg_controllen = 0;
msg.msg_flags = 0;
- memset(&whdr, 0, sizeof(whdr));
+ whdr = (struct rxrpc_wire_header) {
+ .epoch = htonl(sp->hdr.epoch),
+ .cid = htonl(sp->hdr.cid),
+ .callNumber = htonl(sp->hdr.callNumber),
+ .serviceId = htons(sp->hdr.serviceId),
+ .flags = ~sp->hdr.flags & RXRPC_CLIENT_INITIATED,
+ };
switch (skb->mark) {
case RXRPC_SKB_MARK_REJECT_BUSY:
@@ -834,6 +843,9 @@ void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
size = sizeof(whdr);
ioc = 1;
break;
+ case RXRPC_SKB_MARK_REJECT_CONN_ABORT:
+ whdr.callNumber = 0;
+ fallthrough;
case RXRPC_SKB_MARK_REJECT_ABORT:
whdr.type = RXRPC_PACKET_TYPE_ABORT;
code = htonl(skb->priority);
@@ -847,14 +859,6 @@ void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
if (rxrpc_extract_addr_from_skb(&srx, skb) == 0) {
msg.msg_namelen = srx.transport_len;
- whdr.epoch = htonl(sp->hdr.epoch);
- whdr.cid = htonl(sp->hdr.cid);
- whdr.callNumber = htonl(sp->hdr.callNumber);
- whdr.serviceId = htons(sp->hdr.serviceId);
- whdr.flags = sp->hdr.flags;
- whdr.flags ^= RXRPC_CLIENT_INITIATED;
- whdr.flags &= RXRPC_CLIENT_INITIATED;
-
iov_iter_kvec(&msg.msg_iter, WRITE, iov, ioc, size);
ret = do_udp_sendmsg(local->socket, &msg, size);
if (ret < 0)
@@ -924,7 +928,7 @@ void rxrpc_send_response(struct rxrpc_connection *conn, struct sk_buff *response
{
struct rxrpc_skb_priv *sp = rxrpc_skb(response);
struct scatterlist sg[16];
- struct bio_vec bvec[16];
+ struct bio_vec *bvec = conn->local->bvec;
struct msghdr msg;
size_t len = sp->resp.len;
__be32 wserial;
@@ -938,6 +942,9 @@ void rxrpc_send_response(struct rxrpc_connection *conn, struct sk_buff *response
if (ret < 0)
goto fail;
nr_sg = ret;
+ ret = -EIO;
+ if (WARN_ON_ONCE(nr_sg > ARRAY_SIZE(conn->local->bvec)))
+ goto fail;
for (int i = 0; i < nr_sg; i++)
bvec_set_page(&bvec[i], sg_page(&sg[i]), sg[i].length, sg[i].offset);
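
In the rxrpc_reject_packet() hunk above, the old three-statement flag dance (copy, XOR, AND) collapses into a single initializer. For a one-bit mask the two forms are identical; a small stand-alone check makes that obvious (the 0x01 value of RXRPC_CLIENT_INITIATED is assumed here purely for the demo):

	#include <assert.h>

	#define RXRPC_CLIENT_INITIATED 0x01	/* assumed value, demo only */

	int main(void)
	{
		for (unsigned int f = 0; f < 256; f++)
			assert(((f ^ RXRPC_CLIENT_INITIATED) & RXRPC_CLIENT_INITIATED) ==
			       (~f & RXRPC_CLIENT_INITIATED));
		return 0;
	}

Either way, the rejection is stamped as coming from the side opposite the one that sent the offending packet.
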
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index e2f35e6c04d6..366431b0736c 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -149,8 +149,7 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
* assess the MTU size for the network interface through which this peer is
* reached
*/
-static void rxrpc_assess_MTU_size(struct rxrpc_local *local,
- struct rxrpc_peer *peer)
+void rxrpc_assess_MTU_size(struct rxrpc_local *local, struct rxrpc_peer *peer)
{
struct net *net = local->net;
struct dst_entry *dst;
@@ -277,8 +276,6 @@ static void rxrpc_init_peer(struct rxrpc_local *local, struct rxrpc_peer *peer,
peer->hdrsize += sizeof(struct rxrpc_wire_header);
peer->max_data = peer->if_mtu - peer->hdrsize;
-
- rxrpc_assess_MTU_size(local, peer);
}
/*
@@ -297,6 +294,7 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
if (peer) {
memcpy(&peer->srx, srx, sizeof(*srx));
rxrpc_init_peer(local, peer, hash_key);
+ rxrpc_assess_MTU_size(local, peer);
}
_leave(" = %p", peer);
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 86a27fb55a1c..7fa7e77f6bb9 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -29,6 +29,10 @@ void rxrpc_notify_socket(struct rxrpc_call *call)
if (!list_empty(&call->recvmsg_link))
return;
+ if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
+ rxrpc_see_call(call, rxrpc_call_see_notify_released);
+ return;
+ }
rcu_read_lock();
@@ -447,6 +451,16 @@ try_again:
goto try_again;
}
+ rxrpc_see_call(call, rxrpc_call_see_recvmsg);
+ if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
+ rxrpc_see_call(call, rxrpc_call_see_already_released);
+ list_del_init(&call->recvmsg_link);
+ spin_unlock_irq(&rx->recvmsg_lock);
+ release_sock(&rx->sk);
+ trace_rxrpc_recvmsg(call->debug_id, rxrpc_recvmsg_unqueue, 0);
+ rxrpc_put_call(call, rxrpc_call_put_recvmsg);
+ goto try_again;
+ }
if (!(flags & MSG_PEEK))
list_del_init(&call->recvmsg_link);
else
@@ -470,8 +484,13 @@ try_again:
release_sock(&rx->sk);
- if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
- BUG();
+ if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
+ rxrpc_see_call(call, rxrpc_call_see_already_released);
+ mutex_unlock(&call->user_mutex);
+ if (!(flags & MSG_PEEK))
+ rxrpc_put_call(call, rxrpc_call_put_recvmsg);
+ goto try_again;
+ }
ret = rxrpc_recvmsg_user_id(call, msg, flags);
if (ret < 0)
diff --git a/net/rxrpc/rxgk.c b/net/rxrpc/rxgk.c
index 1e19c605bcc8..dce5a3d8a964 100644
--- a/net/rxrpc/rxgk.c
+++ b/net/rxrpc/rxgk.c
@@ -475,7 +475,7 @@ static int rxgk_verify_packet_integrity(struct rxrpc_call *call,
struct krb5_buffer metadata;
unsigned int offset = sp->offset, len = sp->len;
size_t data_offset = 0, data_len = len;
- u32 ac;
+ u32 ac = 0;
int ret = -ENOMEM;
_enter("");
@@ -499,9 +499,10 @@ static int rxgk_verify_packet_integrity(struct rxrpc_call *call,
ret = rxgk_verify_mic_skb(gk->krb5, gk->rx_Kc, &metadata,
skb, &offset, &len, &ac);
kfree(hdr);
- if (ret == -EPROTO) {
- rxrpc_abort_eproto(call, skb, ac,
- rxgk_abort_1_verify_mic_eproto);
+ if (ret < 0) {
+ if (ret != -ENOMEM)
+ rxrpc_abort_eproto(call, skb, ac,
+ rxgk_abort_1_verify_mic_eproto);
} else {
sp->offset = offset;
sp->len = len;
@@ -524,15 +525,16 @@ static int rxgk_verify_packet_encrypted(struct rxrpc_call *call,
struct rxgk_header hdr;
unsigned int offset = sp->offset, len = sp->len;
int ret;
- u32 ac;
+ u32 ac = 0;
_enter("");
ret = rxgk_decrypt_skb(gk->krb5, gk->rx_enc, skb, &offset, &len, &ac);
- if (ret == -EPROTO)
- rxrpc_abort_eproto(call, skb, ac, rxgk_abort_2_decrypt_eproto);
- if (ret < 0)
+ if (ret < 0) {
+ if (ret != -ENOMEM)
+ rxrpc_abort_eproto(call, skb, ac, rxgk_abort_2_decrypt_eproto);
goto error;
+ }
if (len < sizeof(hdr)) {
ret = rxrpc_abort_eproto(call, skb, RXGK_PACKETSHORT,
diff --git a/net/rxrpc/rxgk_app.c b/net/rxrpc/rxgk_app.c
index b94b77a1c317..30275cb5ba3e 100644
--- a/net/rxrpc/rxgk_app.c
+++ b/net/rxrpc/rxgk_app.c
@@ -54,6 +54,10 @@ int rxgk_yfs_decode_ticket(struct rxrpc_connection *conn, struct sk_buff *skb,
_enter("");
+ if (ticket_len < 10 * sizeof(__be32))
+ return rxrpc_abort_conn(conn, skb, RXGK_INCONSISTENCY, -EPROTO,
+ rxgk_abort_resp_short_yfs_tkt);
+
/* Get the session key length */
ret = skb_copy_bits(skb, ticket_offset, tmp, sizeof(tmp));
if (ret < 0)
@@ -187,7 +191,7 @@ int rxgk_extract_token(struct rxrpc_connection *conn, struct sk_buff *skb,
struct key *server_key;
unsigned int ticket_offset, ticket_len;
u32 kvno, enctype;
- int ret, ec;
+ int ret, ec = 0;
struct {
__be32 kvno;
@@ -195,22 +199,23 @@ int rxgk_extract_token(struct rxrpc_connection *conn, struct sk_buff *skb,
__be32 token_len;
} container;
+ if (token_len < sizeof(container))
+ goto short_packet;
+
/* Decode the RXGK_TokenContainer object. This tells us which server
* key we should be using. We can then fetch the key, get the secret
* and set up the crypto to extract the token.
*/
if (skb_copy_bits(skb, token_offset, &container, sizeof(container)) < 0)
- return rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO,
- rxgk_abort_resp_tok_short);
+ goto short_packet;
kvno = ntohl(container.kvno);
enctype = ntohl(container.enctype);
ticket_len = ntohl(container.token_len);
ticket_offset = token_offset + sizeof(container);
- if (xdr_round_up(ticket_len) > token_len - 3 * 4)
- return rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO,
- rxgk_abort_resp_tok_short);
+ if (xdr_round_up(ticket_len) > token_len - sizeof(container))
+ goto short_packet;
_debug("KVNO %u", kvno);
_debug("ENC %u", enctype);
@@ -236,9 +241,11 @@ int rxgk_extract_token(struct rxrpc_connection *conn, struct sk_buff *skb,
&ticket_offset, &ticket_len, &ec);
crypto_free_aead(token_enc);
token_enc = NULL;
- if (ret < 0)
- return rxrpc_abort_conn(conn, skb, ec, ret,
- rxgk_abort_resp_tok_dec);
+ if (ret < 0) {
+ if (ret != -ENOMEM)
+ return rxrpc_abort_conn(conn, skb, ec, ret,
+ rxgk_abort_resp_tok_dec);
+ }
ret = conn->security->default_decode_ticket(conn, skb, ticket_offset,
ticket_len, _key);
@@ -283,4 +290,8 @@ temporary_error:
* also come out this way if the ticket decryption fails.
*/
return ret;
+
+short_packet:
+ return rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO,
+ rxgk_abort_resp_tok_short);
}
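
The rxgk_extract_token() change above adds an up-front "token_len < sizeof(container)" check before the existing "token_len - sizeof(container)" bound. With unsigned lengths the ordering matters: for a short packet the subtraction wraps around and the old "too long" comparison could never trigger. A tiny stand-alone illustration (values hypothetical):

	#include <stdio.h>

	int main(void)
	{
		unsigned int token_len = 8;		/* shorter than the 12-byte container */
		unsigned int container_size = 12;

		/* Without checking token_len first, the subtraction wraps ... */
		printf("%u\n", token_len - container_size);	/* prints 4294967292 */
		return 0;
	}
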
diff --git a/net/rxrpc/rxgk_common.h b/net/rxrpc/rxgk_common.h
index 7370a5655985..80164d89e19c 100644
--- a/net/rxrpc/rxgk_common.h
+++ b/net/rxrpc/rxgk_common.h
@@ -88,11 +88,16 @@ int rxgk_decrypt_skb(const struct krb5_enctype *krb5,
*_offset += offset;
*_len = len;
break;
+ case -EBADMSG: /* Checksum mismatch. */
case -EPROTO:
- case -EBADMSG:
*_error_code = RXGK_SEALEDINCON;
break;
+ case -EMSGSIZE:
+ *_error_code = RXGK_PACKETSHORT;
+ break;
+ case -ENOPKG: /* Would prefer RXGK_BADETYPE, but not available for YFS. */
default:
+ *_error_code = RXGK_INCONSISTENCY;
break;
}
@@ -127,11 +132,16 @@ int rxgk_verify_mic_skb(const struct krb5_enctype *krb5,
*_offset += offset;
*_len = len;
break;
+ case -EBADMSG: /* Checksum mismatch */
case -EPROTO:
- case -EBADMSG:
*_error_code = RXGK_SEALEDINCON;
break;
+ case -EMSGSIZE:
+ *_error_code = RXGK_PACKETSHORT;
+ break;
+ case -ENOPKG: /* Would prefer RXGK_BADETYPE, but not available for YFS. */
default:
+ *_error_code = RXGK_INCONSISTENCY;
break;
}
diff --git a/net/rxrpc/rxperf.c b/net/rxrpc/rxperf.c
index 0377301156b0..2ea71e3831f7 100644
--- a/net/rxrpc/rxperf.c
+++ b/net/rxrpc/rxperf.c
@@ -630,7 +630,7 @@ static int __init rxperf_init(void)
pr_info("Server registering\n");
- rxperf_workqueue = alloc_workqueue("rxperf", 0, 0);
+ rxperf_workqueue = alloc_workqueue("rxperf", WQ_PERCPU, 0);
if (!rxperf_workqueue)
goto error_workqueue;
diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c
index 078d91a6b77f..2bfbf2b2bb37 100644
--- a/net/rxrpc/security.c
+++ b/net/rxrpc/security.c
@@ -140,15 +140,15 @@ const struct rxrpc_security *rxrpc_get_incoming_security(struct rxrpc_sock *rx,
sec = rxrpc_security_lookup(sp->hdr.securityIndex);
if (!sec) {
- rxrpc_direct_abort(skb, rxrpc_abort_unsupported_security,
- RX_INVALID_OPERATION, -EKEYREJECTED);
+ rxrpc_direct_conn_abort(skb, rxrpc_abort_unsupported_security,
+ RX_INVALID_OPERATION, -EKEYREJECTED);
return NULL;
}
if (sp->hdr.securityIndex != RXRPC_SECURITY_NONE &&
!rx->securities) {
- rxrpc_direct_abort(skb, rxrpc_abort_no_service_key,
- sec->no_key_abort, -EKEYREJECTED);
+ rxrpc_direct_conn_abort(skb, rxrpc_abort_no_service_key,
+ sec->no_key_abort, -EKEYREJECTED);
return NULL;
}
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index ad914d2b2e22..6ddff028b81a 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -415,6 +415,18 @@ config NET_SCH_BPF
If unsure, say N.
+config NET_SCH_DUALPI2
+ tristate "Dual Queue PI Square (DUALPI2) scheduler"
+ help
+ Say Y here if you want to use the Dual Queue Proportional Integral
+ Controller Improved with a Square scheduling algorithm.
+ For more information, please see https://tools.ietf.org/html/rfc9332
+
+ To compile this driver as a module, choose M here: the module
+ will be called sch_dualpi2.
+
+ If unsure, say N.
+
menuconfig NET_SCH_DEFAULT
bool "Allow override default queue discipline"
help
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 904d784902d1..5078ea84e6ad 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_NET_SCH_CBS) += sch_cbs.o
obj-$(CONFIG_NET_SCH_ETF) += sch_etf.o
obj-$(CONFIG_NET_SCH_TAPRIO) += sch_taprio.o
obj-$(CONFIG_NET_SCH_BPF) += bpf_qdisc.o
+obj-$(CONFIG_NET_SCH_DUALPI2) += sch_dualpi2.o
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 057e20cef375..ff6be5cfe2b0 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -933,18 +933,25 @@ void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
struct tcf_idrinfo *idrinfo)
{
struct idr *idr = &idrinfo->action_idr;
+ bool mutex_taken = false;
struct tc_action *p;
- int ret;
unsigned long id = 1;
unsigned long tmp;
+ int ret;
idr_for_each_entry_ul(idr, p, tmp, id) {
+ if (tc_act_in_hw(p) && !mutex_taken) {
+ rtnl_lock();
+ mutex_taken = true;
+ }
ret = __tcf_idr_release(p, false, true);
if (ret == ACT_P_DELETED)
module_put(ops->owner);
else if (ret < 0)
return;
}
+ if (mutex_taken)
+ rtnl_unlock();
idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);
@@ -1578,7 +1585,7 @@ void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
}
_bstats_update(&a->tcfa_bstats, bytes, packets);
- a->tcfa_qstats.drops += drops;
+ atomic_add(drops, &a->tcfa_drops);
if (hw)
_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
}
@@ -1587,8 +1594,9 @@ EXPORT_SYMBOL(tcf_action_update_stats);
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
int compat_mode)
{
- int err = 0;
+ struct gnet_stats_queue qstats = {0};
struct gnet_dump d;
+ int err = 0;
if (p == NULL)
goto errout;
@@ -1612,14 +1620,17 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
if (err < 0)
goto errout;
+ qstats.drops = atomic_read(&p->tcfa_drops);
+ qstats.overlimits = atomic_read(&p->tcfa_overlimits);
+
if (gnet_stats_copy_basic(&d, p->cpu_bstats,
&p->tcfa_bstats, false) < 0 ||
gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw,
&p->tcfa_bstats_hw, false) < 0 ||
gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
gnet_stats_copy_queue(&d, p->cpu_qstats,
- &p->tcfa_qstats,
- p->tcfa_qstats.qlen) < 0)
+ &qstats,
+ qstats.qlen) < 0)
goto errout;
if (gnet_stats_finish_copy(&d) < 0)
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 0fce631e7c91..3e89927d7116 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -88,7 +88,7 @@ count:
/* using overlimits stats to count how many packets marked */
tcf_action_inc_overlimit_qstats(&ca->common);
out:
- return READ_ONCE(ca->tcf_action);
+ return parms->action;
}
static const struct nla_policy connmark_policy[TCA_CONNMARK_MAX + 1] = {
@@ -167,6 +167,8 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
if (err < 0)
goto release_idr;
+ nparms->action = parm->action;
+
spin_lock_bh(&ci->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
oparms = rcu_replace_pointer(ci->parms, nparms, lockdep_is_held(&ci->tcf_lock));
@@ -190,20 +192,20 @@ out_free:
static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
+ const struct tcf_connmark_info *ci = to_connmark(a);
unsigned char *b = skb_tail_pointer(skb);
- struct tcf_connmark_info *ci = to_connmark(a);
+ const struct tcf_connmark_parms *parms;
struct tc_connmark opt = {
.index = ci->tcf_index,
.refcnt = refcount_read(&ci->tcf_refcnt) - ref,
.bindcnt = atomic_read(&ci->tcf_bindcnt) - bind,
};
- struct tcf_connmark_parms *parms;
struct tcf_t t;
- spin_lock_bh(&ci->tcf_lock);
- parms = rcu_dereference_protected(ci->parms, lockdep_is_held(&ci->tcf_lock));
+ rcu_read_lock();
+ parms = rcu_dereference(ci->parms);
- opt.action = ci->tcf_action;
+ opt.action = parms->action;
opt.zone = parms->zone;
if (nla_put(skb, TCA_CONNMARK_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
@@ -212,12 +214,12 @@ static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
if (nla_put_64bit(skb, TCA_CONNMARK_TM, sizeof(t), &t,
TCA_CONNMARK_PAD))
goto nla_put_failure;
- spin_unlock_bh(&ci->tcf_lock);
+ rcu_read_unlock();
return skb->len;
nla_put_failure:
- spin_unlock_bh(&ci->tcf_lock);
+ rcu_read_unlock();
nlmsg_trim(skb, b);
return -1;
}
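
The act_connmark conversion above is the template repeated by the later act_* hunks: the TC verdict is copied into the RCU-managed parameter block at init time, so both the datapath and the dump path can read it under RCU instead of taking tcf_lock. A stripped-down sketch of the idea (the demo_* names are hypothetical, not the in-tree structures):

	struct demo_parms {
		int		action;		/* TC_ACT_* verdict */
		struct rcu_head	rcu;
	};

	struct demo_act {
		struct demo_parms __rcu	*parms;
		spinlock_t		lock;	/* writers (init/replace) only */
	};

	static int demo_act_verdict(struct demo_act *a)
	{
		const struct demo_parms *p;
		int action;

		rcu_read_lock();
		p = rcu_dereference(a->parms);
		action = p->action;
		rcu_read_unlock();
		return action;
	}

The writer side keeps its spinlock and swaps the whole parameter block with rcu_replace_pointer(), so readers always see a consistent action/parameter pair.
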
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 5cc8e407e791..0939e6b2ba4d 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -99,6 +99,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
goto put_chain;
}
params_new->update_flags = parm->update_flags;
+ params_new->action = parm->action;
spin_lock_bh(&p->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
@@ -580,7 +581,7 @@ TC_INDIRECT_SCOPE int tcf_csum_act(struct sk_buff *skb,
tcf_lastuse_update(&p->tcf_tm);
tcf_action_update_bstats(&p->common, skb);
- action = READ_ONCE(p->tcf_action);
+ action = params->action;
if (unlikely(action == TC_ACT_SHOT))
goto drop;
@@ -631,9 +632,9 @@ drop:
static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
int ref)
{
+ const struct tcf_csum *p = to_tcf_csum(a);
unsigned char *b = skb_tail_pointer(skb);
- struct tcf_csum *p = to_tcf_csum(a);
- struct tcf_csum_params *params;
+ const struct tcf_csum_params *params;
struct tc_csum opt = {
.index = p->tcf_index,
.refcnt = refcount_read(&p->tcf_refcnt) - ref,
@@ -641,10 +642,9 @@ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
};
struct tcf_t t;
- spin_lock_bh(&p->tcf_lock);
- params = rcu_dereference_protected(p->params,
- lockdep_is_held(&p->tcf_lock));
- opt.action = p->tcf_action;
+ rcu_read_lock();
+ params = rcu_dereference(p->params);
+ opt.action = params->action;
opt.update_flags = params->update_flags;
if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
@@ -653,12 +653,12 @@ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
tcf_tm_dump(&t, &p->tcf_tm);
if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
goto nla_put_failure;
- spin_unlock_bh(&p->tcf_lock);
+ rcu_read_unlock();
return skb->len;
nla_put_failure:
- spin_unlock_bh(&p->tcf_lock);
+ rcu_read_unlock();
nlmsg_trim(skb, b);
return -1;
}
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index c02f39efc6ef..6749a4a9a9cd 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -977,7 +977,7 @@ TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
p = rcu_dereference_bh(c->params);
- retval = READ_ONCE(c->tcf_action);
+ retval = p->action;
commit = p->ct_action & TCA_CT_ACT_COMMIT;
clear = p->ct_action & TCA_CT_ACT_CLEAR;
tmpl = p->tmpl;
@@ -1409,6 +1409,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
if (err)
goto cleanup;
+ params->action = parm->action;
spin_lock_bh(&c->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
params = rcu_replace_pointer(c->params, params,
@@ -1442,8 +1443,8 @@ static void tcf_ct_cleanup(struct tc_action *a)
}
static int tcf_ct_dump_key_val(struct sk_buff *skb,
- void *val, int val_type,
- void *mask, int mask_type,
+ const void *val, int val_type,
+ const void *mask, int mask_type,
int len)
{
int err;
@@ -1464,9 +1465,9 @@ static int tcf_ct_dump_key_val(struct sk_buff *skb,
return 0;
}
-static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
+static int tcf_ct_dump_nat(struct sk_buff *skb, const struct tcf_ct_params *p)
{
- struct nf_nat_range2 *range = &p->range;
+ const struct nf_nat_range2 *range = &p->range;
if (!(p->ct_action & TCA_CT_ACT_NAT))
return 0;
@@ -1504,7 +1505,8 @@ static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
return 0;
}
-static int tcf_ct_dump_helper(struct sk_buff *skb, struct nf_conntrack_helper *helper)
+static int tcf_ct_dump_helper(struct sk_buff *skb,
+ const struct nf_conntrack_helper *helper)
{
if (!helper)
return 0;
@@ -1521,9 +1523,8 @@ static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
- struct tcf_ct *c = to_ct(a);
- struct tcf_ct_params *p;
-
+ const struct tcf_ct *c = to_ct(a);
+ const struct tcf_ct_params *p;
struct tc_ct opt = {
.index = c->tcf_index,
.refcnt = refcount_read(&c->tcf_refcnt) - ref,
@@ -1531,10 +1532,9 @@ static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
};
struct tcf_t t;
- spin_lock_bh(&c->tcf_lock);
- p = rcu_dereference_protected(c->params,
- lockdep_is_held(&c->tcf_lock));
- opt.action = c->tcf_action;
+ rcu_read_lock();
+ p = rcu_dereference(c->params);
+ opt.action = p->action;
if (tcf_ct_dump_key_val(skb,
&p->ct_action, TCA_CT_ACTION,
@@ -1579,11 +1579,11 @@ skip_dump:
tcf_tm_dump(&t, &c->tcf_tm);
if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
goto nla_put_failure;
- spin_unlock_bh(&c->tcf_lock);
+ rcu_read_unlock();
return skb->len;
nla_put_failure:
- spin_unlock_bh(&c->tcf_lock);
+ rcu_read_unlock();
nlmsg_trim(skb, b);
return -1;
}
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index 5b1241ddc758..71efe04d00b5 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -44,9 +44,9 @@ static void tcf_ctinfo_dscp_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
ipv4_change_dsfield(ip_hdr(skb),
INET_ECN_MASK,
newdscp);
- ca->stats_dscp_set++;
+ atomic64_inc(&ca->stats_dscp_set);
} else {
- ca->stats_dscp_error++;
+ atomic64_inc(&ca->stats_dscp_error);
}
}
break;
@@ -57,9 +57,9 @@ static void tcf_ctinfo_dscp_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
ipv6_change_dsfield(ipv6_hdr(skb),
INET_ECN_MASK,
newdscp);
- ca->stats_dscp_set++;
+ atomic64_inc(&ca->stats_dscp_set);
} else {
- ca->stats_dscp_error++;
+ atomic64_inc(&ca->stats_dscp_error);
}
}
break;
@@ -72,7 +72,7 @@ static void tcf_ctinfo_cpmark_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
struct tcf_ctinfo_params *cp,
struct sk_buff *skb)
{
- ca->stats_cpmark_set++;
+ atomic64_inc(&ca->stats_cpmark_set);
skb->mark = READ_ONCE(ct->mark) & cp->cpmarkmask;
}
@@ -88,13 +88,11 @@ TC_INDIRECT_SCOPE int tcf_ctinfo_act(struct sk_buff *skb,
struct tcf_ctinfo_params *cp;
struct nf_conn *ct;
int proto, wlen;
- int action;
cp = rcu_dereference_bh(ca->params);
tcf_lastuse_update(&ca->tcf_tm);
tcf_action_update_bstats(&ca->common, skb);
- action = READ_ONCE(ca->tcf_action);
wlen = skb_network_offset(skb);
switch (skb_protocol(skb, true)) {
@@ -141,7 +139,7 @@ TC_INDIRECT_SCOPE int tcf_ctinfo_act(struct sk_buff *skb,
if (thash)
nf_ct_put(ct);
out:
- return action;
+ return cp->action;
}
static const struct nla_policy ctinfo_policy[TCA_CTINFO_MAX + 1] = {
@@ -258,6 +256,8 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
cp_new->mode |= CTINFO_MODE_CPMARK;
}
+ cp_new->action = actparm->action;
+
spin_lock_bh(&ci->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, actparm->action, goto_ch);
cp_new = rcu_replace_pointer(ci->params, cp_new,
@@ -282,25 +282,24 @@ release_idr:
static int tcf_ctinfo_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
- struct tcf_ctinfo *ci = to_ctinfo(a);
+ const struct tcf_ctinfo *ci = to_ctinfo(a);
+ unsigned char *b = skb_tail_pointer(skb);
+ const struct tcf_ctinfo_params *cp;
struct tc_ctinfo opt = {
.index = ci->tcf_index,
.refcnt = refcount_read(&ci->tcf_refcnt) - ref,
.bindcnt = atomic_read(&ci->tcf_bindcnt) - bind,
};
- unsigned char *b = skb_tail_pointer(skb);
- struct tcf_ctinfo_params *cp;
struct tcf_t t;
- spin_lock_bh(&ci->tcf_lock);
- cp = rcu_dereference_protected(ci->params,
- lockdep_is_held(&ci->tcf_lock));
+ rcu_read_lock();
+ cp = rcu_dereference(ci->params);
tcf_tm_dump(&t, &ci->tcf_tm);
if (nla_put_64bit(skb, TCA_CTINFO_TM, sizeof(t), &t, TCA_CTINFO_PAD))
goto nla_put_failure;
- opt.action = ci->tcf_action;
+ opt.action = cp->action;
if (nla_put(skb, TCA_CTINFO_ACT, sizeof(opt), &opt))
goto nla_put_failure;
@@ -323,22 +322,25 @@ static int tcf_ctinfo_dump(struct sk_buff *skb, struct tc_action *a,
}
if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_DSCP_SET,
- ci->stats_dscp_set, TCA_CTINFO_PAD))
+ atomic64_read(&ci->stats_dscp_set),
+ TCA_CTINFO_PAD))
goto nla_put_failure;
if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_DSCP_ERROR,
- ci->stats_dscp_error, TCA_CTINFO_PAD))
+ atomic64_read(&ci->stats_dscp_error),
+ TCA_CTINFO_PAD))
goto nla_put_failure;
if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_CPMARK_SET,
- ci->stats_cpmark_set, TCA_CTINFO_PAD))
+ atomic64_read(&ci->stats_cpmark_set),
+ TCA_CTINFO_PAD))
goto nla_put_failure;
- spin_unlock_bh(&ci->tcf_lock);
+ rcu_read_unlock();
return skb->len;
nla_put_failure:
- spin_unlock_bh(&ci->tcf_lock);
+ rcu_read_unlock();
nlmsg_trim(skb, b);
return -1;
}
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index 9f86f4e666d3..6654011dcd2b 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -57,7 +57,7 @@ TC_INDIRECT_SCOPE int tcf_mpls_act(struct sk_buff *skb,
struct tcf_mpls *m = to_mpls(a);
struct tcf_mpls_params *p;
__be32 new_lse;
- int ret, mac_len;
+ int mac_len;
tcf_lastuse_update(&m->tcf_tm);
bstats_update(this_cpu_ptr(m->common.cpu_bstats), skb);
@@ -72,8 +72,6 @@ TC_INDIRECT_SCOPE int tcf_mpls_act(struct sk_buff *skb,
mac_len = skb_network_offset(skb);
}
- ret = READ_ONCE(m->tcf_action);
-
p = rcu_dereference_bh(m->mpls_p);
switch (p->tcfm_action) {
@@ -122,7 +120,7 @@ TC_INDIRECT_SCOPE int tcf_mpls_act(struct sk_buff *skb,
if (skb_at_tc_ingress(skb))
skb_pull_rcsum(skb, skb->mac_len);
- return ret;
+ return p->action;
drop:
qstats_drop_inc(this_cpu_ptr(m->common.cpu_qstats));
@@ -296,6 +294,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
ACT_MPLS_BOS_NOT_SET);
p->tcfm_proto = nla_get_be16_default(tb[TCA_MPLS_PROTO],
htons(ETH_P_MPLS_UC));
+ p->action = parm->action;
spin_lock_bh(&m->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
@@ -330,8 +329,8 @@ static int tcf_mpls_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
- struct tcf_mpls *m = to_mpls(a);
- struct tcf_mpls_params *p;
+ const struct tcf_mpls *m = to_mpls(a);
+ const struct tcf_mpls_params *p;
struct tc_mpls opt = {
.index = m->tcf_index,
.refcnt = refcount_read(&m->tcf_refcnt) - ref,
@@ -339,10 +338,10 @@ static int tcf_mpls_dump(struct sk_buff *skb, struct tc_action *a,
};
struct tcf_t t;
- spin_lock_bh(&m->tcf_lock);
- opt.action = m->tcf_action;
- p = rcu_dereference_protected(m->mpls_p, lockdep_is_held(&m->tcf_lock));
+ rcu_read_lock();
+ p = rcu_dereference(m->mpls_p);
opt.m_action = p->tcfm_action;
+ opt.action = p->action;
if (nla_put(skb, TCA_MPLS_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
@@ -370,12 +369,12 @@ static int tcf_mpls_dump(struct sk_buff *skb, struct tc_action *a,
if (nla_put_64bit(skb, TCA_MPLS_TM, sizeof(t), &t, TCA_MPLS_PAD))
goto nla_put_failure;
- spin_unlock_bh(&m->tcf_lock);
+ rcu_read_unlock();
return skb->len;
nla_put_failure:
- spin_unlock_bh(&m->tcf_lock);
+ rcu_read_unlock();
nlmsg_trim(skb, b);
return -EMSGSIZE;
}
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index d541f553805f..26241d80ebe0 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -91,6 +91,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
nparm->new_addr = parm->new_addr;
nparm->mask = parm->mask;
nparm->flags = parm->flags;
+ nparm->action = parm->action;
p = to_tcf_nat(*a);
@@ -130,17 +131,16 @@ TC_INDIRECT_SCOPE int tcf_nat_act(struct sk_buff *skb,
tcf_lastuse_update(&p->tcf_tm);
tcf_action_update_bstats(&p->common, skb);
- action = READ_ONCE(p->tcf_action);
-
parms = rcu_dereference_bh(p->parms);
+ action = parms->action;
+ if (unlikely(action == TC_ACT_SHOT))
+ goto drop;
+
old_addr = parms->old_addr;
new_addr = parms->new_addr;
mask = parms->mask;
egress = parms->flags & TCA_NAT_FLAG_EGRESS;
- if (unlikely(action == TC_ACT_SHOT))
- goto drop;
-
noff = skb_network_offset(skb);
if (!pskb_may_pull(skb, sizeof(*iph) + noff))
goto drop;
@@ -268,21 +268,20 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
- struct tcf_nat *p = to_tcf_nat(a);
+ const struct tcf_nat *p = to_tcf_nat(a);
+ const struct tcf_nat_parms *parms;
struct tc_nat opt = {
.index = p->tcf_index,
.refcnt = refcount_read(&p->tcf_refcnt) - ref,
.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
};
- struct tcf_nat_parms *parms;
struct tcf_t t;
- spin_lock_bh(&p->tcf_lock);
-
- opt.action = p->tcf_action;
+ rcu_read_lock();
- parms = rcu_dereference_protected(p->parms, lockdep_is_held(&p->tcf_lock));
+ parms = rcu_dereference(p->parms);
+ opt.action = parms->action;
opt.old_addr = parms->old_addr;
opt.new_addr = parms->new_addr;
opt.mask = parms->mask;
@@ -294,12 +293,12 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
tcf_tm_dump(&t, &p->tcf_tm);
if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD))
goto nla_put_failure;
- spin_unlock_bh(&p->tcf_lock);
+ rcu_read_unlock();
return skb->len;
nla_put_failure:
- spin_unlock_bh(&p->tcf_lock);
+ rcu_read_unlock();
nlmsg_trim(skb, b);
return -1;
}
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index fc0a35a7b62a..4b65901397a8 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -279,7 +279,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
}
p = to_pedit(*a);
-
+ nparms->action = parm->action;
spin_lock_bh(&p->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
oparms = rcu_replace_pointer(p->parms, nparms, 1);
@@ -483,7 +483,7 @@ TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb,
bad:
tcf_action_inc_overlimit_qstats(&p->common);
done:
- return p->tcf_action;
+ return parms->action;
}
static void tcf_pedit_stats_update(struct tc_action *a, u64 bytes, u64 packets,
@@ -500,19 +500,19 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
- struct tcf_pedit *p = to_pedit(a);
- struct tcf_pedit_parms *parms;
+ const struct tcf_pedit *p = to_pedit(a);
+ const struct tcf_pedit_parms *parms;
struct tc_pedit *opt;
struct tcf_t t;
int s;
- spin_lock_bh(&p->tcf_lock);
- parms = rcu_dereference_protected(p->parms, 1);
+ rcu_read_lock();
+ parms = rcu_dereference(p->parms);
s = struct_size(opt, keys, parms->tcfp_nkeys);
opt = kzalloc(s, GFP_ATOMIC);
if (unlikely(!opt)) {
- spin_unlock_bh(&p->tcf_lock);
+ rcu_read_unlock();
return -ENOBUFS;
}
opt->nkeys = parms->tcfp_nkeys;
@@ -521,7 +521,7 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
flex_array_size(opt, keys, parms->tcfp_nkeys));
opt->index = p->tcf_index;
opt->flags = parms->tcfp_flags;
- opt->action = p->tcf_action;
+ opt->action = parms->action;
opt->refcnt = refcount_read(&p->tcf_refcnt) - ref;
opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind;
@@ -540,13 +540,13 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
tcf_tm_dump(&t, &p->tcf_tm);
if (nla_put_64bit(skb, TCA_PEDIT_TM, sizeof(t), &t, TCA_PEDIT_PAD))
goto nla_put_failure;
- spin_unlock_bh(&p->tcf_lock);
+ rcu_read_unlock();
kfree(opt);
return skb->len;
nla_put_failure:
- spin_unlock_bh(&p->tcf_lock);
+ rcu_read_unlock();
nlmsg_trim(skb, b);
kfree(opt);
return -1;
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index a214ed681142..0e1c61183379 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -198,6 +198,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
psched_ppscfg_precompute(&new->ppsrate, pps);
}
+ new->action = parm->action;
spin_lock_bh(&police->tcf_lock);
spin_lock_bh(&police->tcfp_lock);
police->tcfp_t_c = ktime_get_ns();
@@ -254,8 +255,8 @@ TC_INDIRECT_SCOPE int tcf_police_act(struct sk_buff *skb,
tcf_lastuse_update(&police->tcf_tm);
bstats_update(this_cpu_ptr(police->common.cpu_bstats), skb);
- ret = READ_ONCE(police->tcf_action);
p = rcu_dereference_bh(police->params);
+ ret = p->action;
if (p->tcfp_ewma_rate) {
struct gnet_stats_rate_est64 sample;
@@ -338,9 +339,9 @@ static void tcf_police_stats_update(struct tc_action *a,
static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
+ const struct tcf_police *police = to_police(a);
unsigned char *b = skb_tail_pointer(skb);
- struct tcf_police *police = to_police(a);
- struct tcf_police_params *p;
+ const struct tcf_police_params *p;
struct tc_police opt = {
.index = police->tcf_index,
.refcnt = refcount_read(&police->tcf_refcnt) - ref,
@@ -348,10 +349,9 @@ static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
};
struct tcf_t t;
- spin_lock_bh(&police->tcf_lock);
- opt.action = police->tcf_action;
- p = rcu_dereference_protected(police->params,
- lockdep_is_held(&police->tcf_lock));
+ rcu_read_lock();
+ p = rcu_dereference(police->params);
+ opt.action = p->action;
opt.mtu = p->tcfp_mtu;
opt.burst = PSCHED_NS2TICKS(p->tcfp_burst);
if (p->rate_present) {
@@ -392,12 +392,12 @@ static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
tcf_tm_dump(&t, &police->tcf_tm);
if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
goto nla_put_failure;
- spin_unlock_bh(&police->tcf_lock);
+ rcu_read_unlock();
return skb->len;
nla_put_failure:
- spin_unlock_bh(&police->tcf_lock);
+ rcu_read_unlock();
nlmsg_trim(skb, b);
return -1;
}
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index f3abe0545989..8e69a919b4fe 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -72,7 +72,6 @@ static int reset_policy(struct tc_action *a, const struct nlattr *defdata,
d = to_defact(a);
spin_lock_bh(&d->tcf_lock);
goto_ch = tcf_action_set_ctrlact(a, p->action, goto_ch);
- memset(d->tcfd_defdata, 0, SIMP_MAX_DATA);
nla_strscpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
spin_unlock_bh(&d->tcf_lock);
if (goto_ch)
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 1f1d9ce3e968..8c1d1554f657 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -43,13 +43,11 @@ TC_INDIRECT_SCOPE int tcf_skbedit_act(struct sk_buff *skb,
{
struct tcf_skbedit *d = to_skbedit(a);
struct tcf_skbedit_params *params;
- int action;
tcf_lastuse_update(&d->tcf_tm);
bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb);
params = rcu_dereference_bh(d->params);
- action = READ_ONCE(d->tcf_action);
if (params->flags & SKBEDIT_F_PRIORITY)
skb->priority = params->priority;
@@ -85,7 +83,7 @@ TC_INDIRECT_SCOPE int tcf_skbedit_act(struct sk_buff *skb,
}
if (params->flags & SKBEDIT_F_PTYPE)
skb->pkt_type = params->ptype;
- return action;
+ return params->action;
err:
qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats));
@@ -262,6 +260,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
if (flags & SKBEDIT_F_MASK)
params_new->mask = *mask;
+ params_new->action = parm->action;
spin_lock_bh(&d->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
params_new = rcu_replace_pointer(d->params, params_new,
@@ -284,9 +283,9 @@ release_idr:
static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
+ const struct tcf_skbedit *d = to_skbedit(a);
unsigned char *b = skb_tail_pointer(skb);
- struct tcf_skbedit *d = to_skbedit(a);
- struct tcf_skbedit_params *params;
+ const struct tcf_skbedit_params *params;
struct tc_skbedit opt = {
.index = d->tcf_index,
.refcnt = refcount_read(&d->tcf_refcnt) - ref,
@@ -295,10 +294,9 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
u64 pure_flags = 0;
struct tcf_t t;
- spin_lock_bh(&d->tcf_lock);
- params = rcu_dereference_protected(d->params,
- lockdep_is_held(&d->tcf_lock));
- opt.action = d->tcf_action;
+ rcu_read_lock();
+ params = rcu_dereference(d->params);
+ opt.action = params->action;
if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
@@ -333,12 +331,12 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
tcf_tm_dump(&t, &d->tcf_tm);
if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
goto nla_put_failure;
- spin_unlock_bh(&d->tcf_lock);
+ rcu_read_unlock();
return skb->len;
nla_put_failure:
- spin_unlock_bh(&d->tcf_lock);
+ rcu_read_unlock();
nlmsg_trim(skb, b);
return -1;
}
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index dc0229693461..a9e0c1326e2a 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -27,19 +27,18 @@ TC_INDIRECT_SCOPE int tcf_skbmod_act(struct sk_buff *skb,
struct tcf_result *res)
{
struct tcf_skbmod *d = to_skbmod(a);
- int action, max_edit_len, err;
struct tcf_skbmod_params *p;
+ int max_edit_len, err;
u64 flags;
tcf_lastuse_update(&d->tcf_tm);
bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb);
- action = READ_ONCE(d->tcf_action);
- if (unlikely(action == TC_ACT_SHOT))
+ p = rcu_dereference_bh(d->skbmod_p);
+ if (unlikely(p->action == TC_ACT_SHOT))
goto drop;
max_edit_len = skb_mac_header_len(skb);
- p = rcu_dereference_bh(d->skbmod_p);
flags = p->flags;
/* tcf_skbmod_init() guarantees "flags" to be one of the following:
@@ -85,7 +84,7 @@ TC_INDIRECT_SCOPE int tcf_skbmod_act(struct sk_buff *skb,
INET_ECN_set_ce(skb);
out:
- return action;
+ return p->action;
drop:
qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
@@ -193,7 +192,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
}
p->flags = lflags;
-
+ p->action = parm->action;
if (ovr)
spin_lock_bh(&d->tcf_lock);
/* Protected by tcf_lock if overwriting existing action. */
@@ -248,10 +247,9 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
opt.index = d->tcf_index;
opt.refcnt = refcount_read(&d->tcf_refcnt) - ref;
opt.bindcnt = atomic_read(&d->tcf_bindcnt) - bind;
- spin_lock_bh(&d->tcf_lock);
- opt.action = d->tcf_action;
- p = rcu_dereference_protected(d->skbmod_p,
- lockdep_is_held(&d->tcf_lock));
+ rcu_read_lock();
+ p = rcu_dereference(d->skbmod_p);
+ opt.action = p->action;
opt.flags = p->flags;
if (nla_put(skb, TCA_SKBMOD_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
@@ -269,10 +267,10 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD))
goto nla_put_failure;
- spin_unlock_bh(&d->tcf_lock);
+ rcu_read_unlock();
return skb->len;
nla_put_failure:
- spin_unlock_bh(&d->tcf_lock);
+ rcu_read_unlock();
nlmsg_trim(skb, b);
return -1;
}
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 2cef4b08befb..876b30c5709e 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -29,13 +29,11 @@ TC_INDIRECT_SCOPE int tunnel_key_act(struct sk_buff *skb,
{
struct tcf_tunnel_key *t = to_tunnel_key(a);
struct tcf_tunnel_key_params *params;
- int action;
params = rcu_dereference_bh(t->params);
tcf_lastuse_update(&t->tcf_tm);
tcf_action_update_bstats(&t->common, skb);
- action = READ_ONCE(t->tcf_action);
switch (params->tcft_action) {
case TCA_TUNNEL_KEY_ACT_RELEASE:
@@ -51,7 +49,7 @@ TC_INDIRECT_SCOPE int tunnel_key_act(struct sk_buff *skb,
break;
}
- return action;
+ return params->action;
}
static const struct nla_policy
@@ -532,6 +530,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
params_new->tcft_action = parm->t_action;
params_new->tcft_enc_metadata = metadata;
+ params_new->action = parm->action;
spin_lock_bh(&t->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
params_new = rcu_replace_pointer(t->params, params_new,
@@ -726,10 +725,9 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
};
struct tcf_t tm;
- spin_lock_bh(&t->tcf_lock);
- params = rcu_dereference_protected(t->params,
- lockdep_is_held(&t->tcf_lock));
- opt.action = t->tcf_action;
+ rcu_read_lock();
+ params = rcu_dereference(t->params);
+ opt.action = params->action;
opt.t_action = params->tcft_action;
if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
@@ -766,12 +764,12 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm),
&tm, TCA_TUNNEL_KEY_PAD))
goto nla_put_failure;
- spin_unlock_bh(&t->tcf_lock);
+ rcu_read_unlock();
return skb->len;
nla_put_failure:
- spin_unlock_bh(&t->tcf_lock);
+ rcu_read_unlock();
nlmsg_trim(skb, b);
return -1;
}
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 383bf18b6862..a74621797d69 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -25,7 +25,6 @@ TC_INDIRECT_SCOPE int tcf_vlan_act(struct sk_buff *skb,
{
struct tcf_vlan *v = to_vlan(a);
struct tcf_vlan_params *p;
- int action;
int err;
u16 tci;
@@ -38,8 +37,6 @@ TC_INDIRECT_SCOPE int tcf_vlan_act(struct sk_buff *skb,
if (skb_at_tc_ingress(skb))
skb_push_rcsum(skb, skb->mac_len);
- action = READ_ONCE(v->tcf_action);
-
p = rcu_dereference_bh(v->vlan_p);
switch (p->tcfv_action) {
@@ -97,7 +94,7 @@ out:
skb_pull_rcsum(skb, skb->mac_len);
skb_reset_mac_len(skb);
- return action;
+ return p->action;
drop:
tcf_action_inc_drop_qstats(&v->common);
@@ -255,6 +252,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
ETH_ALEN);
}
+ p->action = parm->action;
spin_lock_bh(&v->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
p = rcu_replace_pointer(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
@@ -297,9 +295,9 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
};
struct tcf_t t;
- spin_lock_bh(&v->tcf_lock);
- opt.action = v->tcf_action;
- p = rcu_dereference_protected(v->vlan_p, lockdep_is_held(&v->tcf_lock));
+ rcu_read_lock();
+ p = rcu_dereference(v->vlan_p);
+ opt.action = p->action;
opt.v_action = p->tcfv_action;
if (nla_put(skb, TCA_VLAN_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
@@ -325,12 +323,12 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
tcf_tm_dump(&t, &v->tcf_tm);
if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD))
goto nla_put_failure;
- spin_unlock_bh(&v->tcf_lock);
+ rcu_read_unlock();
return skb->len;
nla_put_failure:
- spin_unlock_bh(&v->tcf_lock);
+ rcu_read_unlock();
nlmsg_trim(skb, b);
return -1;
}
diff --git a/net/sched/bpf_qdisc.c b/net/sched/bpf_qdisc.c
index 7ea8b54b2ab1..adcb618a2bfc 100644
--- a/net/sched/bpf_qdisc.c
+++ b/net/sched/bpf_qdisc.c
@@ -130,8 +130,7 @@ static int bpf_qdisc_btf_struct_access(struct bpf_verifier_log *log,
return 0;
}
-BTF_ID_LIST(bpf_qdisc_init_prologue_ids)
-BTF_ID(func, bpf_qdisc_init_prologue)
+BTF_ID_LIST_SINGLE(bpf_qdisc_init_prologue_ids, func, bpf_qdisc_init_prologue)
static int bpf_qdisc_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
const struct bpf_prog *prog)
@@ -161,8 +160,7 @@ static int bpf_qdisc_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
return insn - insn_buf;
}
-BTF_ID_LIST(bpf_qdisc_reset_destroy_epilogue_ids)
-BTF_ID(func, bpf_qdisc_reset_destroy_epilogue)
+BTF_ID_LIST_SINGLE(bpf_qdisc_reset_destroy_epilogue_ids, func, bpf_qdisc_reset_destroy_epilogue)
static int bpf_qdisc_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
s16 ctx_stack_off)
@@ -451,8 +449,7 @@ static struct bpf_struct_ops bpf_Qdisc_ops = {
.owner = THIS_MODULE,
};
-BTF_ID_LIST(bpf_sk_buff_dtor_ids)
-BTF_ID(func, bpf_kfree_skb)
+BTF_ID_LIST_SINGLE(bpf_sk_buff_dtor_ids, func, bpf_kfree_skb)
static int __init bpf_qdisc_kfunc_init(void)
{
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index 420c66203b17..6b3d0af72c39 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -108,7 +108,7 @@ static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m)
struct text_match *tm = EM_TEXT_PRIV(m);
struct tcf_em_text conf;
- strncpy(conf.algo, tm->config->ops->name, sizeof(conf.algo) - 1);
+ strscpy(conf.algo, tm->config->ops->name);
conf.from_offset = tm->from_offset;
conf.to_offset = tm->to_offset;
conf.from_layer = tm->from_layer;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index c5e3673aadbe..1e058b46d3e1 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -336,17 +336,22 @@ out:
return q;
}
-static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
+static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid,
+ struct netlink_ext_ack *extack)
{
unsigned long cl;
const struct Qdisc_class_ops *cops = p->ops->cl_ops;
- if (cops == NULL)
- return NULL;
+ if (cops == NULL) {
+ NL_SET_ERR_MSG(extack, "Parent qdisc is not classful");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
cl = cops->find(p, classid);
- if (cl == 0)
- return NULL;
+ if (cl == 0) {
+ NL_SET_ERR_MSG(extack, "Specified class not found");
+ return ERR_PTR(-ENOENT);
+ }
return cops->leaf(p, cl);
}
@@ -426,7 +431,7 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
- !memcmp(&rtab->data, nla_data(tab), 1024)) {
+ !memcmp(&rtab->data, nla_data(tab), TC_RTAB_SIZE)) {
rtab->refcnt++;
return rtab;
}
@@ -436,7 +441,7 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
if (rtab) {
rtab->rate = *r;
rtab->refcnt = 1;
- memcpy(rtab->data, nla_data(tab), 1024);
+ memcpy(rtab->data, nla_data(tab), TC_RTAB_SIZE);
if (r->linklayer == TC_LINKLAYER_UNAWARE)
r->linklayer = __detect_linklayer(r, rtab->data);
rtab->next = qdisc_rtab_list;
@@ -596,16 +601,6 @@ out:
qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
-void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
-{
- if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
- pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
- txt, qdisc->ops->id, qdisc->handle >> 16);
- qdisc->flags |= TCQ_F_WARN_NONWC;
- }
-}
-EXPORT_SYMBOL(qdisc_warn_nonwc);
-
static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
@@ -780,15 +775,12 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
{
- bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
const struct Qdisc_class_ops *cops;
unsigned long cl;
u32 parentid;
bool notify;
int drops;
- if (n == 0 && len == 0)
- return;
drops = max_t(int, n, 0);
rcu_read_lock();
while ((parentid = sch->parent)) {
@@ -797,17 +789,8 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
if (sch->flags & TCQ_F_NOPARENT)
break;
- /* Notify parent qdisc only if child qdisc becomes empty.
- *
- * If child was empty even before update then backlog
- * counter is screwed and we skip notification because
- * parent class is already passive.
- *
- * If the original child was offloaded then it is allowed
- * to be seem as empty, so the parent is notified anyway.
- */
- notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
- !qdisc_is_offloaded);
+ /* Notify parent qdisc only if child qdisc becomes empty. */
+ notify = !sch->q.qlen;
/* TODO: perform the search on a per txq basis */
sch = qdisc_lookup_rcu(qdisc_dev(sch), TC_H_MAJ(parentid));
if (sch == NULL) {
@@ -816,6 +799,9 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
}
cops = sch->ops->cl_ops;
if (notify && cops->qlen_notify) {
+ /* Note that qlen_notify must be idempotent as it may get called
+ * multiple times.
+ */
cl = cops->find(sch, parentid);
cops->qlen_notify(sch, cl);
}
@@ -1499,7 +1485,7 @@ static int __tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
return -ENOENT;
}
- q = qdisc_leaf(p, clid);
+ q = qdisc_leaf(p, clid, extack);
} else if (dev_ingress_queue(dev)) {
q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
}
@@ -1510,6 +1496,8 @@ static int __tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
return -ENOENT;
}
+ if (IS_ERR(q))
+ return PTR_ERR(q);
if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
NL_SET_ERR_MSG(extack, "Invalid handle");
@@ -1611,7 +1599,9 @@ static int __tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
return -ENOENT;
}
- q = qdisc_leaf(p, clid);
+ q = qdisc_leaf(p, clid, extack);
+ if (IS_ERR(q))
+ return PTR_ERR(q);
} else if (dev_ingress_queue_create(dev)) {
q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
}
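A hedged sketch (the helper name is hypothetical) of the error-propagation idiom the reworked qdisc_leaf() relies on: the function can now return a valid qdisc, NULL (the class exists but has no attached leaf), or an ERR_PTR() carrying -EOPNOTSUPP/-ENOENT together with an extack message, so callers unwrap it before dereferencing, as the two call sites above do.

	/* Sketch only: how a caller is expected to consume qdisc_leaf(). */
	static int example_find_leaf(struct Qdisc *p, u32 clid,
				     struct netlink_ext_ack *extack)
	{
		struct Qdisc *q = qdisc_leaf(p, clid, extack);

		if (IS_ERR(q))
			return PTR_ERR(q);	/* extack already set by qdisc_leaf() */
		if (!q)
			return -ENOENT;		/* class has no leaf qdisc attached */
		/* ... operate on q under RTNL ... */
		return 0;
	}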
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 48dd8c88903f..32bacfc314c2 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1407,7 +1407,10 @@ static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
return cake_calc_overhead(q, len, off);
/* borrowed from qdisc_pkt_len_init() */
- hdr_len = skb_transport_offset(skb);
+ if (!skb->encapsulation)
+ hdr_len = skb_transport_offset(skb);
+ else
+ hdr_len = skb_inner_transport_offset(skb);
/* + transport layer */
if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
@@ -1747,7 +1750,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
ktime_t now = ktime_get();
struct cake_tin_data *b;
struct cake_flow *flow;
- u32 idx;
+ u32 idx, tin;
/* choose flow to insert into */
idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
@@ -1757,6 +1760,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
__qdisc_drop(skb, to_free);
return ret;
}
+ tin = (u32)(b - q->tins);
idx--;
flow = &b->flows[idx];
@@ -1924,13 +1928,22 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
q->buffer_max_used = q->buffer_used;
if (q->buffer_used > q->buffer_limit) {
+ bool same_flow = false;
u32 dropped = 0;
+ u32 drop_id;
while (q->buffer_used > q->buffer_limit) {
dropped++;
- cake_drop(sch, to_free);
+ drop_id = cake_drop(sch, to_free);
+
+ if ((drop_id >> 16) == tin &&
+ (drop_id & 0xFFFF) == idx)
+ same_flow = true;
}
b->drop_overlimit += dropped;
+
+ if (same_flow)
+ return NET_XMIT_CN;
}
return NET_XMIT_SUCCESS;
}
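A hedged sketch (helpers hypothetical) of the tin/flow encoding the overlimit loop above checks against: cake_drop() appears to return a 32-bit identifier with the tin index in the upper 16 bits and the flow index in the lower 16 bits, which lets cake_enqueue() detect when one of the drops evicted a packet from the very flow it just enqueued into and return NET_XMIT_CN instead of NET_XMIT_SUCCESS.

	/* Sketch only, mirroring the checks in cake_enqueue() above. */
	static inline u32 cake_encode_drop_id(u32 tin, u32 flow_idx)
	{
		return (tin << 16) | (flow_idx & 0xFFFF);
	}

	static inline bool cake_drop_hit_same_flow(u32 drop_id, u32 tin, u32 idx)
	{
		return (drop_id >> 16) == tin && (drop_id & 0xFFFF) == idx;
	}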
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index c93761040c6e..fa0314679e43 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -101,9 +101,9 @@ static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
static int codel_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
+ unsigned int dropped_pkts = 0, dropped_bytes = 0;
struct codel_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_CODEL_MAX + 1];
- unsigned int qlen, dropped = 0;
int err;
err = nla_parse_nested_deprecated(tb, TCA_CODEL_MAX, opt,
@@ -142,15 +142,17 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt,
WRITE_ONCE(q->params.ecn,
!!nla_get_u32(tb[TCA_CODEL_ECN]));
- qlen = sch->q.qlen;
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
- dropped += qdisc_pkt_len(skb);
- qdisc_qstats_backlog_dec(sch, skb);
+ if (!skb)
+ break;
+
+ dropped_pkts++;
+ dropped_bytes += qdisc_pkt_len(skb);
rtnl_qdisc_drop(skb, sch);
}
- qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
+ qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
sch_tree_unlock(sch);
return 0;
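The same accounting change recurs in the fq, fq_codel, fq_pie, hhf and pie hunks further down: rather than inferring drop counts from qlen/backlog deltas, each *_change() loop now counts exactly what it frees and stops as soon as qdisc_dequeue_internal() returns NULL. A condensed sketch of the shared pattern (the boolean passed to qdisc_dequeue_internal() and the freeing helper differ slightly between qdiscs):

	unsigned int dropped_pkts = 0, dropped_bytes = 0;

	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = qdisc_dequeue_internal(sch, true);

		if (!skb)
			break;			/* nothing left to reclaim */
		dropped_pkts++;
		dropped_bytes += qdisc_pkt_len(skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);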
diff --git a/net/sched/sch_dualpi2.c b/net/sched/sch_dualpi2.c
new file mode 100644
index 000000000000..4b975feb52b1
--- /dev/null
+++ b/net/sched/sch_dualpi2.c
@@ -0,0 +1,1176 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+/* Copyright (C) 2024 Nokia
+ *
+ * Author: Koen De Schepper <koen.de_schepper@nokia-bell-labs.com>
+ * Author: Olga Albisser <olga@albisser.org>
+ * Author: Henrik Steen <henrist@henrist.net>
+ * Author: Olivier Tilmans <olivier.tilmans@nokia.com>
+ * Author: Chia-Yu Chang <chia-yu.chang@nokia-bell-labs.com>
+ *
+ * DualPI Improved with a Square (dualpi2):
+ * - Supports congestion controls that comply with the Prague requirements
+ * in RFC9331 (e.g. TCP-Prague)
+ * - Supports coupled dual-queue with PI2 as defined in RFC9332
+ * - Supports ECN L4S-identifier (IP.ECN==0b*1)
+ *
+ * note: Although DCTCP and BBRv3 can use shallow-threshold ECN marks,
+ * they do not meet the 'Prague L4S Requirements' listed in RFC 9331
+ * Section 4, so they can only be used with DualPI2 in a datacenter
+ * context.
+ *
+ * References:
+ * - RFC9332: https://datatracker.ietf.org/doc/html/rfc9332
+ * - De Schepper, Koen, et al. "PI 2: A linearized AQM for both classic and
+ * scalable TCP." in proc. ACM CoNEXT'16, 2016.
+ */
+
+#include <linux/errno.h>
+#include <linux/hrtimer.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include <net/gso.h>
+#include <net/inet_ecn.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+
+/* Using 32b probabilities enables supporting flows with windows of up to
+ * ~8.6 * 1e9 packets, i.e., twice the maximal snd_cwnd.
+ * MAX_PROB must be consistent with the RNG in dualpi2_roll().
+ */
+#define MAX_PROB U32_MAX
+
+/* alpha/beta values exchanged over netlink are in units of 256ns */
+#define ALPHA_BETA_SHIFT 8
+
+/* Scaled values of alpha/beta must fit in 32b to avoid overflow in later
+ * computations. Consequently (see dualpi2_scale_alpha_beta()), their
+ * netlink-provided values can use at most 31b, i.e. be at most (2^23)-1
+ * (~4MHz) as those are given in 1/256ths. This enables tuning alpha/beta to
+ * control flows whose maximal RTTs range from a few usecs up to a few secs.
+ */
+#define ALPHA_BETA_MAX ((1U << 31) - 1)
+
+/* Internal alpha/beta are in units of 64ns.
+ * This allows every alpha/beta value in the allowed range to be used without
+ * losing precision to rounding when scaling them internally, e.g.,
+ * dualpi2_scale_alpha_beta(1) will not round down to 0.
+ */
+#define ALPHA_BETA_GRANULARITY 6
+
+#define ALPHA_BETA_SCALING (ALPHA_BETA_SHIFT - ALPHA_BETA_GRANULARITY)
+
+/* We express the weights (wc, wl) in %, i.e., wc + wl = 100 */
+#define MAX_WC 100
+
+struct dualpi2_sched_data {
+ struct Qdisc *l_queue; /* The L4S Low latency queue (L-queue) */
+ struct Qdisc *sch; /* The Classic queue (C-queue) */
+
+ /* Registered tc filters */
+ struct tcf_proto __rcu *tcf_filters;
+ struct tcf_block *tcf_block;
+
+ /* PI2 parameters */
+ u64 pi2_target; /* Target delay in nanoseconds */
+ u32 pi2_tupdate; /* Timer frequency in nanoseconds */
+ u32 pi2_prob; /* Base PI probability */
+ u32 pi2_alpha; /* Gain factor for the integral rate response */
+ u32 pi2_beta; /* Gain factor for the proportional response */
+ struct hrtimer pi2_timer; /* prob update timer */
+
+ /* Step AQM (L-queue only) parameters */
+ u32 step_thresh; /* Step threshold */
+ bool step_in_packets; /* Step thresh in packets (1) or time (0) */
+
+ /* C-queue starvation protection */
+ s32 c_protection_credit; /* Credit (sign indicates which queue) */
+ s32 c_protection_init; /* Reset value of the credit */
+ u8 c_protection_wc; /* C-queue weight (between 0 and MAX_WC) */
+ u8 c_protection_wl; /* L-queue weight (MAX_WC - wc) */
+
+ /* General dualQ parameters */
+ u32 memory_limit; /* Memory limit of both queues */
+ u8 coupling_factor;/* Coupling factor (k) between both queues */
+ u8 ecn_mask; /* Mask to match packets into L-queue */
+ u32 min_qlen_step; /* Minimum queue length to apply step thresh */
+ bool drop_early; /* Drop at enqueue (1) instead of dequeue (0) */
+ bool drop_overload; /* Drop (1) on overload, or overflow (0) */
+ bool split_gso; /* Split aggregated skb (1) or leave as is (0) */
+
+ /* Statistics */
+ u64 c_head_ts; /* Enqueue timestamp of the C-queue head */
+ u64 l_head_ts; /* Enqueue timestamp of the L-queue head */
+ u64 last_qdelay; /* Q delay val at the last probability update */
+ u32 packets_in_c; /* Enqueue packet counter of the C-queue */
+ u32 packets_in_l; /* Enqueue packet counter of the L-queue */
+ u32 maxq; /* Maximum queue size of the C-queue */
+ u32 ecn_mark; /* ECN mark pkt counter due to PI probability */
+ u32 step_marks; /* ECN mark pkt counter due to step AQM */
+ u32 memory_used; /* Memory used of both queues */
+ u32 max_memory_used;/* Maximum used memory */
+
+ /* Deferred drop statistics */
+ u32 deferred_drops_cnt; /* Packets dropped */
+ u32 deferred_drops_len; /* Bytes dropped */
+};
+
+struct dualpi2_skb_cb {
+ u64 ts; /* Timestamp at enqueue */
+ u8 apply_step:1, /* Can we apply the step threshold */
+ classified:2, /* Packet classification results */
+ ect:2; /* Packet ECT codepoint */
+};
+
+enum dualpi2_classification_results {
+ DUALPI2_C_CLASSIC = 0, /* C-queue */
+ DUALPI2_C_L4S = 1, /* L-queue (scale mark/classic drop) */
+ DUALPI2_C_LLLL = 2, /* L-queue (no drops/marks) */
+ __DUALPI2_C_MAX /* Keep last */
+};
+
+static struct dualpi2_skb_cb *dualpi2_skb_cb(struct sk_buff *skb)
+{
+ qdisc_cb_private_validate(skb, sizeof(struct dualpi2_skb_cb));
+ return (struct dualpi2_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static u64 dualpi2_sojourn_time(struct sk_buff *skb, u64 reference)
+{
+ return reference - dualpi2_skb_cb(skb)->ts;
+}
+
+static u64 head_enqueue_time(struct Qdisc *q)
+{
+ struct sk_buff *skb = qdisc_peek_head(q);
+
+ return skb ? dualpi2_skb_cb(skb)->ts : 0;
+}
+
+static u32 dualpi2_scale_alpha_beta(u32 param)
+{
+ u64 tmp = ((u64)param * MAX_PROB >> ALPHA_BETA_SCALING);
+
+ do_div(tmp, NSEC_PER_SEC);
+ return tmp;
+}
+
+static u32 dualpi2_unscale_alpha_beta(u32 param)
+{
+ u64 tmp = ((u64)param * NSEC_PER_SEC << ALPHA_BETA_SCALING);
+
+ do_div(tmp, MAX_PROB);
+ return tmp;
+}
+
+static ktime_t next_pi2_timeout(struct dualpi2_sched_data *q)
+{
+ return ktime_add_ns(ktime_get_ns(), q->pi2_tupdate);
+}
+
+static bool skb_is_l4s(struct sk_buff *skb)
+{
+ return dualpi2_skb_cb(skb)->classified == DUALPI2_C_L4S;
+}
+
+static bool skb_in_l_queue(struct sk_buff *skb)
+{
+ return dualpi2_skb_cb(skb)->classified != DUALPI2_C_CLASSIC;
+}
+
+static bool skb_apply_step(struct sk_buff *skb, struct dualpi2_sched_data *q)
+{
+ return skb_is_l4s(skb) && qdisc_qlen(q->l_queue) >= q->min_qlen_step;
+}
+
+static bool dualpi2_mark(struct dualpi2_sched_data *q, struct sk_buff *skb)
+{
+ if (INET_ECN_set_ce(skb)) {
+ q->ecn_mark++;
+ return true;
+ }
+ return false;
+}
+
+static void dualpi2_reset_c_protection(struct dualpi2_sched_data *q)
+{
+ q->c_protection_credit = q->c_protection_init;
+}
+
+/* This computes the initial credit value and WRR weight for the L queue (wl)
+ * from the weight of the C queue (wc).
+ * If wl > wc, the scheduler will start with the L queue when reset.
+ */
+static void dualpi2_calculate_c_protection(struct Qdisc *sch,
+ struct dualpi2_sched_data *q, u32 wc)
+{
+ q->c_protection_wc = wc;
+ q->c_protection_wl = MAX_WC - wc;
+ q->c_protection_init = (s32)psched_mtu(qdisc_dev(sch)) *
+ ((int)q->c_protection_wc - (int)q->c_protection_wl);
+ dualpi2_reset_c_protection(q);
+}
+
+static bool dualpi2_roll(u32 prob)
+{
+ return get_random_u32() <= prob;
+}
+
+/* Packets in the C-queue are subject to a marking probability pC, which is the
+ * square of the internal PI probability (i.e., an overall lower mark/drop
+ * probability). If the qdisc is overloaded, ignore ECT values and only drop.
+ *
+ * Note that this marking scheme is also applied to L4S packets during overload.
+ * Return true if packet dropping is required in C queue
+ */
+static bool dualpi2_classic_marking(struct dualpi2_sched_data *q,
+ struct sk_buff *skb, u32 prob,
+ bool overload)
+{
+ if (dualpi2_roll(prob) && dualpi2_roll(prob)) {
+ if (overload || dualpi2_skb_cb(skb)->ect == INET_ECN_NOT_ECT)
+ return true;
+ dualpi2_mark(q, skb);
+ }
+ return false;
+}
+
+/* Packets in the L-queue are subject to a marking probability pL given by the
+ * internal PI probability scaled by the coupling factor.
+ *
+ * On overload (i.e., @local_l_prob is >= 100%):
+ * - if the qdisc is configured to trade losses to preserve latency (i.e.,
+ * @q->drop_overload), apply classic drops first before marking.
+ * - otherwise, preserve the "no loss" property of ECN at the cost of queueing
+ * delay, eventually resulting in taildrop behavior once sch->limit is
+ * reached.
+ * Return true if packet dropping is required in L queue
+ */
+static bool dualpi2_scalable_marking(struct dualpi2_sched_data *q,
+ struct sk_buff *skb,
+ u64 local_l_prob, u32 prob,
+ bool overload)
+{
+ if (overload) {
+ /* Apply classic drop */
+ if (!q->drop_overload ||
+ !(dualpi2_roll(prob) && dualpi2_roll(prob)))
+ goto mark;
+ return true;
+ }
+
+ /* We can safely cut the upper 32b as overload==false */
+ if (dualpi2_roll(local_l_prob)) {
+ /* Non-ECT packets could have been classified as L4S by filters. */
+ if (dualpi2_skb_cb(skb)->ect == INET_ECN_NOT_ECT)
+ return true;
+mark:
+ dualpi2_mark(q, skb);
+ }
+ return false;
+}
+
+/* Decide whether a given packet must be dropped (or marked if ECT), according
+ * to the PI2 probability.
+ *
+ * Never mark/drop if we have a standing queue of less than 2 MTUs.
+ */
+static bool must_drop(struct Qdisc *sch, struct dualpi2_sched_data *q,
+ struct sk_buff *skb)
+{
+ u64 local_l_prob;
+ bool overload;
+ u32 prob;
+
+ if (sch->qstats.backlog < 2 * psched_mtu(qdisc_dev(sch)))
+ return false;
+
+ prob = READ_ONCE(q->pi2_prob);
+ local_l_prob = (u64)prob * q->coupling_factor;
+ overload = local_l_prob > MAX_PROB;
+
+ switch (dualpi2_skb_cb(skb)->classified) {
+ case DUALPI2_C_CLASSIC:
+ return dualpi2_classic_marking(q, skb, prob, overload);
+ case DUALPI2_C_L4S:
+ return dualpi2_scalable_marking(q, skb, local_l_prob, prob,
+ overload);
+ default: /* DUALPI2_C_LLLL */
+ return false;
+ }
+}
+
+static void dualpi2_read_ect(struct sk_buff *skb)
+{
+ struct dualpi2_skb_cb *cb = dualpi2_skb_cb(skb);
+ int wlen = skb_network_offset(skb);
+
+ switch (skb_protocol(skb, true)) {
+ case htons(ETH_P_IP):
+ wlen += sizeof(struct iphdr);
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
+ goto not_ecn;
+
+ cb->ect = ipv4_get_dsfield(ip_hdr(skb)) & INET_ECN_MASK;
+ break;
+ case htons(ETH_P_IPV6):
+ wlen += sizeof(struct ipv6hdr);
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
+ goto not_ecn;
+
+ cb->ect = ipv6_get_dsfield(ipv6_hdr(skb)) & INET_ECN_MASK;
+ break;
+ default:
+ goto not_ecn;
+ }
+ return;
+
+not_ecn:
+ /* Non-pullable/writable packets can only be dropped, hence they are
+ * classified as not ECT.
+ */
+ cb->ect = INET_ECN_NOT_ECT;
+}
+
+static int dualpi2_skb_classify(struct dualpi2_sched_data *q,
+ struct sk_buff *skb)
+{
+ struct dualpi2_skb_cb *cb = dualpi2_skb_cb(skb);
+ struct tcf_result res;
+ struct tcf_proto *fl;
+ int result;
+
+ dualpi2_read_ect(skb);
+ if (cb->ect & q->ecn_mask) {
+ cb->classified = DUALPI2_C_L4S;
+ return NET_XMIT_SUCCESS;
+ }
+
+ if (TC_H_MAJ(skb->priority) == q->sch->handle &&
+ TC_H_MIN(skb->priority) < __DUALPI2_C_MAX) {
+ cb->classified = TC_H_MIN(skb->priority);
+ return NET_XMIT_SUCCESS;
+ }
+
+ fl = rcu_dereference_bh(q->tcf_filters);
+ if (!fl) {
+ cb->classified = DUALPI2_C_CLASSIC;
+ return NET_XMIT_SUCCESS;
+ }
+
+ result = tcf_classify(skb, NULL, fl, &res, false);
+ if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+ switch (result) {
+ case TC_ACT_STOLEN:
+ case TC_ACT_QUEUED:
+ case TC_ACT_TRAP:
+ return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+ case TC_ACT_SHOT:
+ return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+ }
+#endif
+ cb->classified = TC_H_MIN(res.classid) < __DUALPI2_C_MAX ?
+ TC_H_MIN(res.classid) : DUALPI2_C_CLASSIC;
+ }
+ return NET_XMIT_SUCCESS;
+}
+
+static int dualpi2_enqueue_skb(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+ struct dualpi2_skb_cb *cb;
+
+ if (unlikely(qdisc_qlen(sch) >= sch->limit) ||
+ unlikely((u64)q->memory_used + skb->truesize > q->memory_limit)) {
+ qdisc_qstats_overlimit(sch);
+ if (skb_in_l_queue(skb))
+ qdisc_qstats_overlimit(q->l_queue);
+ return qdisc_drop_reason(skb, sch, to_free,
+ SKB_DROP_REASON_QDISC_OVERLIMIT);
+ }
+
+ if (q->drop_early && must_drop(sch, q, skb)) {
+ qdisc_drop_reason(skb, sch, to_free,
+ SKB_DROP_REASON_QDISC_CONGESTED);
+ return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+ }
+
+ cb = dualpi2_skb_cb(skb);
+ cb->ts = ktime_get_ns();
+ q->memory_used += skb->truesize;
+ if (q->memory_used > q->max_memory_used)
+ q->max_memory_used = q->memory_used;
+
+ if (qdisc_qlen(sch) > q->maxq)
+ q->maxq = qdisc_qlen(sch);
+
+ if (skb_in_l_queue(skb)) {
+ /* Apply step thresh if skb is L4S && L-queue len >= min_qlen */
+ dualpi2_skb_cb(skb)->apply_step = skb_apply_step(skb, q);
+
+ /* Keep the overall qdisc stats consistent */
+ ++sch->q.qlen;
+ qdisc_qstats_backlog_inc(sch, skb);
+ ++q->packets_in_l;
+ if (!q->l_head_ts)
+ q->l_head_ts = cb->ts;
+ return qdisc_enqueue_tail(skb, q->l_queue);
+ }
+ ++q->packets_in_c;
+ if (!q->c_head_ts)
+ q->c_head_ts = cb->ts;
+ return qdisc_enqueue_tail(skb, sch);
+}
+
+/* By default, dualpi2 will split GSO skbs into independent skbs and enqueue
+ * each of those individually. This yields the following benefits, at the
+ * expense of CPU usage:
+ * - Finer-grained AQM actions as the sub-packets of a burst no longer share the
+ * same fate (e.g., the random mark/drop probability is applied individually)
+ * - Improved precision of the starvation protection/WRR scheduler at dequeue,
+ * as the size of the dequeued packets will be smaller.
+ */
+static int dualpi2_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+ int err;
+
+ err = dualpi2_skb_classify(q, skb);
+ if (err != NET_XMIT_SUCCESS) {
+ if (err & __NET_XMIT_BYPASS)
+ qdisc_qstats_drop(sch);
+ __qdisc_drop(skb, to_free);
+ return err;
+ }
+
+ if (q->split_gso && skb_is_gso(skb)) {
+ netdev_features_t features;
+ struct sk_buff *nskb, *next;
+ int cnt, byte_len, orig_len;
+ int err;
+
+ features = netif_skb_features(skb);
+ nskb = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+ if (IS_ERR_OR_NULL(nskb))
+ return qdisc_drop(skb, sch, to_free);
+
+ cnt = 1;
+ byte_len = 0;
+ orig_len = qdisc_pkt_len(skb);
+ skb_list_walk_safe(nskb, nskb, next) {
+ skb_mark_not_on_list(nskb);
+
+ /* Iterate through GSO fragments of an skb:
+ * (1) Set pkt_len from the single GSO fragments
+ * (2) Copy classified and ect values of an skb
+ * (3) Enqueue fragment & set ts in dualpi2_enqueue_skb
+ */
+ qdisc_skb_cb(nskb)->pkt_len = nskb->len;
+ dualpi2_skb_cb(nskb)->classified =
+ dualpi2_skb_cb(skb)->classified;
+ dualpi2_skb_cb(nskb)->ect = dualpi2_skb_cb(skb)->ect;
+ err = dualpi2_enqueue_skb(nskb, sch, to_free);
+
+ if (err == NET_XMIT_SUCCESS) {
+ /* Compute the backlog adjustment that needs
+ * to be propagated in the qdisc tree to reflect
+ * all new skbs successfully enqueued.
+ */
+ ++cnt;
+ byte_len += nskb->len;
+ }
+ }
+ if (cnt > 1) {
+ /* The caller will add the original skb stats to its
+ * backlog, compensate this if any nskb is enqueued.
+ */
+ --cnt;
+ byte_len -= orig_len;
+ }
+ qdisc_tree_reduce_backlog(sch, -cnt, -byte_len);
+ consume_skb(skb);
+ return err;
+ }
+ return dualpi2_enqueue_skb(skb, sch, to_free);
+}
+
+/* Select the queue from which the next packet can be dequeued, ensuring that
+ * neither queue can starve the other with a WRR scheduler.
+ *
+ * The sign of the WRR credit determines the next queue, while the size of
+ * the dequeued packet determines the magnitude of the WRR credit change. If
+ * either queue is empty, the WRR credit is kept unchanged.
+ *
+ * As the dequeued packet can be dropped later, the caller has to perform the
+ * qdisc_bstats_update() calls.
+ */
+static struct sk_buff *dequeue_packet(struct Qdisc *sch,
+ struct dualpi2_sched_data *q,
+ int *credit_change,
+ u64 now)
+{
+ struct sk_buff *skb = NULL;
+ int c_len;
+
+ *credit_change = 0;
+ c_len = qdisc_qlen(sch) - qdisc_qlen(q->l_queue);
+ if (qdisc_qlen(q->l_queue) && (!c_len || q->c_protection_credit <= 0)) {
+ skb = __qdisc_dequeue_head(&q->l_queue->q);
+ WRITE_ONCE(q->l_head_ts, head_enqueue_time(q->l_queue));
+ if (c_len)
+ *credit_change = q->c_protection_wc;
+ qdisc_qstats_backlog_dec(q->l_queue, skb);
+
+ /* Keep the global queue size consistent */
+ --sch->q.qlen;
+ q->memory_used -= skb->truesize;
+ } else if (c_len) {
+ skb = __qdisc_dequeue_head(&sch->q);
+ WRITE_ONCE(q->c_head_ts, head_enqueue_time(sch));
+ if (qdisc_qlen(q->l_queue))
+ *credit_change = ~((s32)q->c_protection_wl) + 1;
+ q->memory_used -= skb->truesize;
+ } else {
+ dualpi2_reset_c_protection(q);
+ return NULL;
+ }
+ *credit_change *= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
+ return skb;
+}
+
+static int do_step_aqm(struct dualpi2_sched_data *q, struct sk_buff *skb,
+ u64 now)
+{
+ u64 qdelay = 0;
+
+ if (q->step_in_packets)
+ qdelay = qdisc_qlen(q->l_queue);
+ else
+ qdelay = dualpi2_sojourn_time(skb, now);
+
+ if (dualpi2_skb_cb(skb)->apply_step && qdelay > q->step_thresh) {
+ if (!dualpi2_skb_cb(skb)->ect) {
+ /* Drop this non-ECT packet */
+ return 1;
+ }
+
+ if (dualpi2_mark(q, skb))
+ ++q->step_marks;
+ }
+ qdisc_bstats_update(q->l_queue, skb);
+ return 0;
+}
+
+static void drop_and_retry(struct dualpi2_sched_data *q, struct sk_buff *skb,
+ struct Qdisc *sch, enum skb_drop_reason reason)
+{
+ ++q->deferred_drops_cnt;
+ q->deferred_drops_len += qdisc_pkt_len(skb);
+ kfree_skb_reason(skb, reason);
+ qdisc_qstats_drop(sch);
+}
+
+static struct sk_buff *dualpi2_qdisc_dequeue(struct Qdisc *sch)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+ int credit_change;
+ u64 now;
+
+ now = ktime_get_ns();
+
+ while ((skb = dequeue_packet(sch, q, &credit_change, now))) {
+ if (!q->drop_early && must_drop(sch, q, skb)) {
+ drop_and_retry(q, skb, sch,
+ SKB_DROP_REASON_QDISC_CONGESTED);
+ continue;
+ }
+
+ if (skb_in_l_queue(skb) && do_step_aqm(q, skb, now)) {
+ qdisc_qstats_drop(q->l_queue);
+ drop_and_retry(q, skb, sch,
+ SKB_DROP_REASON_DUALPI2_STEP_DROP);
+ continue;
+ }
+
+ q->c_protection_credit += credit_change;
+ qdisc_bstats_update(sch, skb);
+ break;
+ }
+
+ if (q->deferred_drops_cnt) {
+ qdisc_tree_reduce_backlog(sch, q->deferred_drops_cnt,
+ q->deferred_drops_len);
+ q->deferred_drops_cnt = 0;
+ q->deferred_drops_len = 0;
+ }
+ return skb;
+}
+
+static s64 __scale_delta(u64 diff)
+{
+ do_div(diff, 1 << ALPHA_BETA_GRANULARITY);
+ return diff;
+}
+
+static void get_queue_delays(struct dualpi2_sched_data *q, u64 *qdelay_c,
+ u64 *qdelay_l)
+{
+ u64 now, qc, ql;
+
+ now = ktime_get_ns();
+ qc = READ_ONCE(q->c_head_ts);
+ ql = READ_ONCE(q->l_head_ts);
+
+ *qdelay_c = qc ? now - qc : 0;
+ *qdelay_l = ql ? now - ql : 0;
+}
+
+static u32 calculate_probability(struct Qdisc *sch)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+ u32 new_prob;
+ u64 qdelay_c;
+ u64 qdelay_l;
+ u64 qdelay;
+ s64 delta;
+
+ get_queue_delays(q, &qdelay_c, &qdelay_l);
+ qdelay = max(qdelay_l, qdelay_c);
+
+ /* Alpha and beta take at most 32b, i.e., the delta computation would
+ * overflow for queuing delay differences > ~4.2 sec.
+ */
+ delta = ((s64)qdelay - (s64)q->pi2_target) * q->pi2_alpha;
+ delta += ((s64)qdelay - (s64)q->last_qdelay) * q->pi2_beta;
+ q->last_qdelay = qdelay;
+
+ /* Bound new_prob between 0 and MAX_PROB */
+ if (delta > 0) {
+ new_prob = __scale_delta(delta) + q->pi2_prob;
+ if (new_prob < q->pi2_prob)
+ new_prob = MAX_PROB;
+ } else {
+ new_prob = q->pi2_prob - __scale_delta(~delta + 1);
+ if (new_prob > q->pi2_prob)
+ new_prob = 0;
+ }
+
+ /* If we do not drop on overload, ensure we cap the L4S probability to
+ * 100% to keep window fairness when overflowing.
+ */
+ if (!q->drop_overload)
+ return min_t(u32, new_prob, MAX_PROB / q->coupling_factor);
+ return new_prob;
+}
+
+static u32 get_memory_limit(struct Qdisc *sch, u32 limit)
+{
+ /* As a rule of thumb, double the packet length to account for
+ * per-packet overhead in the memory_limit.
+ */
+ u64 memlim = mul_u32_u32(limit, 2 * psched_mtu(qdisc_dev(sch)));
+
+ if (upper_32_bits(memlim))
+ return U32_MAX;
+ else
+ return lower_32_bits(memlim);
+}
+
+static u32 convert_us_to_nsec(u32 us)
+{
+ u64 ns = mul_u32_u32(us, NSEC_PER_USEC);
+
+ if (upper_32_bits(ns))
+ return U32_MAX;
+
+ return lower_32_bits(ns);
+}
+
+static u32 convert_ns_to_usec(u64 ns)
+{
+ do_div(ns, NSEC_PER_USEC);
+ if (upper_32_bits(ns))
+ return U32_MAX;
+
+ return lower_32_bits(ns);
+}
+
+static enum hrtimer_restart dualpi2_timer(struct hrtimer *timer)
+{
+ struct dualpi2_sched_data *q = timer_container_of(q, timer, pi2_timer);
+ struct Qdisc *sch = q->sch;
+ spinlock_t *root_lock; /* to lock qdisc for probability calculations */
+
+ rcu_read_lock();
+ root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+ spin_lock(root_lock);
+
+ WRITE_ONCE(q->pi2_prob, calculate_probability(sch));
+ hrtimer_set_expires(&q->pi2_timer, next_pi2_timeout(q));
+
+ spin_unlock(root_lock);
+ rcu_read_unlock();
+ return HRTIMER_RESTART;
+}
+
+static struct netlink_range_validation dualpi2_alpha_beta_range = {
+ .min = 1,
+ .max = ALPHA_BETA_MAX,
+};
+
+static const struct nla_policy dualpi2_policy[TCA_DUALPI2_MAX + 1] = {
+ [TCA_DUALPI2_LIMIT] = NLA_POLICY_MIN(NLA_U32, 1),
+ [TCA_DUALPI2_MEMORY_LIMIT] = NLA_POLICY_MIN(NLA_U32, 1),
+ [TCA_DUALPI2_TARGET] = { .type = NLA_U32 },
+ [TCA_DUALPI2_TUPDATE] = NLA_POLICY_MIN(NLA_U32, 1),
+ [TCA_DUALPI2_ALPHA] =
+ NLA_POLICY_FULL_RANGE(NLA_U32, &dualpi2_alpha_beta_range),
+ [TCA_DUALPI2_BETA] =
+ NLA_POLICY_FULL_RANGE(NLA_U32, &dualpi2_alpha_beta_range),
+ [TCA_DUALPI2_STEP_THRESH_PKTS] = { .type = NLA_U32 },
+ [TCA_DUALPI2_STEP_THRESH_US] = { .type = NLA_U32 },
+ [TCA_DUALPI2_MIN_QLEN_STEP] = { .type = NLA_U32 },
+ [TCA_DUALPI2_COUPLING] = NLA_POLICY_MIN(NLA_U8, 1),
+ [TCA_DUALPI2_DROP_OVERLOAD] =
+ NLA_POLICY_MAX(NLA_U8, TCA_DUALPI2_DROP_OVERLOAD_MAX),
+ [TCA_DUALPI2_DROP_EARLY] =
+ NLA_POLICY_MAX(NLA_U8, TCA_DUALPI2_DROP_EARLY_MAX),
+ [TCA_DUALPI2_C_PROTECTION] =
+ NLA_POLICY_RANGE(NLA_U8, 0, MAX_WC),
+ [TCA_DUALPI2_ECN_MASK] =
+ NLA_POLICY_RANGE(NLA_U8, TC_DUALPI2_ECN_MASK_L4S_ECT,
+ TCA_DUALPI2_ECN_MASK_MAX),
+ [TCA_DUALPI2_SPLIT_GSO] =
+ NLA_POLICY_MAX(NLA_U8, TCA_DUALPI2_SPLIT_GSO_MAX),
+};
+
+static int dualpi2_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[TCA_DUALPI2_MAX + 1];
+ struct dualpi2_sched_data *q;
+ int old_backlog;
+ int old_qlen;
+ int err;
+
+ if (!opt || !nla_len(opt)) {
+ NL_SET_ERR_MSG_MOD(extack, "Dualpi2 options are required");
+ return -EINVAL;
+ }
+ err = nla_parse_nested(tb, TCA_DUALPI2_MAX, opt, dualpi2_policy,
+ extack);
+ if (err < 0)
+ return err;
+ if (tb[TCA_DUALPI2_STEP_THRESH_PKTS] && tb[TCA_DUALPI2_STEP_THRESH_US]) {
+ NL_SET_ERR_MSG_MOD(extack, "multiple step thresh attributes");
+ return -EINVAL;
+ }
+
+ q = qdisc_priv(sch);
+ sch_tree_lock(sch);
+
+ if (tb[TCA_DUALPI2_LIMIT]) {
+ u32 limit = nla_get_u32(tb[TCA_DUALPI2_LIMIT]);
+
+ WRITE_ONCE(sch->limit, limit);
+ WRITE_ONCE(q->memory_limit, get_memory_limit(sch, limit));
+ }
+
+ if (tb[TCA_DUALPI2_MEMORY_LIMIT])
+ WRITE_ONCE(q->memory_limit,
+ nla_get_u32(tb[TCA_DUALPI2_MEMORY_LIMIT]));
+
+ if (tb[TCA_DUALPI2_TARGET]) {
+ u64 target = nla_get_u32(tb[TCA_DUALPI2_TARGET]);
+
+ WRITE_ONCE(q->pi2_target, target * NSEC_PER_USEC);
+ }
+
+ if (tb[TCA_DUALPI2_TUPDATE]) {
+ u64 tupdate = nla_get_u32(tb[TCA_DUALPI2_TUPDATE]);
+
+ WRITE_ONCE(q->pi2_tupdate, convert_us_to_nsec(tupdate));
+ }
+
+ if (tb[TCA_DUALPI2_ALPHA]) {
+ u32 alpha = nla_get_u32(tb[TCA_DUALPI2_ALPHA]);
+
+ WRITE_ONCE(q->pi2_alpha, dualpi2_scale_alpha_beta(alpha));
+ }
+
+ if (tb[TCA_DUALPI2_BETA]) {
+ u32 beta = nla_get_u32(tb[TCA_DUALPI2_BETA]);
+
+ WRITE_ONCE(q->pi2_beta, dualpi2_scale_alpha_beta(beta));
+ }
+
+ if (tb[TCA_DUALPI2_STEP_THRESH_PKTS]) {
+ u32 step_th = nla_get_u32(tb[TCA_DUALPI2_STEP_THRESH_PKTS]);
+
+ WRITE_ONCE(q->step_in_packets, true);
+ WRITE_ONCE(q->step_thresh, step_th);
+ } else if (tb[TCA_DUALPI2_STEP_THRESH_US]) {
+ u32 step_th = nla_get_u32(tb[TCA_DUALPI2_STEP_THRESH_US]);
+
+ WRITE_ONCE(q->step_in_packets, false);
+ WRITE_ONCE(q->step_thresh, convert_us_to_nsec(step_th));
+ }
+
+ if (tb[TCA_DUALPI2_MIN_QLEN_STEP])
+ WRITE_ONCE(q->min_qlen_step,
+ nla_get_u32(tb[TCA_DUALPI2_MIN_QLEN_STEP]));
+
+ if (tb[TCA_DUALPI2_COUPLING]) {
+ u8 coupling = nla_get_u8(tb[TCA_DUALPI2_COUPLING]);
+
+ WRITE_ONCE(q->coupling_factor, coupling);
+ }
+
+ if (tb[TCA_DUALPI2_DROP_OVERLOAD]) {
+ u8 drop_overload = nla_get_u8(tb[TCA_DUALPI2_DROP_OVERLOAD]);
+
+ WRITE_ONCE(q->drop_overload, (bool)drop_overload);
+ }
+
+ if (tb[TCA_DUALPI2_DROP_EARLY]) {
+ u8 drop_early = nla_get_u8(tb[TCA_DUALPI2_DROP_EARLY]);
+
+ WRITE_ONCE(q->drop_early, (bool)drop_early);
+ }
+
+ if (tb[TCA_DUALPI2_C_PROTECTION]) {
+ u8 wc = nla_get_u8(tb[TCA_DUALPI2_C_PROTECTION]);
+
+ dualpi2_calculate_c_protection(sch, q, wc);
+ }
+
+ if (tb[TCA_DUALPI2_ECN_MASK]) {
+ u8 ecn_mask = nla_get_u8(tb[TCA_DUALPI2_ECN_MASK]);
+
+ WRITE_ONCE(q->ecn_mask, ecn_mask);
+ }
+
+ if (tb[TCA_DUALPI2_SPLIT_GSO]) {
+ u8 split_gso = nla_get_u8(tb[TCA_DUALPI2_SPLIT_GSO]);
+
+ WRITE_ONCE(q->split_gso, (bool)split_gso);
+ }
+
+ old_qlen = qdisc_qlen(sch);
+ old_backlog = sch->qstats.backlog;
+ while (qdisc_qlen(sch) > sch->limit ||
+ q->memory_used > q->memory_limit) {
+ struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
+
+ q->memory_used -= skb->truesize;
+ qdisc_qstats_backlog_dec(sch, skb);
+ rtnl_qdisc_drop(skb, sch);
+ }
+ qdisc_tree_reduce_backlog(sch, old_qlen - qdisc_qlen(sch),
+ old_backlog - sch->qstats.backlog);
+
+ sch_tree_unlock(sch);
+ return 0;
+}
+
+/* Default alpha/beta values give a 10dB stability margin with max_rtt=100ms. */
+static void dualpi2_reset_default(struct Qdisc *sch)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+
+ q->sch->limit = 10000; /* Max 125ms at 1Gbps */
+ q->memory_limit = get_memory_limit(sch, q->sch->limit);
+
+ q->pi2_target = 15 * NSEC_PER_MSEC;
+ q->pi2_tupdate = 16 * NSEC_PER_MSEC;
+ q->pi2_alpha = dualpi2_scale_alpha_beta(41); /* ~0.16 Hz * 256 */
+ q->pi2_beta = dualpi2_scale_alpha_beta(819); /* ~3.20 Hz * 256 */
+
+ q->step_thresh = 1 * NSEC_PER_MSEC;
+ q->step_in_packets = false;
+
+ dualpi2_calculate_c_protection(q->sch, q, 10); /* wc=10%, wl=90% */
+
+ q->ecn_mask = TC_DUALPI2_ECN_MASK_L4S_ECT; /* INET_ECN_ECT_1 */
+ q->min_qlen_step = 0; /* Always apply step mark in L-queue */
+ q->coupling_factor = 2; /* window fairness for equal RTTs */
+ q->drop_overload = TC_DUALPI2_DROP_OVERLOAD_DROP; /* Drop overload */
+ q->drop_early = TC_DUALPI2_DROP_EARLY_DROP_DEQUEUE; /* Drop dequeue */
+ q->split_gso = TC_DUALPI2_SPLIT_GSO_SPLIT_GSO; /* Split GSO */
+}
+
+static int dualpi2_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+ int err;
+
+ q->l_queue = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+ TC_H_MAKE(sch->handle, 1), extack);
+ if (!q->l_queue)
+ return -ENOMEM;
+
+ err = tcf_block_get(&q->tcf_block, &q->tcf_filters, sch, extack);
+ if (err)
+ return err;
+
+ q->sch = sch;
+ dualpi2_reset_default(sch);
+ hrtimer_setup(&q->pi2_timer, dualpi2_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS_PINNED_SOFT);
+
+ if (opt && nla_len(opt)) {
+ err = dualpi2_change(sch, opt, extack);
+
+ if (err)
+ return err;
+ }
+
+ hrtimer_start(&q->pi2_timer, next_pi2_timeout(q),
+ HRTIMER_MODE_ABS_PINNED_SOFT);
+ return 0;
+}
+
+static int dualpi2_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts;
+ bool step_in_pkts;
+ u32 step_th;
+
+ step_in_pkts = READ_ONCE(q->step_in_packets);
+ step_th = READ_ONCE(q->step_thresh);
+
+ opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
+ if (!opts)
+ goto nla_put_failure;
+
+ if (step_in_pkts &&
+ (nla_put_u32(skb, TCA_DUALPI2_LIMIT, READ_ONCE(sch->limit)) ||
+ nla_put_u32(skb, TCA_DUALPI2_MEMORY_LIMIT,
+ READ_ONCE(q->memory_limit)) ||
+ nla_put_u32(skb, TCA_DUALPI2_TARGET,
+ convert_ns_to_usec(READ_ONCE(q->pi2_target))) ||
+ nla_put_u32(skb, TCA_DUALPI2_TUPDATE,
+ convert_ns_to_usec(READ_ONCE(q->pi2_tupdate))) ||
+ nla_put_u32(skb, TCA_DUALPI2_ALPHA,
+ dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_alpha))) ||
+ nla_put_u32(skb, TCA_DUALPI2_BETA,
+ dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_beta))) ||
+ nla_put_u32(skb, TCA_DUALPI2_STEP_THRESH_PKTS, step_th) ||
+ nla_put_u32(skb, TCA_DUALPI2_MIN_QLEN_STEP,
+ READ_ONCE(q->min_qlen_step)) ||
+ nla_put_u8(skb, TCA_DUALPI2_COUPLING,
+ READ_ONCE(q->coupling_factor)) ||
+ nla_put_u8(skb, TCA_DUALPI2_DROP_OVERLOAD,
+ READ_ONCE(q->drop_overload)) ||
+ nla_put_u8(skb, TCA_DUALPI2_DROP_EARLY,
+ READ_ONCE(q->drop_early)) ||
+ nla_put_u8(skb, TCA_DUALPI2_C_PROTECTION,
+ READ_ONCE(q->c_protection_wc)) ||
+ nla_put_u8(skb, TCA_DUALPI2_ECN_MASK, READ_ONCE(q->ecn_mask)) ||
+ nla_put_u8(skb, TCA_DUALPI2_SPLIT_GSO, READ_ONCE(q->split_gso))))
+ goto nla_put_failure;
+
+ if (!step_in_pkts &&
+ (nla_put_u32(skb, TCA_DUALPI2_LIMIT, READ_ONCE(sch->limit)) ||
+ nla_put_u32(skb, TCA_DUALPI2_MEMORY_LIMIT,
+ READ_ONCE(q->memory_limit)) ||
+ nla_put_u32(skb, TCA_DUALPI2_TARGET,
+ convert_ns_to_usec(READ_ONCE(q->pi2_target))) ||
+ nla_put_u32(skb, TCA_DUALPI2_TUPDATE,
+ convert_ns_to_usec(READ_ONCE(q->pi2_tupdate))) ||
+ nla_put_u32(skb, TCA_DUALPI2_ALPHA,
+ dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_alpha))) ||
+ nla_put_u32(skb, TCA_DUALPI2_BETA,
+ dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_beta))) ||
+ nla_put_u32(skb, TCA_DUALPI2_STEP_THRESH_US,
+ convert_ns_to_usec(step_th)) ||
+ nla_put_u32(skb, TCA_DUALPI2_MIN_QLEN_STEP,
+ READ_ONCE(q->min_qlen_step)) ||
+ nla_put_u8(skb, TCA_DUALPI2_COUPLING,
+ READ_ONCE(q->coupling_factor)) ||
+ nla_put_u8(skb, TCA_DUALPI2_DROP_OVERLOAD,
+ READ_ONCE(q->drop_overload)) ||
+ nla_put_u8(skb, TCA_DUALPI2_DROP_EARLY,
+ READ_ONCE(q->drop_early)) ||
+ nla_put_u8(skb, TCA_DUALPI2_C_PROTECTION,
+ READ_ONCE(q->c_protection_wc)) ||
+ nla_put_u8(skb, TCA_DUALPI2_ECN_MASK, READ_ONCE(q->ecn_mask)) ||
+ nla_put_u8(skb, TCA_DUALPI2_SPLIT_GSO, READ_ONCE(q->split_gso))))
+ goto nla_put_failure;
+
+ return nla_nest_end(skb, opts);
+
+nla_put_failure:
+ nla_nest_cancel(skb, opts);
+ return -1;
+}
+
+static int dualpi2_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+ struct tc_dualpi2_xstats st = {
+ .prob = READ_ONCE(q->pi2_prob),
+ .packets_in_c = q->packets_in_c,
+ .packets_in_l = q->packets_in_l,
+ .maxq = q->maxq,
+ .ecn_mark = q->ecn_mark,
+ .credit = q->c_protection_credit,
+ .step_marks = q->step_marks,
+ .memory_used = q->memory_used,
+ .max_memory_used = q->max_memory_used,
+ .memory_limit = q->memory_limit,
+ };
+ u64 qc, ql;
+
+ get_queue_delays(q, &qc, &ql);
+ st.delay_l = convert_ns_to_usec(ql);
+ st.delay_c = convert_ns_to_usec(qc);
+ return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+/* Reset both L-queue and C-queue, internal packet counters, PI probability,
+ * C-queue protection credit, and timestamps, while preserving current
+ * configuration of DUALPI2.
+ */
+static void dualpi2_reset(struct Qdisc *sch)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+
+ qdisc_reset_queue(sch);
+ qdisc_reset_queue(q->l_queue);
+ q->c_head_ts = 0;
+ q->l_head_ts = 0;
+ q->pi2_prob = 0;
+ q->packets_in_c = 0;
+ q->packets_in_l = 0;
+ q->maxq = 0;
+ q->ecn_mark = 0;
+ q->step_marks = 0;
+ q->memory_used = 0;
+ q->max_memory_used = 0;
+ dualpi2_reset_c_protection(q);
+}
+
+static void dualpi2_destroy(struct Qdisc *sch)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+
+ q->pi2_tupdate = 0;
+ hrtimer_cancel(&q->pi2_timer);
+ if (q->l_queue)
+ qdisc_put(q->l_queue);
+ tcf_block_put(q->tcf_block);
+}
+
+static struct Qdisc *dualpi2_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ return NULL;
+}
+
+static unsigned long dualpi2_find(struct Qdisc *sch, u32 classid)
+{
+ return 0;
+}
+
+static unsigned long dualpi2_bind(struct Qdisc *sch, unsigned long parent,
+ u32 classid)
+{
+ return 0;
+}
+
+static void dualpi2_unbind(struct Qdisc *q, unsigned long cl)
+{
+}
+
+static struct tcf_block *dualpi2_tcf_block(struct Qdisc *sch, unsigned long cl,
+ struct netlink_ext_ack *extack)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+
+ if (cl)
+ return NULL;
+ return q->tcf_block;
+}
+
+static void dualpi2_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+ unsigned int i;
+
+ if (arg->stop)
+ return;
+
+ /* We statically define only 2 queues */
+ for (i = 0; i < 2; i++) {
+ if (arg->count < arg->skip) {
+ arg->count++;
+ continue;
+ }
+ if (arg->fn(sch, i + 1, arg) < 0) {
+ arg->stop = 1;
+ break;
+ }
+ arg->count++;
+ }
+}
+
+/* Minimal class support to handle tc filters */
+static const struct Qdisc_class_ops dualpi2_class_ops = {
+ .leaf = dualpi2_leaf,
+ .find = dualpi2_find,
+ .tcf_block = dualpi2_tcf_block,
+ .bind_tcf = dualpi2_bind,
+ .unbind_tcf = dualpi2_unbind,
+ .walk = dualpi2_walk,
+};
+
+static struct Qdisc_ops dualpi2_qdisc_ops __read_mostly = {
+ .id = "dualpi2",
+ .cl_ops = &dualpi2_class_ops,
+ .priv_size = sizeof(struct dualpi2_sched_data),
+ .enqueue = dualpi2_qdisc_enqueue,
+ .dequeue = dualpi2_qdisc_dequeue,
+ .peek = qdisc_peek_dequeued,
+ .init = dualpi2_init,
+ .destroy = dualpi2_destroy,
+ .reset = dualpi2_reset,
+ .change = dualpi2_change,
+ .dump = dualpi2_dump,
+ .dump_stats = dualpi2_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init dualpi2_module_init(void)
+{
+ return register_qdisc(&dualpi2_qdisc_ops);
+}
+
+static void __exit dualpi2_module_exit(void)
+{
+ unregister_qdisc(&dualpi2_qdisc_ops);
+}
+
+module_init(dualpi2_module_init);
+module_exit(dualpi2_module_exit);
+
+MODULE_DESCRIPTION("Dual Queue with Proportional Integral controller Improved with a Square (dualpi2) scheduler");
+MODULE_AUTHOR("Koen De Schepper <koen.de_schepper@nokia-bell-labs.com>");
+MODULE_AUTHOR("Chia-Yu Chang <chia-yu.chang@nokia-bell-labs.com>");
+MODULE_AUTHOR("Olga Albisser <olga@albisser.org>");
+MODULE_AUTHOR("Henrik Steen <henrist@henrist.net>");
+MODULE_AUTHOR("Olivier Tilmans <olivier.tilmans@nokia.com>");
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION("1.0");
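A non-authoritative sketch of the control law implemented by calculate_probability() and consumed by must_drop() above; the saturating arithmetic and unit scaling are simplified here, but the structure (PI update, squared probability for the C-queue, coupled probability for the L-queue) follows the code and RFC 9332. The helper name is hypothetical.

	/* Sketch only: simplified PI2 update and dual-queue coupling.
	 * The per-packet decisions derived from the returned probability are:
	 *   C-queue: mark/drop with probability prob^2, i.e. two independent
	 *            dualpi2_roll(prob) successes;
	 *   L-queue: mark with min(prob * coupling_factor, MAX_PROB).
	 */
	static u32 example_pi2_step(u64 qdelay, u64 target, u64 *last_qdelay,
				    u32 prob, u32 alpha, u32 beta)
	{
		s64 delta;

		delta  = ((s64)qdelay - (s64)target) * alpha;       /* integral */
		delta += ((s64)qdelay - (s64)*last_qdelay) * beta;  /* proportional */
		*last_qdelay = qdelay;

		/* calculate_probability() additionally right-shifts delta by
		 * ALPHA_BETA_GRANULARITY and saturates the result to
		 * [0, MAX_PROB]; that bookkeeping is omitted here for brevity.
		 */
		return prob + (s32)delta;
	}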
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index 037f764822b9..82635dd2cfa5 100644
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -651,6 +651,12 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
sch_tree_lock(sch);
+ for (i = nbands; i < oldbands; i++) {
+ if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
+ list_del_init(&q->classes[i].alist);
+ qdisc_purge_queue(q->classes[i].qdisc);
+ }
+
WRITE_ONCE(q->nbands, nbands);
for (i = nstrict; i < q->nstrict; i++) {
if (q->classes[i].qdisc->q.qlen) {
@@ -658,11 +664,6 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
q->classes[i].deficit = quanta[i];
}
}
- for (i = q->nbands; i < oldbands; i++) {
- if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
- list_del_init(&q->classes[i].alist);
- qdisc_purge_queue(q->classes[i].qdisc);
- }
WRITE_ONCE(q->nstrict, nstrict);
memcpy(q->prio2band, priomap, sizeof(priomap));
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 902ff5470607..fee922da2f99 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -1013,11 +1013,11 @@ static int fq_load_priomap(struct fq_sched_data *q,
static int fq_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
+ unsigned int dropped_pkts = 0, dropped_bytes = 0;
struct fq_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_FQ_MAX + 1];
- int err, drop_count = 0;
- unsigned drop_len = 0;
u32 fq_log;
+ int err;
err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy,
NULL);
@@ -1135,16 +1135,18 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
err = fq_resize(sch, fq_log);
sch_tree_lock(sch);
}
+
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
if (!skb)
break;
- drop_len += qdisc_pkt_len(skb);
+
+ dropped_pkts++;
+ dropped_bytes += qdisc_pkt_len(skb);
rtnl_kfree_skbs(skb, skb);
- drop_count++;
}
- qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
+ qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
sch_tree_unlock(sch);
return err;
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 2a0f3a513bfa..a14142392939 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -366,6 +366,7 @@ static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
+ unsigned int dropped_pkts = 0, dropped_bytes = 0;
struct fq_codel_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
u32 quantum = 0;
@@ -443,13 +444,14 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
q->memory_usage > q->memory_limit) {
struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
- q->cstats.drop_len += qdisc_pkt_len(skb);
+ if (!skb)
+ break;
+
+ dropped_pkts++;
+ dropped_bytes += qdisc_pkt_len(skb);
rtnl_kfree_skbs(skb, skb);
- q->cstats.drop_count++;
}
- qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
- q->cstats.drop_count = 0;
- q->cstats.drop_len = 0;
+ qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index b0e34daf1f75..7b96bc3ff891 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -287,10 +287,9 @@ begin:
static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
+ unsigned int dropped_pkts = 0, dropped_bytes = 0;
struct fq_pie_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_FQ_PIE_MAX + 1];
- unsigned int len_dropped = 0;
- unsigned int num_dropped = 0;
int err;
err = nla_parse_nested(tb, TCA_FQ_PIE_MAX, opt, fq_pie_policy, extack);
@@ -368,11 +367,14 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
- len_dropped += qdisc_pkt_len(skb);
- num_dropped += 1;
+ if (!skb)
+ break;
+
+ dropped_pkts++;
+ dropped_bytes += qdisc_pkt_len(skb);
rtnl_kfree_skbs(skb, skb);
}
- qdisc_tree_reduce_backlog(sch, num_dropped, len_dropped);
+ qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 16afb834fe4a..1e008a228ebd 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -740,6 +740,8 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
err = skb_array_produce(q, skb);
if (unlikely(err)) {
+ tcf_set_drop_reason(skb, SKB_DROP_REASON_QDISC_OVERLIMIT);
+
if (qdisc_is_percpu_stats(qdisc))
return qdisc_drop_cpu(skb, qdisc, to_free);
else
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 5a7745170e84..d8fd35da32a7 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -835,22 +835,6 @@ update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
}
}
-static unsigned int
-qdisc_peek_len(struct Qdisc *sch)
-{
- struct sk_buff *skb;
- unsigned int len;
-
- skb = sch->ops->peek(sch);
- if (unlikely(skb == NULL)) {
- qdisc_warn_nonwc("qdisc_peek_len", sch);
- return 0;
- }
- len = qdisc_pkt_len(skb);
-
- return len;
-}
-
static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 5aa434b46707..2d4855e28a28 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -508,9 +508,9 @@ static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
+ unsigned int dropped_pkts = 0, dropped_bytes = 0;
struct hhf_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_HHF_MAX + 1];
- unsigned int qlen, prev_backlog;
int err;
u64 non_hh_quantum;
u32 new_quantum = q->quantum;
@@ -561,15 +561,17 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
usecs_to_jiffies(us));
}
- qlen = sch->q.qlen;
- prev_backlog = sch->qstats.backlog;
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
+ if (!skb)
+ break;
+
+ dropped_pkts++;
+ dropped_bytes += qdisc_pkt_len(skb);
rtnl_kfree_skbs(skb, skb);
}
- qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
- prev_backlog - sch->qstats.backlog);
+ qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 14bf71f57057..b5e40c51655a 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -592,7 +592,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
*/
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
- WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
+ WARN_ON(cl->level || !cl->leaf.q);
if (!cl->prio_activity) {
cl->prio_activity = 1 << cl->prio;
@@ -821,7 +821,9 @@ static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
u32 *pid;
} stk[TC_HTB_MAXDEPTH], *sp = stk;
- BUG_ON(!hprio->row.rb_node);
+ if (unlikely(!hprio->row.rb_node))
+ return NULL;
+
sp->root = hprio->row.rb_node;
sp->pptr = &hprio->ptr;
sp->pid = &hprio->last_ptr_id;
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 51d4013b6121..f3e5ef9a9592 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -152,7 +152,7 @@ static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt,
static const struct
nla_policy mqprio_tc_entry_policy[TCA_MQPRIO_TC_ENTRY_MAX + 1] = {
[TCA_MQPRIO_TC_ENTRY_INDEX] = NLA_POLICY_MAX(NLA_U32,
- TC_QOPT_MAX_QUEUE),
+ TC_QOPT_MAX_QUEUE - 1),
[TCA_MQPRIO_TC_ENTRY_FP] = NLA_POLICY_RANGE(NLA_U32,
TC_FP_EXPRESS,
TC_FP_PREEMPTIBLE),
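Both this mqprio policy entry and the matching taprio entry further down rely on NLA_POLICY_MAX() expressing an inclusive upper bound: the TC entry index is used to index arrays sized TC_QOPT_MAX_QUEUE, so the largest valid value is TC_QOPT_MAX_QUEUE - 1. A minimal illustration with hypothetical attribute names:

	/* Sketch: NLA_POLICY_MAX() accepts any value <= max, so an array-index
	 * attribute must be bounded by the array size minus one.
	 */
	static const struct nla_policy example_policy[TCA_EXAMPLE_MAX + 1] = {
		[TCA_EXAMPLE_TC_INDEX] = NLA_POLICY_MAX(NLA_U32,
							TC_QOPT_MAX_QUEUE - 1),
	};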
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index fdd79d3ccd8c..eafc316ae319 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -973,6 +973,41 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
return 0;
}
+static const struct Qdisc_class_ops netem_class_ops;
+
+static int check_netem_in_tree(struct Qdisc *sch, bool duplicates,
+ struct netlink_ext_ack *extack)
+{
+ struct Qdisc *root, *q;
+ unsigned int i;
+
+ root = qdisc_root_sleeping(sch);
+
+ if (sch != root && root->ops->cl_ops == &netem_class_ops) {
+ if (duplicates ||
+ ((struct netem_sched_data *)qdisc_priv(root))->duplicate)
+ goto err;
+ }
+
+ if (!qdisc_dev(root))
+ return 0;
+
+ hash_for_each(qdisc_dev(root)->qdisc_hash, i, q, hash) {
+ if (sch != q && q->ops->cl_ops == &netem_class_ops) {
+ if (duplicates ||
+ ((struct netem_sched_data *)qdisc_priv(q))->duplicate)
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ NL_SET_ERR_MSG(extack,
+ "netem: cannot mix duplicating netems with other netems in tree");
+ return -EINVAL;
+}
+
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
@@ -1031,6 +1066,11 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
q->gap = qopt->gap;
q->counter = 0;
q->loss = qopt->loss;
+
+ ret = check_netem_in_tree(sch, qopt->duplicate, extack);
+ if (ret)
+ goto unlock;
+
q->duplicate = qopt->duplicate;
/* for compatibility with earlier versions.
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index ad46ee3ed5a9..0a377313b6a9 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -141,9 +141,9 @@ static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
static int pie_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
+ unsigned int dropped_pkts = 0, dropped_bytes = 0;
struct pie_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_PIE_MAX + 1];
- unsigned int qlen, dropped = 0;
int err;
err = nla_parse_nested_deprecated(tb, TCA_PIE_MAX, opt, pie_policy,
@@ -193,15 +193,17 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
nla_get_u32(tb[TCA_PIE_DQ_RATE_ESTIMATOR]));
/* Drop excess packets if new limit is lower */
- qlen = sch->q.qlen;
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
- dropped += qdisc_pkt_len(skb);
- qdisc_qstats_backlog_dec(sch, skb);
+ if (!skb)
+ break;
+
+ dropped_pkts++;
+ dropped_bytes += qdisc_pkt_len(skb);
rtnl_qdisc_drop(skb, sch);
}
- qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
+ qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
sch_tree_unlock(sch);
return 0;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index bf1282cb22eb..2255355e51d3 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -412,7 +412,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
bool existing = false;
struct nlattr *tb[TCA_QFQ_MAX + 1];
struct qfq_aggregate *new_agg = NULL;
- u32 weight, lmax, inv_w;
+ u32 weight, lmax, inv_w, old_weight, old_lmax;
int err;
int delta_w;
@@ -443,12 +443,16 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
inv_w = ONE_FP / weight;
weight = ONE_FP / inv_w;
- if (cl != NULL &&
- lmax == cl->agg->lmax &&
- weight == cl->agg->class_weight)
- return 0; /* nothing to change */
+ if (cl != NULL) {
+ sch_tree_lock(sch);
+ old_weight = cl->agg->class_weight;
+ old_lmax = cl->agg->lmax;
+ sch_tree_unlock(sch);
+ if (lmax == old_lmax && weight == old_weight)
+ return 0; /* nothing to change */
+ }
- delta_w = weight - (cl ? cl->agg->class_weight : 0);
+ delta_w = weight - (cl ? old_weight : 0);
if (q->wsum + delta_w > QFQ_MAX_WSUM) {
NL_SET_ERR_MSG_FMT_MOD(extack,
@@ -532,9 +536,6 @@ destroy_class:
static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
{
- struct qfq_sched *q = qdisc_priv(sch);
-
- qfq_rm_from_agg(q, cl);
gen_kill_estimator(&cl->rate_est);
qdisc_put(cl->qdisc);
kfree(cl);
@@ -555,6 +556,7 @@ static int qfq_delete_class(struct Qdisc *sch, unsigned long arg,
qdisc_purge_queue(cl->qdisc);
qdisc_class_hash_remove(&q->clhash, &cl->common);
+ qfq_rm_from_agg(q, cl);
sch_tree_unlock(sch);
@@ -625,6 +627,7 @@ static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
{
struct qfq_class *cl = (struct qfq_class *)arg;
struct nlattr *nest;
+ u32 class_weight, lmax;
tcm->tcm_parent = TC_H_ROOT;
tcm->tcm_handle = cl->common.classid;
@@ -633,8 +636,13 @@ static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
- if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
- nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax))
+
+ sch_tree_lock(sch);
+ class_weight = cl->agg->class_weight;
+ lmax = cl->agg->lmax;
+ sch_tree_unlock(sch);
+ if (nla_put_u32(skb, TCA_QFQ_WEIGHT, class_weight) ||
+ nla_put_u32(skb, TCA_QFQ_LMAX, lmax))
goto nla_put_failure;
return nla_nest_end(skb, nest);
@@ -651,8 +659,10 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
memset(&xstats, 0, sizeof(xstats));
+ sch_tree_lock(sch);
xstats.weight = cl->agg->class_weight;
xstats.lmax = cl->agg->lmax;
+ sch_tree_unlock(sch);
if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
@@ -989,7 +999,7 @@ static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg,
if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
list_del_init(&cl->alist);
- else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
+ else if (cl->deficit < qdisc_peek_len(cl->qdisc)) {
cl->deficit += agg->lmax;
list_move_tail(&cl->alist, &agg->active);
}
@@ -1491,6 +1501,7 @@ static void qfq_destroy_qdisc(struct Qdisc *sch)
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
common.hnode) {
+ qfq_rm_from_agg(q, cl);
qfq_destroy_class(sch, cl);
}
}
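A hedged summary of the pattern the qfq hunks above converge on: reads of cl->agg->class_weight and cl->agg->lmax that happen outside the qdisc lock are first snapshotted under sch_tree_lock(), since a concurrent qfq_change_class() may move the class to a different aggregate and free the old one. A condensed sketch:

	u32 weight, lmax;

	/* Snapshot the aggregate parameters under the qdisc tree lock ... */
	sch_tree_lock(sch);
	weight = cl->agg->class_weight;
	lmax = cl->agg->lmax;
	sch_tree_unlock(sch);

	/* ... then use only the local copies (netlink dump, delta_w math). */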
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 14021b812329..39b735386996 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -43,6 +43,11 @@ static struct static_key_false taprio_have_working_mqprio;
#define TAPRIO_SUPPORTED_FLAGS \
(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST | TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX
+/* Minimum value for picos_per_byte to ensure non-zero duration
+ * for minimum-sized Ethernet frames (ETH_ZLEN = 60).
+ * 60 * 17 > PSEC_PER_NSEC (1000)
+ */
+#define TAPRIO_PICOS_PER_BYTE_MIN 17
struct sched_entry {
/* Durations between this GCL entry and the GCL entry where the
@@ -998,7 +1003,7 @@ static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
[TCA_TAPRIO_TC_ENTRY_INDEX] = NLA_POLICY_MAX(NLA_U32,
- TC_QOPT_MAX_QUEUE),
+ TC_QOPT_MAX_QUEUE - 1),
[TCA_TAPRIO_TC_ENTRY_MAX_SDU] = { .type = NLA_U32 },
[TCA_TAPRIO_TC_ENTRY_FP] = NLA_POLICY_RANGE(NLA_U32,
TC_FP_EXPRESS,
@@ -1284,7 +1289,8 @@ static void taprio_start_sched(struct Qdisc *sch,
}
static void taprio_set_picos_per_byte(struct net_device *dev,
- struct taprio_sched *q)
+ struct taprio_sched *q,
+ struct netlink_ext_ack *extack)
{
struct ethtool_link_ksettings ecmd;
int speed = SPEED_10;
@@ -1300,6 +1306,15 @@ static void taprio_set_picos_per_byte(struct net_device *dev,
skip:
picos_per_byte = (USEC_PER_SEC * 8) / speed;
+ if (picos_per_byte < TAPRIO_PICOS_PER_BYTE_MIN) {
+ if (!extack)
+ pr_warn("Link speed %d is too high. Schedule may be inaccurate.\n",
+ speed);
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Link speed %d is too high. Schedule may be inaccurate.",
+ speed);
+ picos_per_byte = TAPRIO_PICOS_PER_BYTE_MIN;
+ }
atomic64_set(&q->picos_per_byte, picos_per_byte);
netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
@@ -1324,17 +1339,19 @@ static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
if (dev != qdisc_dev(q->root))
continue;
- taprio_set_picos_per_byte(dev, q);
+ taprio_set_picos_per_byte(dev, q, NULL);
stab = rtnl_dereference(q->root->stab);
- oper = rtnl_dereference(q->oper_sched);
+ rcu_read_lock();
+ oper = rcu_dereference(q->oper_sched);
if (oper)
taprio_update_queue_max_sdu(q, oper, stab);
- admin = rtnl_dereference(q->admin_sched);
+ admin = rcu_dereference(q->admin_sched);
if (admin)
taprio_update_queue_max_sdu(q, admin, stab);
+ rcu_read_unlock();
break;
}
@@ -1696,19 +1713,15 @@ static int taprio_parse_tc_entry(struct Qdisc *sch,
if (err < 0)
return err;
- if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) {
+ if (NL_REQ_ATTR_CHECK(extack, opt, tb, TCA_TAPRIO_TC_ENTRY_INDEX)) {
NL_SET_ERR_MSG_MOD(extack, "TC entry index missing");
return -EINVAL;
}
tc = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]);
- if (tc >= TC_QOPT_MAX_QUEUE) {
- NL_SET_ERR_MSG_MOD(extack, "TC entry index out of range");
- return -ERANGE;
- }
-
if (*seen_tcs & BIT(tc)) {
- NL_SET_ERR_MSG_MOD(extack, "Duplicate TC entry");
+ NL_SET_ERR_MSG_ATTR(extack, tb[TCA_TAPRIO_TC_ENTRY_INDEX],
+ "Duplicate tc entry");
return -EINVAL;
}
@@ -1846,7 +1859,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
q->flags = taprio_flags;
/* Needed for length_to_duration() during netlink attribute parsing */
- taprio_set_picos_per_byte(dev, q);
+ taprio_set_picos_per_byte(dev, q, extack);
err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
if (err < 0)
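A worked example of the clamp introduced above, using a hypothetical helper name; the picosecond-to-nanosecond conversion mirrors what taprio's length_to_duration() is assumed to do:

	/* Sketch only: why picos_per_byte must not fall below 17. */
	static u64 example_frame_duration_ns(unsigned int speed_mbps,
					     unsigned int len_bytes)
	{
		u64 picos_per_byte = div_u64(USEC_PER_SEC * 8, speed_mbps);

		if (picos_per_byte < TAPRIO_PICOS_PER_BYTE_MIN)
			picos_per_byte = TAPRIO_PICOS_PER_BYTE_MIN;

		/* e.g. 800 Gb/s: 8000000 / 800000 = 10 ps/byte, clamped to 17,
		 * so a minimum-sized 60-byte frame still lasts 60 * 17 =
		 * 1020 ps, i.e. at least 1 ns after the ps -> ns conversion.
		 */
		return div_u64(len_bytes * picos_per_byte, PSEC_PER_NSEC);
	}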
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
index 24d5a35ce894..e947646a380c 100644
--- a/net/sctp/Kconfig
+++ b/net/sctp/Kconfig
@@ -7,9 +7,9 @@ menuconfig IP_SCTP
tristate "The SCTP Protocol"
depends on INET
depends on IPV6 || IPV6=n
- select CRYPTO
- select CRYPTO_HMAC
- select CRYPTO_SHA1
+ select CRYPTO_LIB_SHA1
+ select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_UTILS
select NET_CRC32C
select NET_UDP_TUNNEL
help
@@ -49,46 +49,25 @@ config SCTP_DBG_OBJCNT
'cat /proc/net/sctp/sctp_dbg_objcnt'
If unsure, say N
+
choice
- prompt "Default SCTP cookie HMAC encoding"
- default SCTP_DEFAULT_COOKIE_HMAC_MD5
+ prompt "Default SCTP cookie authentication method"
+ default SCTP_DEFAULT_COOKIE_HMAC_SHA256
help
- This option sets the default sctp cookie hmac algorithm
- when in doubt select 'md5'
+ This option sets the default SCTP cookie authentication method, for
+ when a method hasn't been explicitly selected via the
+ net.sctp.cookie_hmac_alg sysctl.
-config SCTP_DEFAULT_COOKIE_HMAC_MD5
- bool "Enable optional MD5 hmac cookie generation"
- help
- Enable optional MD5 hmac based SCTP cookie generation
- select SCTP_COOKIE_HMAC_MD5
+ If unsure, choose the default (HMAC-SHA256).
-config SCTP_DEFAULT_COOKIE_HMAC_SHA1
- bool "Enable optional SHA1 hmac cookie generation"
- help
- Enable optional SHA1 hmac based SCTP cookie generation
- select SCTP_COOKIE_HMAC_SHA1
+config SCTP_DEFAULT_COOKIE_HMAC_SHA256
+ bool "HMAC-SHA256"
config SCTP_DEFAULT_COOKIE_HMAC_NONE
- bool "Use no hmac alg in SCTP cookie generation"
- help
- Use no hmac algorithm in SCTP cookie generation
+ bool "None"
endchoice
-config SCTP_COOKIE_HMAC_MD5
- bool "Enable optional MD5 hmac cookie generation"
- help
- Enable optional MD5 hmac based SCTP cookie generation
- select CRYPTO_HMAC if SCTP_COOKIE_HMAC_MD5
- select CRYPTO_MD5 if SCTP_COOKIE_HMAC_MD5
-
-config SCTP_COOKIE_HMAC_SHA1
- bool "Enable optional SHA1 hmac cookie generation"
- help
- Enable optional SHA1 hmac based SCTP cookie generation
- select CRYPTO_HMAC if SCTP_COOKIE_HMAC_SHA1
- select CRYPTO_SHA1 if SCTP_COOKIE_HMAC_SHA1
-
config INET_SCTP_DIAG
depends on INET_DIAG
def_tristate INET_DIAG
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index c58fffc86a0c..82aad477590e 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -12,36 +12,37 @@
* Vlad Yasevich <vladislav.yasevich@hp.com>
*/
-#include <crypto/hash.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/scatterlist.h>
#include <net/sctp/sctp.h>
#include <net/sctp/auth.h>
-static struct sctp_hmac sctp_hmac_list[SCTP_AUTH_NUM_HMACS] = {
+static const struct sctp_hmac sctp_hmac_list[SCTP_AUTH_NUM_HMACS] = {
{
/* id 0 is reserved. as all 0 */
.hmac_id = SCTP_AUTH_HMAC_ID_RESERVED_0,
},
{
.hmac_id = SCTP_AUTH_HMAC_ID_SHA1,
- .hmac_name = "hmac(sha1)",
- .hmac_len = SCTP_SHA1_SIG_SIZE,
+ .hmac_len = SHA1_DIGEST_SIZE,
},
{
/* id 2 is reserved as well */
.hmac_id = SCTP_AUTH_HMAC_ID_RESERVED_2,
},
-#if IS_ENABLED(CONFIG_CRYPTO_SHA256)
{
.hmac_id = SCTP_AUTH_HMAC_ID_SHA256,
- .hmac_name = "hmac(sha256)",
- .hmac_len = SCTP_SHA256_SIG_SIZE,
+ .hmac_len = SHA256_DIGEST_SIZE,
}
-#endif
};
+static bool sctp_hmac_supported(__u16 hmac_id)
+{
+ return hmac_id < ARRAY_SIZE(sctp_hmac_list) &&
+ sctp_hmac_list[hmac_id].hmac_len != 0;
+}
void sctp_auth_key_put(struct sctp_auth_bytes *key)
{
@@ -444,76 +445,7 @@ struct sctp_shared_key *sctp_auth_get_shkey(
return NULL;
}
-/*
- * Initialize all the possible digest transforms that we can use. Right
- * now, the supported digests are SHA1 and SHA256. We do this here once
- * because of the restrictiong that transforms may only be allocated in
- * user context. This forces us to pre-allocated all possible transforms
- * at the endpoint init time.
- */
-int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
-{
- struct crypto_shash *tfm = NULL;
- __u16 id;
-
- /* If the transforms are already allocated, we are done */
- if (ep->auth_hmacs)
- return 0;
-
- /* Allocated the array of pointers to transorms */
- ep->auth_hmacs = kcalloc(SCTP_AUTH_NUM_HMACS,
- sizeof(struct crypto_shash *),
- gfp);
- if (!ep->auth_hmacs)
- return -ENOMEM;
-
- for (id = 0; id < SCTP_AUTH_NUM_HMACS; id++) {
-
- /* See is we support the id. Supported IDs have name and
- * length fields set, so that we can allocated and use
- * them. We can safely just check for name, for without the
- * name, we can't allocate the TFM.
- */
- if (!sctp_hmac_list[id].hmac_name)
- continue;
-
- /* If this TFM has been allocated, we are all set */
- if (ep->auth_hmacs[id])
- continue;
-
- /* Allocate the ID */
- tfm = crypto_alloc_shash(sctp_hmac_list[id].hmac_name, 0, 0);
- if (IS_ERR(tfm))
- goto out_err;
-
- ep->auth_hmacs[id] = tfm;
- }
-
- return 0;
-
-out_err:
- /* Clean up any successful allocations */
- sctp_auth_destroy_hmacs(ep->auth_hmacs);
- ep->auth_hmacs = NULL;
- return -ENOMEM;
-}
-
-/* Destroy the hmac tfm array */
-void sctp_auth_destroy_hmacs(struct crypto_shash *auth_hmacs[])
-{
- int i;
-
- if (!auth_hmacs)
- return;
-
- for (i = 0; i < SCTP_AUTH_NUM_HMACS; i++) {
- crypto_free_shash(auth_hmacs[i]);
- }
- kfree(auth_hmacs);
-}
-
-
-struct sctp_hmac *sctp_auth_get_hmac(__u16 hmac_id)
+const struct sctp_hmac *sctp_auth_get_hmac(__u16 hmac_id)
{
return &sctp_hmac_list[hmac_id];
}
@@ -521,7 +453,8 @@ struct sctp_hmac *sctp_auth_get_hmac(__u16 hmac_id)
/* Get an hmac description information that we can use to build
* the AUTH chunk
*/
-struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc)
+const struct sctp_hmac *
+sctp_auth_asoc_get_hmac(const struct sctp_association *asoc)
{
struct sctp_hmac_algo_param *hmacs;
__u16 n_elt;
@@ -543,26 +476,10 @@ struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc)
sizeof(struct sctp_paramhdr)) >> 1;
for (i = 0; i < n_elt; i++) {
id = ntohs(hmacs->hmac_ids[i]);
-
- /* Check the id is in the supported range. And
- * see if we support the id. Supported IDs have name and
- * length fields set, so that we can allocate and use
- * them. We can safely just check for name, for without the
- * name, we can't allocate the TFM.
- */
- if (id > SCTP_AUTH_HMAC_ID_MAX ||
- !sctp_hmac_list[id].hmac_name) {
- id = 0;
- continue;
- }
-
- break;
+ if (sctp_hmac_supported(id))
+ return &sctp_hmac_list[id];
}
-
- if (id == 0)
- return NULL;
-
- return &sctp_hmac_list[id];
+ return NULL;
}
static int __sctp_auth_find_hmacid(__be16 *hmacs, int n_elts, __be16 hmac_id)
@@ -606,7 +523,6 @@ int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc,
void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc,
struct sctp_hmac_algo_param *hmacs)
{
- struct sctp_endpoint *ep;
__u16 id;
int i;
int n_params;
@@ -617,16 +533,9 @@ void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc,
n_params = (ntohs(hmacs->param_hdr.length) -
sizeof(struct sctp_paramhdr)) >> 1;
- ep = asoc->ep;
for (i = 0; i < n_params; i++) {
id = ntohs(hmacs->hmac_ids[i]);
-
- /* Check the id is in the supported range */
- if (id > SCTP_AUTH_HMAC_ID_MAX)
- continue;
-
- /* If this TFM has been allocated, use this id */
- if (ep->auth_hmacs[id]) {
+ if (sctp_hmac_supported(id)) {
asoc->default_hmac_id = id;
break;
}
@@ -709,10 +618,9 @@ void sctp_auth_calculate_hmac(const struct sctp_association *asoc,
struct sctp_shared_key *ep_key, gfp_t gfp)
{
struct sctp_auth_bytes *asoc_key;
- struct crypto_shash *tfm;
__u16 key_id, hmac_id;
- unsigned char *end;
int free_key = 0;
+ size_t data_len;
__u8 *digest;
/* Extract the info we need:
@@ -733,19 +641,17 @@ void sctp_auth_calculate_hmac(const struct sctp_association *asoc,
free_key = 1;
}
- /* set up scatter list */
- end = skb_tail_pointer(skb);
-
- tfm = asoc->ep->auth_hmacs[hmac_id];
-
+ data_len = skb_tail_pointer(skb) - (unsigned char *)auth;
digest = (u8 *)(&auth->auth_hdr + 1);
- if (crypto_shash_setkey(tfm, &asoc_key->data[0], asoc_key->len))
- goto free;
-
- crypto_shash_tfm_digest(tfm, (u8 *)auth, end - (unsigned char *)auth,
- digest);
+ if (hmac_id == SCTP_AUTH_HMAC_ID_SHA1) {
+ hmac_sha1_usingrawkey(asoc_key->data, asoc_key->len,
+ (const u8 *)auth, data_len, digest);
+ } else {
+ WARN_ON_ONCE(hmac_id != SCTP_AUTH_HMAC_ID_SHA256);
+ hmac_sha256_usingrawkey(asoc_key->data, asoc_key->len,
+ (const u8 *)auth, data_len, digest);
+ }
-free:
if (free_key)
sctp_auth_key_put(asoc_key);
}
@@ -788,14 +694,11 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
for (i = 0; i < hmacs->shmac_num_idents; i++) {
id = hmacs->shmac_idents[i];
- if (id > SCTP_AUTH_HMAC_ID_MAX)
+ if (!sctp_hmac_supported(id))
return -EOPNOTSUPP;
if (SCTP_AUTH_HMAC_ID_SHA1 == id)
has_sha1 = 1;
-
- if (!sctp_hmac_list[id].hmac_name)
- return -EOPNOTSUPP;
}
if (!has_sha1)
@@ -1021,8 +924,6 @@ int sctp_auth_deact_key_id(struct sctp_endpoint *ep,
int sctp_auth_init(struct sctp_endpoint *ep, gfp_t gfp)
{
- int err = -ENOMEM;
-
/* Allocate space for HMACS and CHUNKS authentication
* variables. There are arrays that we encode directly
* into parameters to make the rest of the operations easier.
@@ -1060,13 +961,6 @@ int sctp_auth_init(struct sctp_endpoint *ep, gfp_t gfp)
ep->auth_chunk_list = auth_chunks;
}
- /* Allocate and initialize transorms arrays for supported
- * HMACs.
- */
- err = sctp_auth_init_hmacs(ep, gfp);
- if (err)
- goto nomem;
-
return 0;
nomem:
@@ -1075,7 +969,7 @@ nomem:
kfree(ep->auth_chunk_list);
ep->auth_hmacs_list = NULL;
ep->auth_chunk_list = NULL;
- return err;
+ return -ENOMEM;
}
void sctp_auth_free(struct sctp_endpoint *ep)
@@ -1084,6 +978,4 @@ void sctp_auth_free(struct sctp_endpoint *ep)
kfree(ep->auth_chunk_list);
ep->auth_hmacs_list = NULL;
ep->auth_chunk_list = NULL;
- sctp_auth_destroy_hmacs(ep->auth_hmacs);
- ep->auth_hmacs = NULL;
}
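The auth.c changes above replace the per-endpoint crypto_shash transform array with a fixed table consulted through sctp_hmac_supported(): an HMAC ID is usable exactly when its table entry has a non-zero digest length, so reserved IDs need no special casing. A minimal userspace sketch of that table-driven check follows; the struct and macro names are illustrative stand-ins, only the RFC 4895 ID assignments and digest sizes are taken as given.

/*
 * Table-driven "is this HMAC ID supported?" check, mirroring the shape of
 * the new sctp_hmac_supported(). Names are illustrative, not the kernel's.
 */
#include <stdio.h>

#define HMAC_ID_RESERVED_0 0
#define HMAC_ID_SHA1       1
#define HMAC_ID_RESERVED_2 2
#define HMAC_ID_SHA256     3

struct demo_hmac {
	unsigned short id;
	unsigned short digest_len;      /* 0 means "not usable" */
};

static const struct demo_hmac hmac_list[] = {
	[HMAC_ID_RESERVED_0] = { HMAC_ID_RESERVED_0, 0 },
	[HMAC_ID_SHA1]       = { HMAC_ID_SHA1, 20 },
	[HMAC_ID_RESERVED_2] = { HMAC_ID_RESERVED_2, 0 },
	[HMAC_ID_SHA256]     = { HMAC_ID_SHA256, 32 },
};

static int hmac_supported(unsigned short id)
{
	return id < sizeof(hmac_list) / sizeof(hmac_list[0]) &&
	       hmac_list[id].digest_len != 0;
}

int main(void)
{
	for (unsigned short id = 0; id < 5; id++)
		printf("id %u supported: %d\n", (unsigned)id, hmac_supported(id));
	return 0;
}

Out-of-range IDs and the two reserved entries fall out of the same predicate, which is what lets the peer-HMAC selection loop above collapse to a single if.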
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index fd4f8243cc35..c655b571ca01 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -184,7 +184,8 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
* DATA.
*/
if (sctp_auth_send_cid(SCTP_CID_DATA, asoc)) {
- struct sctp_hmac *hmac_desc = sctp_auth_asoc_get_hmac(asoc);
+ const struct sctp_hmac *hmac_desc =
+ sctp_auth_asoc_get_hmac(asoc);
if (hmac_desc)
max_data -= SCTP_PAD4(sizeof(struct sctp_auth_chunk) +
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index 23359e522273..996c2018f0e6 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -173,7 +173,7 @@ static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
- mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
+ mem[SK_MEMINFO_DROPS] = sk_drops_read(sk);
if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
goto errout;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 7e77b450697c..31e989dfe846 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -35,6 +35,15 @@
/* Forward declarations for internal helpers. */
static void sctp_endpoint_bh_rcv(struct work_struct *work);
+static void gen_cookie_auth_key(struct hmac_sha256_key *key)
+{
+ u8 raw_key[SCTP_COOKIE_KEY_SIZE];
+
+ get_random_bytes(raw_key, sizeof(raw_key));
+ hmac_sha256_preparekey(key, raw_key, sizeof(raw_key));
+ memzero_explicit(raw_key, sizeof(raw_key));
+}
+
/*
* Initialize the base fields of the endpoint structure.
*/
@@ -45,10 +54,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
struct net *net = sock_net(sk);
struct sctp_shared_key *null_key;
- ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp);
- if (!ep->digest)
- return NULL;
-
ep->asconf_enable = net->sctp.addip_enable;
ep->auth_enable = net->sctp.auth_enable;
if (ep->auth_enable) {
@@ -90,8 +95,8 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
/* Get the receive buffer policy for this endpoint */
ep->rcvbuf_policy = net->sctp.rcvbuf_policy;
- /* Initialize the secret key used with cookie. */
- get_random_bytes(ep->secret_key, sizeof(ep->secret_key));
+ /* Generate the cookie authentication key. */
+ gen_cookie_auth_key(&ep->cookie_auth_key);
/* SCTP-AUTH extensions*/
INIT_LIST_HEAD(&ep->endpoint_shared_keys);
@@ -118,7 +123,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
nomem_shkey:
sctp_auth_free(ep);
nomem:
- kfree(ep->digest);
return NULL;
}
@@ -205,9 +209,6 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
return;
}
- /* Free the digest buffer */
- kfree(ep->digest);
-
/* SCTP-AUTH: Free up AUTH releated data such as shared keys
* chunks and hmacs arrays that were allocated
*/
@@ -218,7 +219,7 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
sctp_inq_free(&ep->base.inqueue);
sctp_bind_addr_free(&ep->base.bind_addr);
- memset(ep->secret_key, 0, sizeof(ep->secret_key));
+ memzero_explicit(&ep->cookie_auth_key, sizeof(ep->cookie_auth_key));
sk = ep->base.sk;
/* Remove and free the port */
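The endpoint now derives a prepared HMAC-SHA256 key from fresh random bytes and immediately wipes the raw key, instead of keeping a raw secret plus a separately allocated digest buffer. The userspace sketch below shows only that generate/derive/wipe pattern; the key-derivation step is stubbed, since the kernel's hmac_sha256_preparekey() has no standalone userspace equivalent, and rand() stands in for get_random_bytes() purely so the sketch runs.

/*
 * Generate a raw key, derive the working key, wipe the raw key.
 * secure_zero() mimics memzero_explicit(); rand() is NOT a CSPRNG and is
 * used here only for illustration.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define COOKIE_KEY_SIZE 32

struct cookie_key {
	uint8_t prepared[64];   /* placeholder for the expanded HMAC state */
};

static void secure_zero(void *p, size_t n)
{
	volatile uint8_t *vp = p;

	while (n--)
		*vp++ = 0;
}

static void gen_cookie_auth_key(struct cookie_key *key)
{
	uint8_t raw_key[COOKIE_KEY_SIZE];

	for (size_t i = 0; i < sizeof(raw_key); i++)
		raw_key[i] = (uint8_t)rand();   /* stand-in for get_random_bytes() */

	/* stand-in for hmac_sha256_preparekey(key, raw_key, sizeof(raw_key)) */
	memset(key->prepared, 0, sizeof(key->prepared));
	memcpy(key->prepared, raw_key, sizeof(raw_key));

	/* the raw key must not linger on the stack */
	secure_zero(raw_key, sizeof(raw_key));
}

int main(void)
{
	struct cookie_key key;

	gen_cookie_auth_key(&key);
	secure_zero(&key, sizeof(key));         /* endpoint destroy path */
	return 0;
}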
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 0c0d2757f6f8..7e99894778d4 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -117,7 +117,7 @@ int sctp_rcv(struct sk_buff *skb)
* it's better to just linearize it otherwise crc computing
* takes longer.
*/
- if ((!is_gso && skb_linearize(skb)) ||
+ if (((!is_gso || skb_cloned(skb)) && skb_linearize(skb)) ||
!pskb_may_pull(skb, sizeof(struct sctphdr)))
goto discard_it;
@@ -756,7 +756,7 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
struct sock *sk2 = ep2->base.sk;
if (!net_eq(sock_net(sk2), net) || sk2 == sk ||
- !uid_eq(sock_i_uid(sk2), sock_i_uid(sk)) ||
+ !uid_eq(sk_uid(sk2), sk_uid(sk)) ||
!sk2->sk_reuseport)
continue;
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 5c1652181805..f5a7d5a38755 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -169,13 +169,14 @@ next_chunk:
chunk->head_skb = chunk->skb;
/* skbs with "cover letter" */
- if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len)
+ if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len) {
+ if (WARN_ON(!skb_shinfo(chunk->skb)->frag_list)) {
+ __SCTP_INC_STATS(dev_net(chunk->skb->dev),
+ SCTP_MIB_IN_PKT_DISCARDS);
+ sctp_chunk_free(chunk);
+ goto next_chunk;
+ }
chunk->skb = skb_shinfo(chunk->skb)->frag_list;
-
- if (WARN_ON(!chunk->skb)) {
- __SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
- sctp_chunk_free(chunk);
- goto next_chunk;
}
}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index a9ed2ccab1bd..568ff8797c39 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -261,9 +261,10 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *t)
skb_set_inner_ipproto(skb, IPPROTO_SCTP);
label = ip6_make_flowlabel(sock_net(sk), skb, fl6->flowlabel, true, fl6);
- return udp_tunnel6_xmit_skb(dst, sk, skb, NULL, &fl6->saddr,
- &fl6->daddr, tclass, ip6_dst_hoplimit(dst),
- label, sctp_sk(sk)->udp_port, t->encap_port, false);
+ udp_tunnel6_xmit_skb(dst, sk, skb, NULL, &fl6->saddr, &fl6->daddr,
+ tclass, ip6_dst_hoplimit(dst), label,
+ sctp_sk(sk)->udp_port, t->encap_port, false, 0);
+ return 0;
}
/* Returns the dst cache entry for the given source and destination ip
@@ -546,7 +547,9 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
{
addr->v6.sin6_family = AF_INET6;
addr->v6.sin6_port = 0;
+ addr->v6.sin6_flowinfo = 0;
addr->v6.sin6_addr = sk->sk_v6_rcv_saddr;
+ addr->v6.sin6_scope_id = 0;
}
/* Initialize sk->sk_rcv_saddr from sctp_addr. */
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index ec00ee75d59a..1ed281f3c355 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -52,21 +52,21 @@ static const struct snmp_mib sctp_snmp_list[] = {
SNMP_MIB_ITEM("SctpInPktBacklog", SCTP_MIB_IN_PKT_BACKLOG),
SNMP_MIB_ITEM("SctpInPktDiscards", SCTP_MIB_IN_PKT_DISCARDS),
SNMP_MIB_ITEM("SctpInDataChunkDiscards", SCTP_MIB_IN_DATA_CHUNK_DISCARDS),
- SNMP_MIB_SENTINEL
};
/* Display sctp snmp mib statistics(/proc/net/sctp/snmp). */
static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
{
- unsigned long buff[SCTP_MIB_MAX];
+ unsigned long buff[ARRAY_SIZE(sctp_snmp_list)];
+ const int cnt = ARRAY_SIZE(sctp_snmp_list);
struct net *net = seq->private;
int i;
- memset(buff, 0, sizeof(unsigned long) * SCTP_MIB_MAX);
+ memset(buff, 0, sizeof(buff));
- snmp_get_cpu_field_batch(buff, sctp_snmp_list,
- net->sctp.sctp_statistics);
- for (i = 0; sctp_snmp_list[i].name; i++)
+ snmp_get_cpu_field_batch_cnt(buff, sctp_snmp_list, cnt,
+ net->sctp.sctp_statistics);
+ for (i = 0; i < cnt; i++)
seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
buff[i]);
@@ -177,7 +177,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5u %5lu ", ep, sk,
sctp_sk(sk)->type, sk->sk_state, hash,
ep->base.bind_addr.port,
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+ from_kuid_munged(seq_user_ns(seq), sk_uid(sk)),
sock_i_ino(sk));
sctp_seq_dump_local_addrs(seq, &ep->base);
@@ -267,7 +267,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
assoc->assoc_id,
assoc->sndbuf_used,
atomic_read(&assoc->rmem_alloc),
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+ from_kuid_munged(seq_user_ns(seq), sk_uid(sk)),
sock_i_ino(sk),
epb->bind_addr.port,
assoc->peer.port);
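The proc.c hunk above drops the all-zero SNMP_MIB_SENTINEL terminator and sizes both the snapshot buffer and the loop with ARRAY_SIZE() of the table. A small sketch of that before/after shape, with illustrative item names and values:

/*
 * Count-bounded MIB iteration: the table length comes from ARRAY_SIZE(),
 * so no { NULL, 0 } sentinel entry is needed at the end of the table.
 */
#include <stdio.h>

struct mib_item {
	const char *name;
	int value;
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const struct mib_item mib_list[] = {
	{ "SctpCurrEstab",      4 },
	{ "SctpActiveEstabs",  12 },
	{ "SctpInPktDiscards",  0 },
	/* no sentinel entry any more */
};

int main(void)
{
	const int cnt = ARRAY_SIZE(mib_list);
	long buff[ARRAY_SIZE(mib_list)] = { 0 };

	for (int i = 0; i < cnt; i++) {
		buff[i] = mib_list[i].value;
		printf("%-32s\t%ld\n", mib_list[i].name, buff[i]);
	}
	return 0;
}

Sizing the buffer to the table also removes the mismatch between SCTP_MIB_MAX and the actual number of exported counters.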
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index f402f90eb6b6..9dbc24af749b 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -34,6 +34,7 @@
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/slab.h>
+#include <net/flow.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
@@ -437,7 +438,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
fl4->fl4_dport = daddr->v4.sin_port;
fl4->flowi4_proto = IPPROTO_SCTP;
if (asoc) {
- fl4->flowi4_tos = inet_dscp_to_dsfield(dscp);
+ fl4->flowi4_dscp = dscp;
fl4->flowi4_scope = ip_sock_rt_scope(asoc->base.sk);
fl4->flowi4_oif = asoc->base.sk->sk_bound_dev_if;
fl4->fl4_sport = htons(asoc->base.bind_addr.port);
@@ -1103,7 +1104,8 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, struct sctp_transport *t)
skb_set_inner_ipproto(skb, IPPROTO_SCTP);
udp_tunnel_xmit_skb(dst_rtable(dst), sk, skb, fl4->saddr,
fl4->daddr, dscp, ip4_dst_hoplimit(dst), df,
- sctp_sk(sk)->udp_port, t->encap_port, false, false);
+ sctp_sk(sk)->udp_port, t->encap_port, false, false,
+ 0);
return 0;
}
@@ -1333,14 +1335,9 @@ static int __net_init sctp_defaults_init(struct net *net)
/* Whether Cookie Preservative is enabled(1) or not(0) */
net->sctp.cookie_preserve_enable = 1;
- /* Default sctp sockets to use md5 as their hmac alg */
-#if defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5)
- net->sctp.sctp_hmac_alg = "md5";
-#elif defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1)
- net->sctp.sctp_hmac_alg = "sha1";
-#else
- net->sctp.sctp_hmac_alg = NULL;
-#endif
+ /* Whether cookie authentication is enabled(1) or not(0) */
+ net->sctp.cookie_auth_enable =
+ !IS_ENABLED(CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE);
/* Max.Burst - 4 */
net->sctp.max_burst = SCTP_DEFAULT_MAX_BURST;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 3ead591c72fd..2c0017d058d4 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -30,7 +30,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <crypto/hash.h>
+#include <crypto/utils.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ip.h>
@@ -1319,7 +1319,7 @@ struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc,
__u16 key_id)
{
struct sctp_authhdr auth_hdr;
- struct sctp_hmac *hmac_desc;
+ const struct sctp_hmac *hmac_desc;
struct sctp_chunk *retval;
/* Get the first hmac that the peer told us to use */
@@ -1674,8 +1674,10 @@ static struct sctp_cookie_param *sctp_pack_cookie(
* out on the network.
*/
retval = kzalloc(*cookie_len, GFP_ATOMIC);
- if (!retval)
- goto nodata;
+ if (!retval) {
+ *cookie_len = 0;
+ return NULL;
+ }
cookie = (struct sctp_signed_cookie *) retval->body;
@@ -1706,26 +1708,14 @@ static struct sctp_cookie_param *sctp_pack_cookie(
memcpy((__u8 *)(cookie + 1) +
ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len);
- if (sctp_sk(ep->base.sk)->hmac) {
- struct crypto_shash *tfm = sctp_sk(ep->base.sk)->hmac;
- int err;
-
- /* Sign the message. */
- err = crypto_shash_setkey(tfm, ep->secret_key,
- sizeof(ep->secret_key)) ?:
- crypto_shash_tfm_digest(tfm, (u8 *)&cookie->c, bodysize,
- cookie->signature);
- if (err)
- goto free_cookie;
+ /* Sign the cookie, if cookie authentication is enabled. */
+ if (sctp_sk(ep->base.sk)->cookie_auth_enable) {
+ static_assert(sizeof(cookie->mac) == SHA256_DIGEST_SIZE);
+ hmac_sha256(&ep->cookie_auth_key, (const u8 *)&cookie->c,
+ bodysize, cookie->mac);
}
return retval;
-
-free_cookie:
- kfree(retval);
-nodata:
- *cookie_len = 0;
- return NULL;
}
/* Unpack the cookie from COOKIE ECHO chunk, recreating the association. */
@@ -1740,7 +1730,6 @@ struct sctp_association *sctp_unpack_cookie(
struct sctp_signed_cookie *cookie;
struct sk_buff *skb = chunk->skb;
struct sctp_cookie *bear_cookie;
- __u8 *digest = ep->digest;
enum sctp_scope scope;
unsigned int len;
ktime_t kt;
@@ -1770,30 +1759,19 @@ struct sctp_association *sctp_unpack_cookie(
cookie = chunk->subh.cookie_hdr;
bear_cookie = &cookie->c;
- if (!sctp_sk(ep->base.sk)->hmac)
- goto no_hmac;
+ /* Verify the cookie's MAC, if cookie authentication is enabled. */
+ if (sctp_sk(ep->base.sk)->cookie_auth_enable) {
+ u8 mac[SHA256_DIGEST_SIZE];
- /* Check the signature. */
- {
- struct crypto_shash *tfm = sctp_sk(ep->base.sk)->hmac;
- int err;
-
- err = crypto_shash_setkey(tfm, ep->secret_key,
- sizeof(ep->secret_key)) ?:
- crypto_shash_tfm_digest(tfm, (u8 *)bear_cookie, bodysize,
- digest);
- if (err) {
- *error = -SCTP_IERROR_NOMEM;
+ hmac_sha256(&ep->cookie_auth_key, (const u8 *)bear_cookie,
+ bodysize, mac);
+ static_assert(sizeof(cookie->mac) == sizeof(mac));
+ if (crypto_memneq(mac, cookie->mac, sizeof(mac))) {
+ *error = -SCTP_IERROR_BAD_SIG;
goto fail;
}
}
- if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
- *error = -SCTP_IERROR_BAD_SIG;
- goto fail;
- }
-
-no_hmac:
/* IG Section 2.35.2:
* 3) Compare the port numbers and the verification tag contained
* within the COOKIE ECHO chunk to the actual port numbers and the
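The cookie verification above recomputes the MAC and rejects the chunk when crypto_memneq() reports a difference, rather than using memcmp(). The point of crypto_memneq() is that it touches every byte regardless of where the first mismatch sits, so response timing does not reveal the mismatch position. A standalone sketch of that constant-time comparison, with an illustrative function name:

/*
 * Constant-time inequality check in the spirit of crypto_memneq():
 * accumulate XOR differences over the whole buffer, branch only at the end.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int demo_memneq(const void *a, const void *b, size_t len)
{
	const uint8_t *pa = a, *pb = b;
	uint8_t diff = 0;

	for (size_t i = 0; i < len; i++)
		diff |= pa[i] ^ pb[i];

	return diff != 0;       /* non-zero means "not equal" */
}

int main(void)
{
	uint8_t expected[32] = { 0x42 };
	uint8_t received[32] = { 0x42 };

	printf("mismatch: %d\n", demo_memneq(expected, received, sizeof(expected)));
	received[31] ^= 1;
	printf("mismatch: %d\n", demo_memneq(expected, received, sizeof(expected)));
	return 0;
}

The same swap from memcmp() to crypto_memneq() appears below in sctp_sf_authenticate() for AUTH chunk digests.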
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index a0524ba8d787..3755ba079d07 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -30,6 +30,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <crypto/utils.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ip.h>
@@ -885,7 +886,8 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
return SCTP_DISPOSITION_CONSUME;
nomem_authev:
- sctp_ulpevent_free(ai_ev);
+ if (ai_ev)
+ sctp_ulpevent_free(ai_ev);
nomem_aiev:
sctp_ulpevent_free(ev);
nomem_ev:
@@ -4361,7 +4363,7 @@ static enum sctp_ierror sctp_sf_authenticate(
struct sctp_shared_key *sh_key = NULL;
struct sctp_authhdr *auth_hdr;
__u8 *save_digest, *digest;
- struct sctp_hmac *hmac;
+ const struct sctp_hmac *hmac;
unsigned int sig_len;
__u16 key_id;
@@ -4416,7 +4418,7 @@ static enum sctp_ierror sctp_sf_authenticate(
sh_key, GFP_ATOMIC);
/* Discard the packet if the digests do not match */
- if (memcmp(save_digest, digest, sig_len)) {
+ if (crypto_memneq(save_digest, digest, sig_len)) {
kfree(save_digest);
return SCTP_IERROR_BAD_SIG;
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 1e5739858c20..ed8293a34240 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -37,7 +37,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <crypto/hash.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
@@ -4987,7 +4986,7 @@ static int sctp_init_sock(struct sock *sk)
sp->default_rcv_context = 0;
sp->max_burst = net->sctp.max_burst;
- sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg;
+ sp->cookie_auth_enable = net->sctp.cookie_auth_enable;
/* Initialize default setup parameters. These parameters
* can be modified with the SCTP_INITMSG socket option or
@@ -5079,8 +5078,6 @@ static int sctp_init_sock(struct sock *sk)
if (!sp->ep)
return -ENOMEM;
- sp->hmac = NULL;
-
sk->sk_destruct = sctp_destruct_sock;
SCTP_DBG_OBJCNT_INC(sock);
@@ -5117,18 +5114,8 @@ static void sctp_destroy_sock(struct sock *sk)
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}
-/* Triggered when there are no references on the socket anymore */
-static void sctp_destruct_common(struct sock *sk)
-{
- struct sctp_sock *sp = sctp_sk(sk);
-
- /* Free up the HMAC transform. */
- crypto_free_shash(sp->hmac);
-}
-
static void sctp_destruct_sock(struct sock *sk)
{
- sctp_destruct_common(sk);
inet_sock_destruct(sk);
}
@@ -8345,8 +8332,8 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
bool reuse = (sk->sk_reuse || sp->reuse);
struct sctp_bind_hashbucket *head; /* hash list */
struct net *net = sock_net(sk);
- kuid_t uid = sock_i_uid(sk);
struct sctp_bind_bucket *pp;
+ kuid_t uid = sk_uid(sk);
unsigned short snum;
int ret;
@@ -8444,7 +8431,7 @@ pp_found:
(reuse && (sk2->sk_reuse || sp2->reuse) &&
sk2->sk_state != SCTP_SS_LISTENING) ||
(sk->sk_reuseport && sk2->sk_reuseport &&
- uid_eq(uid, sock_i_uid(sk2))))
+ uid_eq(uid, sk_uid(sk2))))
continue;
if ((!sk->sk_bound_dev_if || !bound_dev_if2 ||
@@ -8530,22 +8517,8 @@ static int sctp_listen_start(struct sock *sk, int backlog)
{
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_endpoint *ep = sp->ep;
- struct crypto_shash *tfm = NULL;
- char alg[32];
int err;
- /* Allocate HMAC for generating cookie. */
- if (!sp->hmac && sp->sctp_hmac_alg) {
- sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
- tfm = crypto_alloc_shash(alg, 0, 0);
- if (IS_ERR(tfm)) {
- net_info_ratelimited("failed to load transform for %s: %ld\n",
- sp->sctp_hmac_alg, PTR_ERR(tfm));
- return -ENOSYS;
- }
- sctp_sk(sk)->hmac = tfm;
- }
-
/*
* If a bind() or sctp_bindx() is not called prior to a listen()
* call that allows new associations to be accepted, the system
@@ -9492,8 +9465,8 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
newsk->sk_sndbuf = sk->sk_sndbuf;
newsk->sk_rcvbuf = sk->sk_rcvbuf;
newsk->sk_lingertime = sk->sk_lingertime;
- newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
- newsk->sk_sndtimeo = sk->sk_sndtimeo;
+ newsk->sk_rcvtimeo = READ_ONCE(sk->sk_rcvtimeo);
+ newsk->sk_sndtimeo = READ_ONCE(sk->sk_sndtimeo);
newsk->sk_rxhash = sk->sk_rxhash;
newinet = inet_sk(newsk);
@@ -9561,7 +9534,6 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
* copy.
*/
newsp->ep = newep;
- newsp->hmac = NULL;
/* Hook this new socket in to the bind_hash list. */
head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
@@ -9581,16 +9553,6 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
if (err)
return err;
- /* New ep's auth_hmacs should be set if old ep's is set, in case
- * that net->sctp.auth_enable has been changed to 0 by users and
- * new ep's auth_hmacs couldn't be set in sctp_endpoint_init().
- */
- if (oldsp->ep->auth_hmacs) {
- err = sctp_auth_init_hmacs(newsp->ep, GFP_KERNEL);
- if (err)
- return err;
- }
-
sctp_auto_asconf_init(newsp);
/* Move any messages in the old socket's receive queue that are for the
@@ -9723,7 +9685,6 @@ struct proto sctp_prot = {
static void sctp_v6_destruct_sock(struct sock *sk)
{
- sctp_destruct_common(sk);
inet6_sock_destruct(sk);
}
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index ee3eac338a9d..15e7db9a3ab2 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -174,7 +174,7 @@ static struct ctl_table sctp_net_table[] = {
},
{
.procname = "cookie_hmac_alg",
- .data = &init_net.sctp.sctp_hmac_alg,
+ .data = &init_net.sctp.cookie_auth_enable,
.maxlen = 8,
.mode = 0644,
.proc_handler = proc_sctp_do_hmac_alg,
@@ -388,10 +388,8 @@ static int proc_sctp_do_hmac_alg(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = container_of(ctl->data, struct net,
- sctp.sctp_hmac_alg);
+ sctp.cookie_auth_enable);
struct ctl_table tbl;
- bool changed = false;
- char *none = "none";
char tmp[8] = {0};
int ret;
@@ -399,35 +397,26 @@ static int proc_sctp_do_hmac_alg(const struct ctl_table *ctl, int write,
if (write) {
tbl.data = tmp;
- tbl.maxlen = sizeof(tmp);
- } else {
- tbl.data = net->sctp.sctp_hmac_alg ? : none;
- tbl.maxlen = strlen(tbl.data);
- }
-
- ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
- if (write && ret == 0) {
-#ifdef CONFIG_CRYPTO_MD5
- if (!strncmp(tmp, "md5", 3)) {
- net->sctp.sctp_hmac_alg = "md5";
- changed = true;
+ tbl.maxlen = sizeof(tmp) - 1;
+ ret = proc_dostring(&tbl, 1, buffer, lenp, ppos);
+ if (ret)
+ return ret;
+ if (!strcmp(tmp, "sha256")) {
+ net->sctp.cookie_auth_enable = 1;
+ return 0;
}
-#endif
-#ifdef CONFIG_CRYPTO_SHA1
- if (!strncmp(tmp, "sha1", 4)) {
- net->sctp.sctp_hmac_alg = "sha1";
- changed = true;
+ if (!strcmp(tmp, "none")) {
+ net->sctp.cookie_auth_enable = 0;
+ return 0;
}
-#endif
- if (!strncmp(tmp, "none", 4)) {
- net->sctp.sctp_hmac_alg = NULL;
- changed = true;
- }
- if (!changed)
- ret = -EINVAL;
+ return -EINVAL;
}
-
- return ret;
+ if (net->sctp.cookie_auth_enable)
+ tbl.data = (char *)"sha256";
+ else
+ tbl.data = (char *)"none";
+ tbl.maxlen = strlen(tbl.data);
+ return proc_dostring(&tbl, 0, buffer, lenp, ppos);
}
static int proc_sctp_do_rto_min(const struct ctl_table *ctl, int write,
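The reworked cookie_hmac_alg handler above reduces the sysctl to a boolean: writes accept only "sha256" or "none", reads render the boolean back as one of those strings, and anything else is rejected. A minimal sketch of the two directions, with procfs plumbing and error codes omitted and names chosen for illustration:

/*
 * String <-> flag mapping behind the cookie_hmac_alg sysctl.
 */
#include <stdio.h>
#include <string.h>

static int cookie_auth_enable;

static int cookie_hmac_alg_write(const char *val)
{
	if (!strcmp(val, "sha256")) {
		cookie_auth_enable = 1;
		return 0;
	}
	if (!strcmp(val, "none")) {
		cookie_auth_enable = 0;
		return 0;
	}
	return -1;                      /* anything else: -EINVAL */
}

static const char *cookie_hmac_alg_read(void)
{
	return cookie_auth_enable ? "sha256" : "none";
}

int main(void)
{
	cookie_hmac_alg_write("sha256");
	printf("%s\n", cookie_hmac_alg_read());
	cookie_hmac_alg_write("none");
	printf("%s\n", cookie_hmac_alg_read());
	printf("bad value: %d\n", cookie_hmac_alg_write("md5"));
	return 0;
}

Note that "md5" and "sha1" are no longer accepted values once the legacy crypto_shash path is gone.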
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 6946c1462793..4d258a6e8033 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -240,7 +240,7 @@ void sctp_transport_set_owner(struct sctp_transport *transport,
void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
{
/* If we don't have a fresh route, look one up */
- if (!transport->dst || transport->dst->obsolete) {
+ if (!transport->dst || READ_ONCE(transport->dst->obsolete)) {
sctp_transport_dst_release(transport);
transport->af_specific->get_dst(transport, &transport->saddr,
&transport->fl, sk);
diff --git a/net/smc/Kconfig b/net/smc/Kconfig
index ba5e6a2dd2fd..99ecd59d1f4b 100644
--- a/net/smc/Kconfig
+++ b/net/smc/Kconfig
@@ -1,8 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config SMC
tristate "SMC socket protocol family"
- depends on INET && INFINIBAND
- depends on m || ISM != m
+ depends on INET && INFINIBAND && DIBS
help
SMC-R provides a "sockets over RDMA" solution making use of
RDMA over Converged Ethernet (RoCE) technology to upgrade
@@ -20,16 +19,3 @@ config SMC_DIAG
smcss.
if unsure, say Y.
-
-config SMC_LO
- bool "SMC intra-OS shortcut with loopback-ism"
- depends on SMC
- default n
- help
- SMC_LO enables the creation of an Emulated-ISM device named
- loopback-ism in SMC and makes use of it for transferring data
- when communication occurs within the same OS. This helps in
- convenient testing of SMC-D since loopback-ism is independent
- of architecture or hardware.
-
- if unsure, say N.
diff --git a/net/smc/Makefile b/net/smc/Makefile
index 60f1c87d5212..0e754cbc38f9 100644
--- a/net/smc/Makefile
+++ b/net/smc/Makefile
@@ -6,4 +6,3 @@ smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o
smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o smc_ism.o smc_netlink.o smc_stats.o
smc-y += smc_tracepoint.o smc_inet.o
smc-$(CONFIG_SYSCTL) += smc_sysctl.o
-smc-$(CONFIG_SMC_LO) += smc_loopback.o
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 3760131f1484..77b99e8ef35a 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -30,6 +30,10 @@
#include <linux/splice.h>
#include <net/sock.h>
+#include <net/inet_common.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#endif
#include <net/tcp.h>
#include <net/smc.h>
#include <asm/ioctls.h>
@@ -53,7 +57,6 @@
#include "smc_stats.h"
#include "smc_tracepoint.h"
#include "smc_sysctl.h"
-#include "smc_loopback.h"
#include "smc_inet.h"
static DEFINE_MUTEX(smc_server_lgr_pending); /* serialize link group
@@ -360,6 +363,16 @@ static void smc_destruct(struct sock *sk)
return;
if (!sock_flag(sk, SOCK_DEAD))
return;
+ switch (sk->sk_family) {
+ case AF_INET:
+ inet_sock_destruct(sk);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ inet6_sock_destruct(sk);
+ break;
+#endif
+ }
}
static struct lock_class_key smc_key;
@@ -486,8 +499,8 @@ static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
{
/* options we don't get control via setsockopt for */
nsk->sk_type = osk->sk_type;
- nsk->sk_sndtimeo = osk->sk_sndtimeo;
- nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
+ nsk->sk_sndtimeo = READ_ONCE(osk->sk_sndtimeo);
+ nsk->sk_rcvtimeo = READ_ONCE(osk->sk_rcvtimeo);
nsk->sk_mark = READ_ONCE(osk->sk_mark);
nsk->sk_priority = READ_ONCE(osk->sk_priority);
nsk->sk_rcvlowat = osk->sk_rcvlowat;
@@ -1083,8 +1096,7 @@ static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
}
/* Check for VLAN ID and register it on ISM device just for CLC handshake */
-static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
- struct smc_init_info *ini)
+static int smc_connect_ism_vlan_setup(struct smc_init_info *ini)
{
if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev[0], ini->vlan_id))
return SMC_CLC_DECL_ISMVLANERR;
@@ -1099,7 +1111,7 @@ static int smc_find_proposal_devices(struct smc_sock *smc,
/* check if there is an ism device available */
if (!(ini->smcd_version & SMC_V1) ||
smc_find_ism_device(smc, ini) ||
- smc_connect_ism_vlan_setup(smc, ini))
+ smc_connect_ism_vlan_setup(ini))
ini->smcd_version &= ~SMC_V1;
/* else ISM V1 is supported for this connection */
@@ -1144,8 +1156,7 @@ static int smc_find_proposal_devices(struct smc_sock *smc,
/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
* used, the VLAN ID will be registered again during the connection setup.
*/
-static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc,
- struct smc_init_info *ini)
+static int smc_connect_ism_vlan_cleanup(struct smc_init_info *ini)
{
if (!smcd_indicated(ini->smc_type_v1))
return 0;
@@ -1568,13 +1579,13 @@ static int __smc_connect(struct smc_sock *smc)
goto vlan_cleanup;
SMC_STAT_CLNT_SUCC_INC(sock_net(smc->clcsock->sk), aclc);
- smc_connect_ism_vlan_cleanup(smc, ini);
+ smc_connect_ism_vlan_cleanup(ini);
kfree(buf);
kfree(ini);
return 0;
vlan_cleanup:
- smc_connect_ism_vlan_cleanup(smc, ini);
+ smc_connect_ism_vlan_cleanup(ini);
kfree(buf);
fallback:
kfree(ini);
@@ -1585,7 +1596,7 @@ static void smc_connect_work(struct work_struct *work)
{
struct smc_sock *smc = container_of(work, struct smc_sock,
connect_work);
- long timeo = smc->sk.sk_sndtimeo;
+ long timeo = READ_ONCE(smc->sk.sk_sndtimeo);
int rc = 0;
if (!timeo)
@@ -2554,8 +2565,9 @@ static void smc_listen_work(struct work_struct *work)
goto out_decl;
}
- smc_listen_out_connected(new_smc);
SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini);
+ /* smc_listen_out() will release smcsk */
+ smc_listen_out_connected(new_smc);
goto out_free;
out_unlock:
@@ -2735,8 +2747,7 @@ int smc_accept(struct socket *sock, struct socket *new_sock,
if (lsmc->sockopt_defer_accept && !(arg->flags & O_NONBLOCK)) {
/* wait till data arrives on the socket */
- timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
- MSEC_PER_SEC);
+ timeo = secs_to_jiffies(lsmc->sockopt_defer_accept);
if (smc_sk(nsk)->use_fallback) {
struct sock *clcsk = smc_sk(nsk)->clcsock->sk;
@@ -3523,15 +3534,15 @@ static int __init smc_init(void)
rc = -ENOMEM;
- smc_tcp_ls_wq = alloc_workqueue("smc_tcp_ls_wq", 0, 0);
+ smc_tcp_ls_wq = alloc_workqueue("smc_tcp_ls_wq", WQ_PERCPU, 0);
if (!smc_tcp_ls_wq)
goto out_pnet;
- smc_hs_wq = alloc_workqueue("smc_hs_wq", 0, 0);
+ smc_hs_wq = alloc_workqueue("smc_hs_wq", WQ_PERCPU, 0);
if (!smc_hs_wq)
goto out_alloc_tcp_ls_wq;
- smc_close_wq = alloc_workqueue("smc_close_wq", 0, 0);
+ smc_close_wq = alloc_workqueue("smc_close_wq", WQ_PERCPU, 0);
if (!smc_close_wq)
goto out_alloc_hs_wq;
@@ -3579,16 +3590,10 @@ static int __init smc_init(void)
goto out_sock;
}
- rc = smc_loopback_init();
- if (rc) {
- pr_err("%s: smc_loopback_init fails with %d\n", __func__, rc);
- goto out_ib;
- }
-
rc = tcp_register_ulp(&smc_ulp_ops);
if (rc) {
pr_err("%s: tcp_ulp_register fails with %d\n", __func__, rc);
- goto out_lo;
+ goto out_ib;
}
rc = smc_inet_init();
if (rc) {
@@ -3599,8 +3604,6 @@ static int __init smc_init(void)
return 0;
out_ulp:
tcp_unregister_ulp(&smc_ulp_ops);
-out_lo:
- smc_loopback_exit();
out_ib:
smc_ib_unregister_client();
out_sock:
@@ -3639,7 +3642,6 @@ static void __exit smc_exit(void)
tcp_unregister_ulp(&smc_ulp_ops);
sock_unregister(PF_SMC);
smc_core_exit();
- smc_loopback_exit();
smc_ib_unregister_client();
smc_ism_exit();
destroy_workqueue(smc_close_wq);
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 78ae10d06ed2..2c9084963739 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -283,10 +283,10 @@ struct smc_connection {
};
struct smc_sock { /* smc sock container */
- struct sock sk;
-#if IS_ENABLED(CONFIG_IPV6)
- struct ipv6_pinfo *pinet6;
-#endif
+ union {
+ struct sock sk;
+ struct inet_sock icsk_inet;
+ };
struct socket *clcsock; /* internal tcp socket */
void (*clcsk_state_change)(struct sock *sk);
/* original stat_change fct. */
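The smc.h change above puts struct sock and struct inet_sock into an anonymous union at the start of struct smc_sock, so the same object can be viewed either as the generic socket or as the inet socket it now embeds. A userspace sketch of that layout pattern follows; the types are stand-ins, not the kernel's socket structures.

/*
 * Anonymous union at offset zero: the "base" view and the larger "derived"
 * view alias the same leading bytes, so either pointer type reaches the
 * common fields.
 */
#include <stdio.h>

struct demo_sock {              /* stands in for struct sock */
	int family;
};

struct demo_inet_sock {         /* stands in for struct inet_sock */
	struct demo_sock sk;    /* must stay the first member */
	unsigned short inet_sport;
};

struct demo_smc_sock {
	union {
		struct demo_sock sk;
		struct demo_inet_sock icsk_inet;
	};
	int use_fallback;
};

int main(void)
{
	struct demo_smc_sock smc = { .icsk_inet = { .sk = { .family = 2 } } };

	/* both views see the same underlying bytes */
	printf("family via sk:   %d\n", smc.sk.family);
	printf("family via inet: %d\n", smc.icsk_inet.sk.family);
	return 0;
}

This is also why the separate pinet6 pointer can go away: the IPv6-specific state is reached through the inet view instead of a side pointer.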
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 521f5df80e10..157aace169d4 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -426,8 +426,6 @@ smc_clc_msg_decl_valid(struct smc_clc_msg_decline *dclc)
{
struct smc_clc_msg_hdr *hdr = &dclc->hdr;
- if (hdr->typev1 != SMC_TYPE_R && hdr->typev1 != SMC_TYPE_D)
- return false;
if (hdr->version == SMC_V1) {
if (ntohs(hdr->length) != sizeof(struct smc_clc_msg_decline))
return false;
@@ -511,10 +509,10 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm, bool check_trl)
}
/* find ipv4 addr on device and get the prefix len, fill CLC proposal msg */
-static int smc_clc_prfx_set4_rcu(struct dst_entry *dst, __be32 ipv4,
+static int smc_clc_prfx_set4_rcu(struct net_device *dev, __be32 ipv4,
struct smc_clc_msg_proposal_prefix *prop)
{
- struct in_device *in_dev = __in_dev_get_rcu(dst->dev);
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
const struct in_ifaddr *ifa;
if (!in_dev)
@@ -532,12 +530,12 @@ static int smc_clc_prfx_set4_rcu(struct dst_entry *dst, __be32 ipv4,
}
/* fill CLC proposal msg with ipv6 prefixes from device */
-static int smc_clc_prfx_set6_rcu(struct dst_entry *dst,
+static int smc_clc_prfx_set6_rcu(struct net_device *dev,
struct smc_clc_msg_proposal_prefix *prop,
struct smc_clc_ipv6_prefix *ipv6_prfx)
{
#if IS_ENABLED(CONFIG_IPV6)
- struct inet6_dev *in6_dev = __in6_dev_get(dst->dev);
+ struct inet6_dev *in6_dev = __in6_dev_get(dev);
struct inet6_ifaddr *ifa;
int cnt = 0;
@@ -566,41 +564,44 @@ static int smc_clc_prfx_set(struct socket *clcsock,
struct smc_clc_msg_proposal_prefix *prop,
struct smc_clc_ipv6_prefix *ipv6_prfx)
{
- struct dst_entry *dst = sk_dst_get(clcsock->sk);
struct sockaddr_storage addrs;
struct sockaddr_in6 *addr6;
struct sockaddr_in *addr;
+ struct net_device *dev;
+ struct dst_entry *dst;
int rc = -ENOENT;
- if (!dst) {
- rc = -ENOTCONN;
- goto out;
- }
- if (!dst->dev) {
- rc = -ENODEV;
- goto out_rel;
- }
/* get address to which the internal TCP socket is bound */
if (kernel_getsockname(clcsock, (struct sockaddr *)&addrs) < 0)
- goto out_rel;
+ goto out;
+
/* analyze IP specific data of net_device belonging to TCP socket */
addr6 = (struct sockaddr_in6 *)&addrs;
+
rcu_read_lock();
+
+ dst = __sk_dst_get(clcsock->sk);
+ dev = dst ? dst_dev_rcu(dst) : NULL;
+ if (!dev) {
+ rc = -ENODEV;
+ goto out_unlock;
+ }
+
if (addrs.ss_family == PF_INET) {
/* IPv4 */
addr = (struct sockaddr_in *)&addrs;
- rc = smc_clc_prfx_set4_rcu(dst, addr->sin_addr.s_addr, prop);
+ rc = smc_clc_prfx_set4_rcu(dev, addr->sin_addr.s_addr, prop);
} else if (ipv6_addr_v4mapped(&addr6->sin6_addr)) {
/* mapped IPv4 address - peer is IPv4 only */
- rc = smc_clc_prfx_set4_rcu(dst, addr6->sin6_addr.s6_addr32[3],
+ rc = smc_clc_prfx_set4_rcu(dev, addr6->sin6_addr.s6_addr32[3],
prop);
} else {
/* IPv6 */
- rc = smc_clc_prfx_set6_rcu(dst, prop, ipv6_prfx);
+ rc = smc_clc_prfx_set6_rcu(dev, prop, ipv6_prfx);
}
+
+out_unlock:
rcu_read_unlock();
-out_rel:
- dst_release(dst);
out:
return rc;
}
@@ -656,26 +657,26 @@ static int smc_clc_prfx_match6_rcu(struct net_device *dev,
int smc_clc_prfx_match(struct socket *clcsock,
struct smc_clc_msg_proposal_prefix *prop)
{
- struct dst_entry *dst = sk_dst_get(clcsock->sk);
+ struct net_device *dev;
+ struct dst_entry *dst;
int rc;
- if (!dst) {
- rc = -ENOTCONN;
- goto out;
- }
- if (!dst->dev) {
+ rcu_read_lock();
+
+ dst = __sk_dst_get(clcsock->sk);
+ dev = dst ? dst_dev_rcu(dst) : NULL;
+ if (!dev) {
rc = -ENODEV;
- goto out_rel;
+ goto out;
}
- rcu_read_lock();
+
if (!prop->ipv6_prefixes_cnt)
- rc = smc_clc_prfx_match4_rcu(dst->dev, prop);
+ rc = smc_clc_prfx_match4_rcu(dev, prop);
else
- rc = smc_clc_prfx_match6_rcu(dst->dev, prop);
- rcu_read_unlock();
-out_rel:
- dst_release(dst);
+ rc = smc_clc_prfx_match6_rcu(dev, prop);
out:
+ rcu_read_unlock();
+
return rc;
}
@@ -688,7 +689,7 @@ out:
int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
u8 expected_type, unsigned long timeout)
{
- long rcvtimeo = smc->clcsock->sk->sk_rcvtimeo;
+ long rcvtimeo = READ_ONCE(smc->clcsock->sk->sk_rcvtimeo);
struct sock *clc_sk = smc->clcsock->sk;
struct smc_clc_msg_hdr *clcm = buf;
struct msghdr msg = {NULL, 0};
@@ -707,7 +708,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
* sizeof(struct smc_clc_msg_hdr)
*/
krflags = MSG_PEEK | MSG_WAITALL;
- clc_sk->sk_rcvtimeo = timeout;
+ WRITE_ONCE(clc_sk->sk_rcvtimeo, timeout);
iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1,
sizeof(struct smc_clc_msg_hdr));
len = sock_recvmsg(smc->clcsock, &msg, krflags);
@@ -795,7 +796,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
}
out:
- clc_sk->sk_rcvtimeo = rcvtimeo;
+ WRITE_ONCE(clc_sk->sk_rcvtimeo, rcvtimeo);
return reason_code;
}
@@ -915,7 +916,7 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
/* add SMC-D specifics */
if (ini->ism_dev[0]) {
smcd = ini->ism_dev[0];
- smcd->ops->get_local_gid(smcd, &smcd_gid);
+ copy_to_smcdgid(&smcd_gid, &smcd->dibs->gid);
pclc_smcd->ism.gid = htonll(smcd_gid.gid);
pclc_smcd->ism.chid =
htons(smc_ism_get_chid(ini->ism_dev[0]));
@@ -965,7 +966,7 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
if (ini->ism_offered_cnt) {
for (i = 1; i <= ini->ism_offered_cnt; i++) {
smcd = ini->ism_dev[i];
- smcd->ops->get_local_gid(smcd, &smcd_gid);
+ copy_to_smcdgid(&smcd_gid, &smcd->dibs->gid);
gidchids[entry].chid =
htons(smc_ism_get_chid(ini->ism_dev[i]));
gidchids[entry].gid = htonll(smcd_gid.gid);
@@ -1058,7 +1059,7 @@ smcd_clc_prep_confirm_accept(struct smc_connection *conn,
/* SMC-D specific settings */
memcpy(clc->hdr.eyecatcher, SMCD_EYECATCHER,
sizeof(SMCD_EYECATCHER));
- smcd->ops->get_local_gid(smcd, &smcd_gid);
+ copy_to_smcdgid(&smcd_gid, &smcd->dibs->gid);
clc->hdr.typev1 = SMC_TYPE_D;
clc->d0.gid = htonll(smcd_gid.gid);
clc->d0.token = htonll(conn->rmb_desc->token);
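Earlier in this file's hunks, smc_clc_prfx_set() and smc_clc_prfx_match() stop taking a dst reference and instead resolve the device under rcu_read_lock() with a single unlock exit. The sketch below shows only that "resolve under a read-side lock, one exit path that unlocks" shape; the pthread rwlock and the lookup are plain userspace stand-ins for rcu_read_lock()/__sk_dst_get(), and every name here is illustrative.

/*
 * Lookup under a read-side lock with one unlock path, instead of taking
 * and dropping a reference on the looked-up object.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static const char *current_dev = "eth0";   /* may be NULL if not connected */

static int match_prefix(void)
{
	const char *dev;
	int rc;

	pthread_rwlock_rdlock(&table_lock);

	dev = current_dev;                 /* __sk_dst_get() + dst_dev_rcu() */
	if (!dev) {
		rc = -19;                  /* -ENODEV */
		goto out;
	}

	rc = 0;                            /* prefix match against dev's addresses */
	printf("matched prefixes on %s\n", dev);
out:
	pthread_rwlock_unlock(&table_lock);
	return rc;
}

int main(void)
{
	return match_prefix() ? 1 : 0;
}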
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index ac07b963aede..be0c2da83d2b 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -85,7 +85,7 @@ static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
* otherwise there is a risk of out-of-sync link groups.
*/
if (!lgr->freeing) {
- mod_delayed_work(system_wq, &lgr->free_work,
+ mod_delayed_work(system_percpu_wq, &lgr->free_work,
(!lgr->is_smcd && lgr->role == SMC_CLNT) ?
SMC_LGR_FREE_DELAY_CLNT :
SMC_LGR_FREE_DELAY_SERV);
@@ -555,7 +555,7 @@ static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr,
if (nla_put_u32(skb, SMC_NLA_LGR_D_ID, *((u32 *)&lgr->id)))
goto errattr;
- smcd->ops->get_local_gid(smcd, &smcd_gid);
+ copy_to_smcdgid(&smcd_gid, &smcd->dibs->gid);
if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_GID,
smcd_gid.gid, SMC_NLA_LGR_D_PAD))
goto errattr;
@@ -896,7 +896,7 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
rc = SMC_CLC_DECL_MEM;
goto ism_put_vlan;
}
- lgr->tx_wq = alloc_workqueue("smc_tx_wq-%*phN", 0, 0,
+ lgr->tx_wq = alloc_workqueue("smc_tx_wq-%*phN", WQ_PERCPU, 0,
SMC_LGR_ID_SIZE, &lgr->id);
if (!lgr->tx_wq) {
rc = -ENOMEM;
@@ -924,7 +924,7 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
if (ini->is_smcd) {
/* SMC-D specific settings */
smcd = ini->ism_dev[ini->ism_selected];
- get_device(smcd->ops->get_dev(smcd));
+ get_device(&smcd->dibs->dev);
lgr->peer_gid.gid =
ini->ism_peer_gid[ini->ism_selected].gid;
lgr->peer_gid.gid_ext =
@@ -1474,7 +1474,7 @@ static void smc_lgr_free(struct smc_link_group *lgr)
destroy_workqueue(lgr->tx_wq);
if (lgr->is_smcd) {
smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
- put_device(lgr->smcd->ops->get_dev(lgr->smcd));
+ put_device(&lgr->smcd->dibs->dev);
}
smc_lgr_put(lgr); /* theoretically last lgr_put */
}
@@ -1883,35 +1883,32 @@ static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
/* Determine vlan of internal TCP socket. */
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
{
- struct dst_entry *dst = sk_dst_get(clcsock->sk);
struct netdev_nested_priv priv;
struct net_device *ndev;
+ struct dst_entry *dst;
int rc = 0;
ini->vlan_id = 0;
- if (!dst) {
- rc = -ENOTCONN;
- goto out;
- }
- if (!dst->dev) {
+
+ rcu_read_lock();
+
+ dst = __sk_dst_get(clcsock->sk);
+ ndev = dst ? dst_dev_rcu(dst) : NULL;
+ if (!ndev) {
rc = -ENODEV;
- goto out_rel;
+ goto out;
}
- ndev = dst->dev;
if (is_vlan_dev(ndev)) {
ini->vlan_id = vlan_dev_vlan_id(ndev);
- goto out_rel;
+ goto out;
}
priv.data = (void *)&ini->vlan_id;
- rtnl_lock();
- netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv);
- rtnl_unlock();
-
-out_rel:
- dst_release(dst);
+ netdev_walk_all_lower_dev_rcu(ndev, smc_vlan_by_tcpsk_walk, &priv);
out:
+ rcu_read_unlock();
+
return rc;
}
@@ -2100,8 +2097,7 @@ int smc_uncompress_bufsize(u8 compressed)
/* try to reuse a sndbuf or rmb description slot for a certain
* buffer size; if not available, return NULL
*/
-static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
- struct rw_semaphore *lock,
+static struct smc_buf_desc *smc_buf_get_slot(struct rw_semaphore *lock,
struct list_head *buf_list)
{
struct smc_buf_desc *buf_slot;
@@ -2442,7 +2438,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
bufsize = smc_uncompress_bufsize(bufsize_comp);
/* check for reusable slot in the link group */
- buf_desc = smc_buf_get_slot(bufsize_comp, lock, buf_list);
+ buf_desc = smc_buf_get_slot(lock, buf_list);
if (buf_desc) {
buf_desc->is_dma_need_sync = 0;
SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, true, bufsize);
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 48a1b1dcb576..a5a78cbff341 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -13,6 +13,7 @@
#define _SMC_CORE_H
#include <linux/atomic.h>
+#include <linux/types.h>
#include <linux/smc.h>
#include <linux/pci.h>
#include <rdma/ib_verbs.h>
@@ -221,6 +222,10 @@ struct smc_buf_desc {
/* virtually contiguous */
};
struct { /* SMC-D */
+ /* SMC-D tx buffer */
+ bool is_attached;
+ /* no need for explicit writes */
+ /* SMC-D rx buffer: */
unsigned short sba_idx;
/* SBA index number */
u64 token;
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index 6fdb2d96777a..bf0beaa23bdb 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -64,7 +64,7 @@ static int smc_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
if (nla_put_u8(skb, SMC_DIAG_SHUTDOWN, sk->sk_shutdown))
return 1;
- r->diag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
+ r->diag_uid = from_kuid_munged(user_ns, sk_uid(sk));
r->diag_inode = sock_i_ino(sk);
return 0;
}
@@ -175,7 +175,7 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
dinfo.linkid = *((u32 *)conn->lgr->id);
dinfo.peer_gid = conn->lgr->peer_gid.gid;
dinfo.peer_gid_ext = conn->lgr->peer_gid.gid_ext;
- smcd->ops->get_local_gid(smcd, &smcd_gid);
+ copy_to_smcdgid(&smcd_gid, &smcd->dibs->gid);
dinfo.my_gid = smcd_gid.gid;
dinfo.my_gid_ext = smcd_gid.gid_ext;
dinfo.token = conn->rmb_desc->token;
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 53828833a3f7..0052f02756eb 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -742,6 +742,9 @@ bool smc_ib_is_sg_need_sync(struct smc_link *lnk,
unsigned int i;
bool ret = false;
+ if (!lnk->smcibdev->ibdev->dma_device)
+ return ret;
+
/* for now there is just one DMA address */
for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
buf_slot->sgt[lnk->link_idx].nents, i) {
@@ -971,13 +974,17 @@ static int smc_ib_add_dev(struct ib_device *ibdev)
smcibdev->pnetid[i]))
smc_pnetid_by_table_ib(smcibdev, i + 1);
smc_copy_netdev_ifindex(smcibdev, i);
- pr_warn_ratelimited("smc: ib device %s port %d has pnetid "
- "%.16s%s\n",
- smcibdev->ibdev->name, i + 1,
- smcibdev->pnetid[i],
- smcibdev->pnetid_by_user[i] ?
- " (user defined)" :
- "");
+ if (smc_pnet_is_pnetid_set(smcibdev->pnetid[i]))
+ pr_warn_ratelimited("smc: ib device %s port %d has pnetid %.16s%s\n",
+ smcibdev->ibdev->name, i + 1,
+ smcibdev->pnetid[i],
+ smcibdev->pnetid_by_user[i] ?
+ " (user defined)" :
+ "");
+ else
+ pr_warn_ratelimited("smc: ib device %s port %d has no pnetid\n",
+ smcibdev->ibdev->name, i + 1);
+
}
schedule_work(&smcibdev->port_event_work);
return 0;
diff --git a/net/smc/smc_inet.c b/net/smc/smc_inet.c
index a944e7dcb8b9..a94084b4a498 100644
--- a/net/smc/smc_inet.c
+++ b/net/smc/smc_inet.c
@@ -56,7 +56,6 @@ static struct inet_protosw smc_inet_protosw = {
.protocol = IPPROTO_SMC,
.prot = &smc_inet_prot,
.ops = &smc_inet_stream_ops,
- .flags = INET_PROTOSW_ICSK,
};
#if IS_ENABLED(CONFIG_IPV6)
@@ -104,27 +103,15 @@ static struct inet_protosw smc_inet6_protosw = {
.protocol = IPPROTO_SMC,
.prot = &smc_inet6_prot,
.ops = &smc_inet6_stream_ops,
- .flags = INET_PROTOSW_ICSK,
};
#endif /* CONFIG_IPV6 */
-static unsigned int smc_sync_mss(struct sock *sk, u32 pmtu)
-{
- /* No need pass it through to clcsock, mss can always be set by
- * sock_create_kern or smc_setsockopt.
- */
- return 0;
-}
-
static int smc_inet_init_sock(struct sock *sk)
{
struct net *net = sock_net(sk);
/* init common smc sock */
smc_sk_init(net, sk, IPPROTO_SMC);
-
- inet_csk(sk)->icsk_sync_mss = smc_sync_mss;
-
/* create clcsock */
return smc_create_clcsk(net, sk, sk->sk_family);
}
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index 84f98e18c7db..7b228ca2f96a 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -17,7 +17,7 @@
#include "smc_ism.h"
#include "smc_pnet.h"
#include "smc_netlink.h"
-#include "linux/ism.h"
+#include "linux/dibs.h"
struct smcd_dev_list smcd_dev_list = {
.list = LIST_HEAD_INIT(smcd_dev_list.list),
@@ -27,21 +27,24 @@ struct smcd_dev_list smcd_dev_list = {
static bool smc_ism_v2_capable;
static u8 smc_ism_v2_system_eid[SMC_MAX_EID_LEN];
-#if IS_ENABLED(CONFIG_ISM)
-static void smcd_register_dev(struct ism_dev *ism);
-static void smcd_unregister_dev(struct ism_dev *ism);
-static void smcd_handle_event(struct ism_dev *ism, struct ism_event *event);
-static void smcd_handle_irq(struct ism_dev *ism, unsigned int dmbno,
+static void smcd_register_dev(struct dibs_dev *dibs);
+static void smcd_unregister_dev(struct dibs_dev *dibs);
+static void smcd_handle_event(struct dibs_dev *dibs,
+ const struct dibs_event *event);
+static void smcd_handle_irq(struct dibs_dev *dibs, unsigned int dmbno,
u16 dmbemask);
-static struct ism_client smc_ism_client = {
- .name = "SMC-D",
- .add = smcd_register_dev,
- .remove = smcd_unregister_dev,
+static struct dibs_client_ops smc_client_ops = {
+ .add_dev = smcd_register_dev,
+ .del_dev = smcd_unregister_dev,
.handle_event = smcd_handle_event,
.handle_irq = smcd_handle_irq,
};
-#endif
+
+static struct dibs_client smc_dibs_client = {
+ .name = "SMC-D",
+ .ops = &smc_client_ops,
+};
static void smc_ism_create_system_eid(void)
{
@@ -68,8 +71,12 @@ static void smc_ism_create_system_eid(void)
int smc_ism_cantalk(struct smcd_gid *peer_gid, unsigned short vlan_id,
struct smcd_dev *smcd)
{
- return smcd->ops->query_remote_gid(smcd, peer_gid, vlan_id ? 1 : 0,
- vlan_id);
+ struct dibs_dev *dibs = smcd->dibs;
+ uuid_t ism_rgid;
+
+ copy_to_dibsgid(&ism_rgid, peer_gid);
+ return dibs->ops->query_remote_gid(dibs, &ism_rgid, vlan_id ? 1 : 0,
+ vlan_id);
}
void smc_ism_get_system_eid(u8 **eid)
@@ -82,7 +89,7 @@ void smc_ism_get_system_eid(u8 **eid)
u16 smc_ism_get_chid(struct smcd_dev *smcd)
{
- return smcd->ops->get_chid(smcd);
+ return smcd->dibs->ops->get_fabric_id(smcd->dibs);
}
/* HW supports ISM V2 and thus System EID is defined */
@@ -131,7 +138,7 @@ int smc_ism_get_vlan(struct smcd_dev *smcd, unsigned short vlanid)
if (!vlanid) /* No valid vlan id */
return -EINVAL;
- if (!smcd->ops->add_vlan_id)
+ if (!smcd->dibs->ops->add_vlan_id)
return -EOPNOTSUPP;
/* create new vlan entry, in case we need it */
@@ -154,7 +161,7 @@ int smc_ism_get_vlan(struct smcd_dev *smcd, unsigned short vlanid)
/* no existing entry found.
* add new entry to device; might fail, e.g., if HW limit reached
*/
- if (smcd->ops->add_vlan_id(smcd, vlanid)) {
+ if (smcd->dibs->ops->add_vlan_id(smcd->dibs, vlanid)) {
kfree(new_vlan);
rc = -EIO;
goto out;
@@ -178,7 +185,7 @@ int smc_ism_put_vlan(struct smcd_dev *smcd, unsigned short vlanid)
if (!vlanid) /* No valid vlan id */
return -EINVAL;
- if (!smcd->ops->del_vlan_id)
+ if (!smcd->dibs->ops->del_vlan_id)
return -EOPNOTSUPP;
spin_lock_irqsave(&smcd->lock, flags);
@@ -196,7 +203,7 @@ int smc_ism_put_vlan(struct smcd_dev *smcd, unsigned short vlanid)
}
/* Found and the last reference just gone */
- if (smcd->ops->del_vlan_id(smcd, vlanid))
+ if (smcd->dibs->ops->del_vlan_id(smcd->dibs, vlanid))
rc = -EIO;
list_del(&vlan->list);
kfree(vlan);
@@ -205,43 +212,43 @@ out:
return rc;
}
-int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc)
+void smc_ism_unregister_dmb(struct smcd_dev *smcd,
+ struct smc_buf_desc *dmb_desc)
{
- struct smcd_dmb dmb;
- int rc = 0;
+ struct dibs_dmb dmb;
if (!dmb_desc->dma_addr)
- return rc;
+ return;
memset(&dmb, 0, sizeof(dmb));
dmb.dmb_tok = dmb_desc->token;
- dmb.sba_idx = dmb_desc->sba_idx;
+ dmb.idx = dmb_desc->sba_idx;
dmb.cpu_addr = dmb_desc->cpu_addr;
dmb.dma_addr = dmb_desc->dma_addr;
dmb.dmb_len = dmb_desc->len;
- rc = smcd->ops->unregister_dmb(smcd, &dmb);
- if (!rc || rc == ISM_ERROR) {
- dmb_desc->cpu_addr = NULL;
- dmb_desc->dma_addr = 0;
- }
- return rc;
+ smcd->dibs->ops->unregister_dmb(smcd->dibs, &dmb);
+
+ return;
}
int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
struct smc_buf_desc *dmb_desc)
{
- struct smcd_dmb dmb;
+ struct dibs_dev *dibs;
+ struct dibs_dmb dmb;
int rc;
memset(&dmb, 0, sizeof(dmb));
dmb.dmb_len = dmb_len;
- dmb.sba_idx = dmb_desc->sba_idx;
+ dmb.idx = dmb_desc->sba_idx;
dmb.vlan_id = lgr->vlan_id;
- dmb.rgid = lgr->peer_gid.gid;
- rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb, lgr->smcd->client);
+ copy_to_dibsgid(&dmb.rgid, &lgr->peer_gid);
+
+ dibs = lgr->smcd->dibs;
+ rc = dibs->ops->register_dmb(dibs, &dmb, &smc_dibs_client);
if (!rc) {
- dmb_desc->sba_idx = dmb.sba_idx;
+ dmb_desc->sba_idx = dmb.idx;
dmb_desc->token = dmb.dmb_tok;
dmb_desc->cpu_addr = dmb.cpu_addr;
dmb_desc->dma_addr = dmb.dma_addr;
@@ -256,38 +263,39 @@ bool smc_ism_support_dmb_nocopy(struct smcd_dev *smcd)
* merging sndbuf with peer DMB to avoid
* data copies between them.
*/
- return (smcd->ops->support_dmb_nocopy &&
- smcd->ops->support_dmb_nocopy(smcd));
+ return (smcd->dibs->ops->support_mmapped_rdmb &&
+ smcd->dibs->ops->support_mmapped_rdmb(smcd->dibs));
}
int smc_ism_attach_dmb(struct smcd_dev *dev, u64 token,
struct smc_buf_desc *dmb_desc)
{
- struct smcd_dmb dmb;
+ struct dibs_dmb dmb;
int rc = 0;
- if (!dev->ops->attach_dmb)
+ if (!dev->dibs->ops->attach_dmb)
return -EINVAL;
memset(&dmb, 0, sizeof(dmb));
dmb.dmb_tok = token;
- rc = dev->ops->attach_dmb(dev, &dmb);
+ rc = dev->dibs->ops->attach_dmb(dev->dibs, &dmb);
if (!rc) {
- dmb_desc->sba_idx = dmb.sba_idx;
+ dmb_desc->sba_idx = dmb.idx;
dmb_desc->token = dmb.dmb_tok;
dmb_desc->cpu_addr = dmb.cpu_addr;
dmb_desc->dma_addr = dmb.dma_addr;
dmb_desc->len = dmb.dmb_len;
+ dmb_desc->is_attached = true;
}
return rc;
}
int smc_ism_detach_dmb(struct smcd_dev *dev, u64 token)
{
- if (!dev->ops->detach_dmb)
+ if (!dev->dibs->ops->detach_dmb)
return -EINVAL;
- return dev->ops->detach_dmb(dev, token);
+ return dev->dibs->ops->detach_dmb(dev->dibs, token);
}
static int smc_nl_handle_smcd_dev(struct smcd_dev *smcd,
@@ -297,12 +305,12 @@ static int smc_nl_handle_smcd_dev(struct smcd_dev *smcd,
char smc_pnet[SMC_MAX_PNETID_LEN + 1];
struct smc_pci_dev smc_pci_dev;
struct nlattr *port_attrs;
+ struct dibs_dev *dibs;
struct nlattr *attrs;
- struct ism_dev *ism;
int use_cnt = 0;
void *nlh;
- ism = smcd->priv;
+ dibs = smcd->dibs;
nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
&smc_gen_nl_family, NLM_F_MULTI,
SMC_NETLINK_GET_DEV_SMCD);
@@ -317,7 +325,7 @@ static int smc_nl_handle_smcd_dev(struct smcd_dev *smcd,
if (nla_put_u8(skb, SMC_NLA_DEV_IS_CRIT, use_cnt > 0))
goto errattr;
memset(&smc_pci_dev, 0, sizeof(smc_pci_dev));
- smc_set_pci_values(to_pci_dev(ism->dev.parent), &smc_pci_dev);
+ smc_set_pci_values(to_pci_dev(dibs->dev.parent), &smc_pci_dev);
if (nla_put_u32(skb, SMC_NLA_DEV_PCI_FID, smc_pci_dev.pci_fid))
goto errattr;
if (nla_put_u16(skb, SMC_NLA_DEV_PCI_CHID, smc_pci_dev.pci_pchid))
@@ -367,7 +375,7 @@ static void smc_nl_prep_smcd_dev(struct smcd_dev_list *dev_list,
list_for_each_entry(smcd, &dev_list->list, list) {
if (num < snum)
goto next;
- if (smc_ism_is_loopback(smcd))
+ if (smc_ism_is_loopback(smcd->dibs))
goto next;
if (smc_nl_handle_smcd_dev(smcd, skb, cb))
goto errout;
@@ -385,11 +393,10 @@ int smcd_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
-#if IS_ENABLED(CONFIG_ISM)
struct smc_ism_event_work {
struct work_struct work;
struct smcd_dev *smcd;
- struct ism_event event;
+ struct dibs_event event;
};
#define ISM_EVENT_REQUEST 0x0001
@@ -409,25 +416,27 @@ union smcd_sw_event_info {
static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
{
- struct smcd_gid peer_gid = { .gid = wrk->event.tok,
- .gid_ext = 0 };
+ struct dibs_dev *dibs = wrk->smcd->dibs;
union smcd_sw_event_info ev_info;
+ struct smcd_gid peer_gid;
+ uuid_t ism_rgid;
- ev_info.info = wrk->event.info;
- switch (wrk->event.code) {
+ copy_to_smcdgid(&peer_gid, &wrk->event.gid);
+ ev_info.info = wrk->event.data;
+ switch (wrk->event.subtype) {
case ISM_EVENT_CODE_SHUTDOWN: /* Peer shut down DMBs */
smc_smcd_terminate(wrk->smcd, &peer_gid, ev_info.vlan_id);
break;
case ISM_EVENT_CODE_TESTLINK: /* Activity timer */
if (ev_info.code == ISM_EVENT_REQUEST &&
- wrk->smcd->ops->signal_event) {
+ dibs->ops->signal_event) {
ev_info.code = ISM_EVENT_RESPONSE;
- wrk->smcd->ops->signal_event(wrk->smcd,
- &peer_gid,
- ISM_EVENT_REQUEST_IR,
- ISM_EVENT_CODE_TESTLINK,
- ev_info.info);
- }
+ copy_to_dibsgid(&ism_rgid, &peer_gid);
+ dibs->ops->signal_event(dibs, &ism_rgid,
+ ISM_EVENT_REQUEST_IR,
+ ISM_EVENT_CODE_TESTLINK,
+ ev_info.info);
+ }
break;
}
}
@@ -437,41 +446,39 @@ static void smc_ism_event_work(struct work_struct *work)
{
struct smc_ism_event_work *wrk =
container_of(work, struct smc_ism_event_work, work);
- struct smcd_gid smcd_gid = { .gid = wrk->event.tok,
- .gid_ext = 0 };
+ struct smcd_gid smcd_gid;
+
+ copy_to_smcdgid(&smcd_gid, &wrk->event.gid);
switch (wrk->event.type) {
- case ISM_EVENT_GID: /* GID event, token is peer GID */
+ case DIBS_DEV_EVENT: /* GID event, token is peer GID */
smc_smcd_terminate(wrk->smcd, &smcd_gid, VLAN_VID_MASK);
break;
- case ISM_EVENT_DMB:
+ case DIBS_BUF_EVENT:
break;
- case ISM_EVENT_SWR: /* Software defined event */
+ case DIBS_SW_EVENT: /* Software defined event */
smcd_handle_sw_event(wrk);
break;
}
kfree(wrk);
}
-static struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
- const struct smcd_ops *ops, int max_dmbs)
+static struct smcd_dev *smcd_alloc_dev(const char *name, int max_dmbs)
{
struct smcd_dev *smcd;
- smcd = devm_kzalloc(parent, sizeof(*smcd), GFP_KERNEL);
+ smcd = kzalloc(sizeof(*smcd), GFP_KERNEL);
if (!smcd)
return NULL;
- smcd->conn = devm_kcalloc(parent, max_dmbs,
- sizeof(struct smc_connection *), GFP_KERNEL);
+ smcd->conn = kcalloc(max_dmbs, sizeof(struct smc_connection *),
+ GFP_KERNEL);
if (!smcd->conn)
- return NULL;
+ goto free_smcd;
smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)",
WQ_MEM_RECLAIM, name);
if (!smcd->event_wq)
- return NULL;
-
- smcd->ops = ops;
+ goto free_conn;
spin_lock_init(&smcd->lock);
spin_lock_init(&smcd->lgr_lock);
@@ -479,28 +486,37 @@ static struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
INIT_LIST_HEAD(&smcd->lgr_list);
init_waitqueue_head(&smcd->lgrs_deleted);
return smcd;
+
+free_conn:
+ kfree(smcd->conn);
+free_smcd:
+ kfree(smcd);
+ return NULL;
}
-static void smcd_register_dev(struct ism_dev *ism)
+static void smcd_register_dev(struct dibs_dev *dibs)
{
- const struct smcd_ops *ops = ism_get_smcd_ops();
struct smcd_dev *smcd, *fentry;
+ int max_dmbs;
- if (!ops)
- return;
+ max_dmbs = dibs->ops->max_dmbs();
- smcd = smcd_alloc_dev(&ism->pdev->dev, dev_name(&ism->pdev->dev), ops,
- ISM_NR_DMBS);
+ smcd = smcd_alloc_dev(dev_name(&dibs->dev), max_dmbs);
if (!smcd)
return;
- smcd->priv = ism;
- smcd->client = &smc_ism_client;
- ism_set_priv(ism, &smc_ism_client, smcd);
- if (smc_pnetid_by_dev_port(&ism->pdev->dev, 0, smcd->pnetid))
+
+ smcd->dibs = dibs;
+ dibs_set_priv(dibs, &smc_dibs_client, smcd);
+
+ if (smc_pnetid_by_dev_port(dibs->dev.parent, 0, smcd->pnetid))
smc_pnetid_by_table_smcd(smcd);
- if (smcd->ops->supports_v2())
+ if (smc_ism_is_loopback(dibs) ||
+ (dibs->ops->add_vlan_id &&
+ !dibs->ops->add_vlan_id(dibs, ISM_RESERVED_VLANID))) {
smc_ism_set_v2_capable();
+ }
+
mutex_lock(&smcd_dev_list.mutex);
/* sort list:
* - devices without pnetid before devices with pnetid;
@@ -509,7 +525,7 @@ static void smcd_register_dev(struct ism_dev *ism)
if (!smcd->pnetid[0]) {
fentry = list_first_entry_or_null(&smcd_dev_list.list,
struct smcd_dev, list);
- if (fentry && smc_ism_is_loopback(fentry))
+ if (fentry && smc_ism_is_loopback(fentry->dibs))
list_add(&smcd->list, &fentry->list);
else
list_add(&smcd->list, &smcd_dev_list.list);
@@ -518,25 +534,32 @@ static void smcd_register_dev(struct ism_dev *ism)
}
mutex_unlock(&smcd_dev_list.mutex);
- pr_warn_ratelimited("smc: adding smcd device %s with pnetid %.16s%s\n",
- dev_name(&ism->dev), smcd->pnetid,
- smcd->pnetid_by_user ? " (user defined)" : "");
-
+ if (smc_pnet_is_pnetid_set(smcd->pnetid))
+ pr_warn_ratelimited("smc: adding smcd device %s with pnetid %.16s%s\n",
+ dev_name(&dibs->dev), smcd->pnetid,
+ smcd->pnetid_by_user ?
+ " (user defined)" :
+ "");
+ else
+ pr_warn_ratelimited("smc: adding smcd device %s without pnetid\n",
+ dev_name(&dibs->dev));
return;
}
-static void smcd_unregister_dev(struct ism_dev *ism)
+static void smcd_unregister_dev(struct dibs_dev *dibs)
{
- struct smcd_dev *smcd = ism_get_priv(ism, &smc_ism_client);
+ struct smcd_dev *smcd = dibs_get_priv(dibs, &smc_dibs_client);
pr_warn_ratelimited("smc: removing smcd device %s\n",
- dev_name(&ism->dev));
+ dev_name(&dibs->dev));
smcd->going_away = 1;
smc_smcd_terminate_all(smcd);
mutex_lock(&smcd_dev_list.mutex);
list_del_init(&smcd->list);
mutex_unlock(&smcd_dev_list.mutex);
destroy_workqueue(smcd->event_wq);
+ kfree(smcd->conn);
+ kfree(smcd);
}
/* SMCD Device event handler. Called from ISM device interrupt handler.
@@ -550,9 +573,10 @@ static void smcd_unregister_dev(struct ism_dev *ism)
* Context:
* - Function called in IRQ context from ISM device driver event handler.
*/
-static void smcd_handle_event(struct ism_dev *ism, struct ism_event *event)
+static void smcd_handle_event(struct dibs_dev *dibs,
+ const struct dibs_event *event)
{
- struct smcd_dev *smcd = ism_get_priv(ism, &smc_ism_client);
+ struct smcd_dev *smcd = dibs_get_priv(dibs, &smc_dibs_client);
struct smc_ism_event_work *wrk;
if (smcd->going_away)
@@ -574,10 +598,10 @@ static void smcd_handle_event(struct ism_dev *ism, struct ism_event *event)
* Context:
* - Function called in IRQ context from ISM device driver IRQ handler.
*/
-static void smcd_handle_irq(struct ism_dev *ism, unsigned int dmbno,
+static void smcd_handle_irq(struct dibs_dev *dibs, unsigned int dmbno,
u16 dmbemask)
{
- struct smcd_dev *smcd = ism_get_priv(ism, &smc_ism_client);
+ struct smcd_dev *smcd = dibs_get_priv(dibs, &smc_dibs_client);
struct smc_connection *conn = NULL;
unsigned long flags;
@@ -587,27 +611,26 @@ static void smcd_handle_irq(struct ism_dev *ism, unsigned int dmbno,
tasklet_schedule(&conn->rx_tsklet);
spin_unlock_irqrestore(&smcd->lock, flags);
}
-#endif
int smc_ism_signal_shutdown(struct smc_link_group *lgr)
{
int rc = 0;
-#if IS_ENABLED(CONFIG_ISM)
union smcd_sw_event_info ev_info;
+ uuid_t ism_rgid;
if (lgr->peer_shutdown)
return 0;
- if (!lgr->smcd->ops->signal_event)
+ if (!lgr->smcd->dibs->ops->signal_event)
return 0;
memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE);
ev_info.vlan_id = lgr->vlan_id;
ev_info.code = ISM_EVENT_REQUEST;
- rc = lgr->smcd->ops->signal_event(lgr->smcd, &lgr->peer_gid,
+ copy_to_dibsgid(&ism_rgid, &lgr->peer_gid);
+ rc = lgr->smcd->dibs->ops->signal_event(lgr->smcd->dibs, &ism_rgid,
ISM_EVENT_REQUEST_IR,
ISM_EVENT_CODE_SHUTDOWN,
ev_info.info);
-#endif
return rc;
}
@@ -618,15 +641,11 @@ int smc_ism_init(void)
smc_ism_v2_capable = false;
smc_ism_create_system_eid();
-#if IS_ENABLED(CONFIG_ISM)
- rc = ism_register_client(&smc_ism_client);
-#endif
+ rc = dibs_register_client(&smc_dibs_client);
return rc;
}
void smc_ism_exit(void)
{
-#if IS_ENABLED(CONFIG_ISM)
- ism_unregister_client(&smc_ism_client);
-#endif
+ dibs_unregister_client(&smc_dibs_client);
}
diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h
index 6763133dd8d0..a1575e31df73 100644
--- a/net/smc/smc_ism.h
+++ b/net/smc/smc_ism.h
@@ -12,6 +12,7 @@
#include <linux/uio.h>
#include <linux/types.h>
#include <linux/mutex.h>
+#include <linux/dibs.h>
#include "smc.h"
@@ -47,7 +48,8 @@ int smc_ism_get_vlan(struct smcd_dev *dev, unsigned short vlan_id);
int smc_ism_put_vlan(struct smcd_dev *dev, unsigned short vlan_id);
int smc_ism_register_dmb(struct smc_link_group *lgr, int buf_size,
struct smc_buf_desc *dmb_desc);
-int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc);
+void smc_ism_unregister_dmb(struct smcd_dev *dev,
+ struct smc_buf_desc *dmb_desc);
bool smc_ism_support_dmb_nocopy(struct smcd_dev *smcd);
int smc_ism_attach_dmb(struct smcd_dev *dev, u64 token,
struct smc_buf_desc *dmb_desc);
@@ -67,7 +69,9 @@ static inline int smc_ism_write(struct smcd_dev *smcd, u64 dmb_tok,
{
int rc;
- rc = smcd->ops->move_data(smcd, dmb_tok, idx, sf, offset, data, len);
+ rc = smcd->dibs->ops->move_data(smcd->dibs, dmb_tok, idx, sf, offset,
+ data, len);
+
return rc < 0 ? rc : 0;
}
@@ -84,14 +88,36 @@ static inline bool __smc_ism_is_emulated(u16 chid)
static inline bool smc_ism_is_emulated(struct smcd_dev *smcd)
{
- u16 chid = smcd->ops->get_chid(smcd);
+ u16 chid = smcd->dibs->ops->get_fabric_id(smcd->dibs);
return __smc_ism_is_emulated(chid);
}
-static inline bool smc_ism_is_loopback(struct smcd_dev *smcd)
+static inline bool smc_ism_is_loopback(struct dibs_dev *dibs)
{
- return (smcd->ops->get_chid(smcd) == 0xFFFF);
+ return (dibs->ops->get_fabric_id(dibs) == DIBS_LOOPBACK_FABRIC);
+}
+
+static inline void copy_to_smcdgid(struct smcd_gid *sgid, uuid_t *dibs_gid)
+{
+ __be64 temp;
+
+ memcpy(&temp, dibs_gid, sizeof(sgid->gid));
+ sgid->gid = ntohll(temp);
+ memcpy(&temp, (uint8_t *)dibs_gid + sizeof(sgid->gid),
+ sizeof(sgid->gid_ext));
+ sgid->gid_ext = ntohll(temp);
+}
+
+static inline void copy_to_dibsgid(uuid_t *dibs_gid, struct smcd_gid *sgid)
+{
+ __be64 temp;
+
+ temp = htonll(sgid->gid);
+ memcpy(dibs_gid, &temp, sizeof(sgid->gid));
+ temp = htonll(sgid->gid_ext);
+ memcpy((uint8_t *)dibs_gid + sizeof(sgid->gid), &temp,
+ sizeof(sgid->gid_ext));
}
#endif
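
Note on the GID helpers above: copy_to_smcdgid() and copy_to_dibsgid() treat the 16-byte dibs uuid_t as one big-endian value and split it into the two 64-bit halves of struct smcd_gid (gid first, then gid_ext). A minimal standalone sketch of that byte layout in userspace C — be64toh()/htobe64() stand in for the kernel's ntohll()/htonll(), and every name below is illustrative only, not part of the patch:

/* Standalone sketch, not kernel code: a 16-byte GID interpreted as two
 * big-endian 64-bit halves, mirroring struct smcd_gid. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sketch_gid {
	uint64_t gid;
	uint64_t gid_ext;
};

static void sketch_to_halves(struct sketch_gid *s, const uint8_t uuid[16])
{
	uint64_t tmp;

	memcpy(&tmp, uuid, 8);		/* first 8 bytes -> gid */
	s->gid = be64toh(tmp);
	memcpy(&tmp, uuid + 8, 8);	/* last 8 bytes -> gid_ext */
	s->gid_ext = be64toh(tmp);
}

static void sketch_to_uuid(uint8_t uuid[16], const struct sketch_gid *s)
{
	uint64_t tmp;

	tmp = htobe64(s->gid);
	memcpy(uuid, &tmp, 8);
	tmp = htobe64(s->gid_ext);
	memcpy(uuid + 8, &tmp, 8);
}

int main(void)
{
	uint8_t uuid[16] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
			     0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
	struct sketch_gid s;

	sketch_to_halves(&s, uuid);	/* gid=0x0001020304050607, gid_ext=0x08090a0b0c0d0e0f */
	printf("gid=%016llx gid_ext=%016llx\n",
	       (unsigned long long)s.gid, (unsigned long long)s.gid_ext);
	sketch_to_uuid(uuid, &s);	/* round-trips to the original bytes */
	return 0;
}
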
diff --git a/net/smc/smc_loopback.c b/net/smc/smc_loopback.c
deleted file mode 100644
index 3c5f64ca4115..000000000000
--- a/net/smc/smc_loopback.c
+++ /dev/null
@@ -1,427 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Shared Memory Communications Direct over loopback-ism device.
- *
- * Functions for loopback-ism device.
- *
- * Copyright (c) 2024, Alibaba Inc.
- *
- * Author: Wen Gu <guwen@linux.alibaba.com>
- * Tony Lu <tonylu@linux.alibaba.com>
- *
- */
-
-#include <linux/device.h>
-#include <linux/types.h>
-#include <net/smc.h>
-
-#include "smc_cdc.h"
-#include "smc_ism.h"
-#include "smc_loopback.h"
-
-#define SMC_LO_V2_CAPABLE 0x1 /* loopback-ism acts as ISMv2 */
-#define SMC_LO_SUPPORT_NOCOPY 0x1
-#define SMC_DMA_ADDR_INVALID (~(dma_addr_t)0)
-
-static const char smc_lo_dev_name[] = "loopback-ism";
-static struct smc_lo_dev *lo_dev;
-
-static void smc_lo_generate_ids(struct smc_lo_dev *ldev)
-{
- struct smcd_gid *lgid = &ldev->local_gid;
- uuid_t uuid;
-
- uuid_gen(&uuid);
- memcpy(&lgid->gid, &uuid, sizeof(lgid->gid));
- memcpy(&lgid->gid_ext, (u8 *)&uuid + sizeof(lgid->gid),
- sizeof(lgid->gid_ext));
-
- ldev->chid = SMC_LO_RESERVED_CHID;
-}
-
-static int smc_lo_query_rgid(struct smcd_dev *smcd, struct smcd_gid *rgid,
- u32 vid_valid, u32 vid)
-{
- struct smc_lo_dev *ldev = smcd->priv;
-
- /* rgid should be the same as lgid */
- if (!ldev || rgid->gid != ldev->local_gid.gid ||
- rgid->gid_ext != ldev->local_gid.gid_ext)
- return -ENETUNREACH;
- return 0;
-}
-
-static int smc_lo_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
- void *client_priv)
-{
- struct smc_lo_dmb_node *dmb_node, *tmp_node;
- struct smc_lo_dev *ldev = smcd->priv;
- int sba_idx, rc;
-
- /* check space for new dmb */
- for_each_clear_bit(sba_idx, ldev->sba_idx_mask, SMC_LO_MAX_DMBS) {
- if (!test_and_set_bit(sba_idx, ldev->sba_idx_mask))
- break;
- }
- if (sba_idx == SMC_LO_MAX_DMBS)
- return -ENOSPC;
-
- dmb_node = kzalloc(sizeof(*dmb_node), GFP_KERNEL);
- if (!dmb_node) {
- rc = -ENOMEM;
- goto err_bit;
- }
-
- dmb_node->sba_idx = sba_idx;
- dmb_node->len = dmb->dmb_len;
- dmb_node->cpu_addr = kzalloc(dmb_node->len, GFP_KERNEL |
- __GFP_NOWARN | __GFP_NORETRY |
- __GFP_NOMEMALLOC);
- if (!dmb_node->cpu_addr) {
- rc = -ENOMEM;
- goto err_node;
- }
- dmb_node->dma_addr = SMC_DMA_ADDR_INVALID;
- refcount_set(&dmb_node->refcnt, 1);
-
-again:
- /* add new dmb into hash table */
- get_random_bytes(&dmb_node->token, sizeof(dmb_node->token));
- write_lock_bh(&ldev->dmb_ht_lock);
- hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_node->token) {
- if (tmp_node->token == dmb_node->token) {
- write_unlock_bh(&ldev->dmb_ht_lock);
- goto again;
- }
- }
- hash_add(ldev->dmb_ht, &dmb_node->list, dmb_node->token);
- write_unlock_bh(&ldev->dmb_ht_lock);
- atomic_inc(&ldev->dmb_cnt);
-
- dmb->sba_idx = dmb_node->sba_idx;
- dmb->dmb_tok = dmb_node->token;
- dmb->cpu_addr = dmb_node->cpu_addr;
- dmb->dma_addr = dmb_node->dma_addr;
- dmb->dmb_len = dmb_node->len;
-
- return 0;
-
-err_node:
- kfree(dmb_node);
-err_bit:
- clear_bit(sba_idx, ldev->sba_idx_mask);
- return rc;
-}
-
-static void __smc_lo_unregister_dmb(struct smc_lo_dev *ldev,
- struct smc_lo_dmb_node *dmb_node)
-{
- /* remove dmb from hash table */
- write_lock_bh(&ldev->dmb_ht_lock);
- hash_del(&dmb_node->list);
- write_unlock_bh(&ldev->dmb_ht_lock);
-
- clear_bit(dmb_node->sba_idx, ldev->sba_idx_mask);
- kvfree(dmb_node->cpu_addr);
- kfree(dmb_node);
-
- if (atomic_dec_and_test(&ldev->dmb_cnt))
- wake_up(&ldev->ldev_release);
-}
-
-static int smc_lo_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
-{
- struct smc_lo_dmb_node *dmb_node = NULL, *tmp_node;
- struct smc_lo_dev *ldev = smcd->priv;
-
- /* find dmb from hash table */
- read_lock_bh(&ldev->dmb_ht_lock);
- hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
- if (tmp_node->token == dmb->dmb_tok) {
- dmb_node = tmp_node;
- break;
- }
- }
- if (!dmb_node) {
- read_unlock_bh(&ldev->dmb_ht_lock);
- return -EINVAL;
- }
- read_unlock_bh(&ldev->dmb_ht_lock);
-
- if (refcount_dec_and_test(&dmb_node->refcnt))
- __smc_lo_unregister_dmb(ldev, dmb_node);
- return 0;
-}
-
-static int smc_lo_support_dmb_nocopy(struct smcd_dev *smcd)
-{
- return SMC_LO_SUPPORT_NOCOPY;
-}
-
-static int smc_lo_attach_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
-{
- struct smc_lo_dmb_node *dmb_node = NULL, *tmp_node;
- struct smc_lo_dev *ldev = smcd->priv;
-
- /* find dmb_node according to dmb->dmb_tok */
- read_lock_bh(&ldev->dmb_ht_lock);
- hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
- if (tmp_node->token == dmb->dmb_tok) {
- dmb_node = tmp_node;
- break;
- }
- }
- if (!dmb_node) {
- read_unlock_bh(&ldev->dmb_ht_lock);
- return -EINVAL;
- }
- read_unlock_bh(&ldev->dmb_ht_lock);
-
- if (!refcount_inc_not_zero(&dmb_node->refcnt))
- /* the dmb is being unregistered, but has
- * not been removed from the hash table.
- */
- return -EINVAL;
-
- /* provide dmb information */
- dmb->sba_idx = dmb_node->sba_idx;
- dmb->dmb_tok = dmb_node->token;
- dmb->cpu_addr = dmb_node->cpu_addr;
- dmb->dma_addr = dmb_node->dma_addr;
- dmb->dmb_len = dmb_node->len;
- return 0;
-}
-
-static int smc_lo_detach_dmb(struct smcd_dev *smcd, u64 token)
-{
- struct smc_lo_dmb_node *dmb_node = NULL, *tmp_node;
- struct smc_lo_dev *ldev = smcd->priv;
-
- /* find dmb_node according to dmb->dmb_tok */
- read_lock_bh(&ldev->dmb_ht_lock);
- hash_for_each_possible(ldev->dmb_ht, tmp_node, list, token) {
- if (tmp_node->token == token) {
- dmb_node = tmp_node;
- break;
- }
- }
- if (!dmb_node) {
- read_unlock_bh(&ldev->dmb_ht_lock);
- return -EINVAL;
- }
- read_unlock_bh(&ldev->dmb_ht_lock);
-
- if (refcount_dec_and_test(&dmb_node->refcnt))
- __smc_lo_unregister_dmb(ldev, dmb_node);
- return 0;
-}
-
-static int smc_lo_move_data(struct smcd_dev *smcd, u64 dmb_tok,
- unsigned int idx, bool sf, unsigned int offset,
- void *data, unsigned int size)
-{
- struct smc_lo_dmb_node *rmb_node = NULL, *tmp_node;
- struct smc_lo_dev *ldev = smcd->priv;
- struct smc_connection *conn;
-
- if (!sf)
- /* since sndbuf is merged with peer DMB, there is
- * no need to copy data from sndbuf to peer DMB.
- */
- return 0;
-
- read_lock_bh(&ldev->dmb_ht_lock);
- hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_tok) {
- if (tmp_node->token == dmb_tok) {
- rmb_node = tmp_node;
- break;
- }
- }
- if (!rmb_node) {
- read_unlock_bh(&ldev->dmb_ht_lock);
- return -EINVAL;
- }
- memcpy((char *)rmb_node->cpu_addr + offset, data, size);
- read_unlock_bh(&ldev->dmb_ht_lock);
-
- conn = smcd->conn[rmb_node->sba_idx];
- if (!conn || conn->killed)
- return -EPIPE;
- tasklet_schedule(&conn->rx_tsklet);
- return 0;
-}
-
-static int smc_lo_supports_v2(void)
-{
- return SMC_LO_V2_CAPABLE;
-}
-
-static void smc_lo_get_local_gid(struct smcd_dev *smcd,
- struct smcd_gid *smcd_gid)
-{
- struct smc_lo_dev *ldev = smcd->priv;
-
- smcd_gid->gid = ldev->local_gid.gid;
- smcd_gid->gid_ext = ldev->local_gid.gid_ext;
-}
-
-static u16 smc_lo_get_chid(struct smcd_dev *smcd)
-{
- return ((struct smc_lo_dev *)smcd->priv)->chid;
-}
-
-static struct device *smc_lo_get_dev(struct smcd_dev *smcd)
-{
- return &((struct smc_lo_dev *)smcd->priv)->dev;
-}
-
-static const struct smcd_ops lo_ops = {
- .query_remote_gid = smc_lo_query_rgid,
- .register_dmb = smc_lo_register_dmb,
- .unregister_dmb = smc_lo_unregister_dmb,
- .support_dmb_nocopy = smc_lo_support_dmb_nocopy,
- .attach_dmb = smc_lo_attach_dmb,
- .detach_dmb = smc_lo_detach_dmb,
- .add_vlan_id = NULL,
- .del_vlan_id = NULL,
- .set_vlan_required = NULL,
- .reset_vlan_required = NULL,
- .signal_event = NULL,
- .move_data = smc_lo_move_data,
- .supports_v2 = smc_lo_supports_v2,
- .get_local_gid = smc_lo_get_local_gid,
- .get_chid = smc_lo_get_chid,
- .get_dev = smc_lo_get_dev,
-};
-
-static struct smcd_dev *smcd_lo_alloc_dev(const struct smcd_ops *ops,
- int max_dmbs)
-{
- struct smcd_dev *smcd;
-
- smcd = kzalloc(sizeof(*smcd), GFP_KERNEL);
- if (!smcd)
- return NULL;
-
- smcd->conn = kcalloc(max_dmbs, sizeof(struct smc_connection *),
- GFP_KERNEL);
- if (!smcd->conn)
- goto out_smcd;
-
- smcd->ops = ops;
-
- spin_lock_init(&smcd->lock);
- spin_lock_init(&smcd->lgr_lock);
- INIT_LIST_HEAD(&smcd->vlan);
- INIT_LIST_HEAD(&smcd->lgr_list);
- init_waitqueue_head(&smcd->lgrs_deleted);
- return smcd;
-
-out_smcd:
- kfree(smcd);
- return NULL;
-}
-
-static int smcd_lo_register_dev(struct smc_lo_dev *ldev)
-{
- struct smcd_dev *smcd;
-
- smcd = smcd_lo_alloc_dev(&lo_ops, SMC_LO_MAX_DMBS);
- if (!smcd)
- return -ENOMEM;
- ldev->smcd = smcd;
- smcd->priv = ldev;
- smc_ism_set_v2_capable();
- mutex_lock(&smcd_dev_list.mutex);
- list_add(&smcd->list, &smcd_dev_list.list);
- mutex_unlock(&smcd_dev_list.mutex);
- pr_warn_ratelimited("smc: adding smcd device %s\n",
- dev_name(&ldev->dev));
- return 0;
-}
-
-static void smcd_lo_unregister_dev(struct smc_lo_dev *ldev)
-{
- struct smcd_dev *smcd = ldev->smcd;
-
- pr_warn_ratelimited("smc: removing smcd device %s\n",
- dev_name(&ldev->dev));
- smcd->going_away = 1;
- smc_smcd_terminate_all(smcd);
- mutex_lock(&smcd_dev_list.mutex);
- list_del_init(&smcd->list);
- mutex_unlock(&smcd_dev_list.mutex);
- kfree(smcd->conn);
- kfree(smcd);
-}
-
-static int smc_lo_dev_init(struct smc_lo_dev *ldev)
-{
- smc_lo_generate_ids(ldev);
- rwlock_init(&ldev->dmb_ht_lock);
- hash_init(ldev->dmb_ht);
- atomic_set(&ldev->dmb_cnt, 0);
- init_waitqueue_head(&ldev->ldev_release);
-
- return smcd_lo_register_dev(ldev);
-}
-
-static void smc_lo_dev_exit(struct smc_lo_dev *ldev)
-{
- smcd_lo_unregister_dev(ldev);
- if (atomic_read(&ldev->dmb_cnt))
- wait_event(ldev->ldev_release, !atomic_read(&ldev->dmb_cnt));
-}
-
-static void smc_lo_dev_release(struct device *dev)
-{
- struct smc_lo_dev *ldev =
- container_of(dev, struct smc_lo_dev, dev);
-
- kfree(ldev);
-}
-
-static int smc_lo_dev_probe(void)
-{
- struct smc_lo_dev *ldev;
- int ret;
-
- ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
- if (!ldev)
- return -ENOMEM;
-
- ldev->dev.parent = NULL;
- ldev->dev.release = smc_lo_dev_release;
- device_initialize(&ldev->dev);
- dev_set_name(&ldev->dev, smc_lo_dev_name);
-
- ret = smc_lo_dev_init(ldev);
- if (ret)
- goto free_dev;
-
- lo_dev = ldev; /* global loopback device */
- return 0;
-
-free_dev:
- put_device(&ldev->dev);
- return ret;
-}
-
-static void smc_lo_dev_remove(void)
-{
- if (!lo_dev)
- return;
-
- smc_lo_dev_exit(lo_dev);
- put_device(&lo_dev->dev); /* device_initialize in smc_lo_dev_probe */
-}
-
-int smc_loopback_init(void)
-{
- return smc_lo_dev_probe();
-}
-
-void smc_loopback_exit(void)
-{
- smc_lo_dev_remove();
-}
diff --git a/net/smc/smc_loopback.h b/net/smc/smc_loopback.h
deleted file mode 100644
index 04dc6808d2e1..000000000000
--- a/net/smc/smc_loopback.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Shared Memory Communications Direct over loopback-ism device.
- *
- * SMC-D loopback-ism device structure definitions.
- *
- * Copyright (c) 2024, Alibaba Inc.
- *
- * Author: Wen Gu <guwen@linux.alibaba.com>
- * Tony Lu <tonylu@linux.alibaba.com>
- *
- */
-
-#ifndef _SMC_LOOPBACK_H
-#define _SMC_LOOPBACK_H
-
-#include <linux/device.h>
-#include <net/smc.h>
-
-#if IS_ENABLED(CONFIG_SMC_LO)
-#define SMC_LO_MAX_DMBS 5000
-#define SMC_LO_DMBS_HASH_BITS 12
-#define SMC_LO_RESERVED_CHID 0xFFFF
-
-struct smc_lo_dmb_node {
- struct hlist_node list;
- u64 token;
- u32 len;
- u32 sba_idx;
- void *cpu_addr;
- dma_addr_t dma_addr;
- refcount_t refcnt;
-};
-
-struct smc_lo_dev {
- struct smcd_dev *smcd;
- struct device dev;
- u16 chid;
- struct smcd_gid local_gid;
- atomic_t dmb_cnt;
- rwlock_t dmb_ht_lock;
- DECLARE_BITMAP(sba_idx_mask, SMC_LO_MAX_DMBS);
- DECLARE_HASHTABLE(dmb_ht, SMC_LO_DMBS_HASH_BITS);
- wait_queue_head_t ldev_release;
-};
-
-int smc_loopback_init(void);
-void smc_loopback_exit(void);
-#else
-static inline int smc_loopback_init(void)
-{
- return 0;
-}
-
-static inline void smc_loopback_exit(void)
-{
-}
-#endif
-
-#endif /* _SMC_LOOPBACK_H */
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index b391c2ef463f..a3a1e1fde8eb 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -169,7 +169,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
pr_warn_ratelimited("smc: smcd device %s "
"erased user defined pnetid "
"%.16s\n",
- dev_name(smcd->ops->get_dev(smcd)),
+ dev_name(&smcd->dibs->dev),
smcd->pnetid);
memset(smcd->pnetid, 0, SMC_MAX_PNETID_LEN);
smcd->pnetid_by_user = false;
@@ -332,8 +332,11 @@ static struct smcd_dev *smc_pnet_find_smcd(char *smcd_name)
mutex_lock(&smcd_dev_list.mutex);
list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) {
- if (!strncmp(dev_name(smcd_dev->ops->get_dev(smcd_dev)),
- smcd_name, IB_DEVICE_NAME_MAX - 1))
+ if (!strncmp(dev_name(&smcd_dev->dibs->dev), smcd_name,
+ IB_DEVICE_NAME_MAX - 1) ||
+ (smcd_dev->dibs->dev.parent &&
+ !strncmp(dev_name(smcd_dev->dibs->dev.parent), smcd_name,
+ IB_DEVICE_NAME_MAX - 1)))
goto out;
}
smcd_dev = NULL;
@@ -370,7 +373,7 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net,
goto out_put;
new_pe->type = SMC_PNET_ETH;
memcpy(new_pe->pnet_name, pnet_name, SMC_MAX_PNETID_LEN);
- strncpy(new_pe->eth_name, eth_name, IFNAMSIZ);
+ strscpy(new_pe->eth_name, eth_name);
rc = -EEXIST;
new_netdev = true;
mutex_lock(&pnettable->lock);
@@ -413,7 +416,6 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name,
bool smcddev_applied = true;
bool ibdev_applied = true;
struct smcd_dev *smcd;
- struct device *dev;
bool new_ibdev;
/* try to apply the pnetid to active devices */
@@ -431,10 +433,8 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name,
if (smcd) {
smcddev_applied = smc_pnet_apply_smcd(smcd, pnet_name);
if (smcddev_applied) {
- dev = smcd->ops->get_dev(smcd);
- pr_warn_ratelimited("smc: smcd device %s "
- "applied user defined pnetid "
- "%.16s\n", dev_name(dev),
+ pr_warn_ratelimited("smc: smcd device %s applied user defined pnetid %.16s\n",
+ dev_name(&smcd->dibs->dev),
smcd->pnetid);
}
}
@@ -450,7 +450,7 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name,
return -ENOMEM;
new_pe->type = SMC_PNET_IB;
memcpy(new_pe->pnet_name, pnet_name, SMC_MAX_PNETID_LEN);
- strncpy(new_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX);
+ strscpy(new_pe->ib_name, ib_name);
new_pe->ib_port = ib_port;
new_ibdev = true;
@@ -1126,37 +1126,38 @@ static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
*/
void smc_pnet_find_roce_resource(struct sock *sk, struct smc_init_info *ini)
{
- struct dst_entry *dst = sk_dst_get(sk);
-
- if (!dst)
- goto out;
- if (!dst->dev)
- goto out_rel;
+ struct net_device *dev;
+ struct dst_entry *dst;
- smc_pnet_find_roce_by_pnetid(dst->dev, ini);
+ rcu_read_lock();
+ dst = __sk_dst_get(sk);
+ dev = dst ? dst_dev_rcu(dst) : NULL;
+ dev_hold(dev);
+ rcu_read_unlock();
-out_rel:
- dst_release(dst);
-out:
- return;
+ if (dev) {
+ smc_pnet_find_roce_by_pnetid(dev, ini);
+ dev_put(dev);
+ }
}
void smc_pnet_find_ism_resource(struct sock *sk, struct smc_init_info *ini)
{
- struct dst_entry *dst = sk_dst_get(sk);
+ struct net_device *dev;
+ struct dst_entry *dst;
ini->ism_dev[0] = NULL;
- if (!dst)
- goto out;
- if (!dst->dev)
- goto out_rel;
- smc_pnet_find_ism_by_pnetid(dst->dev, ini);
+ rcu_read_lock();
+ dst = __sk_dst_get(sk);
+ dev = dst ? dst_dev_rcu(dst) : NULL;
+ dev_hold(dev);
+ rcu_read_unlock();
-out_rel:
- dst_release(dst);
-out:
- return;
+ if (dev) {
+ smc_pnet_find_ism_by_pnetid(dev, ini);
+ dev_put(dev);
+ }
}
/* Lookup and apply a pnet table entry to the given ib device.
@@ -1192,7 +1193,6 @@ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port)
*/
int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev)
{
- const char *ib_name = dev_name(smcddev->ops->get_dev(smcddev));
struct smc_pnettable *pnettable;
struct smc_pnetentry *tmp_pe;
struct smc_net *sn;
@@ -1205,7 +1205,13 @@ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev)
mutex_lock(&pnettable->lock);
list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
if (tmp_pe->type == SMC_PNET_IB &&
- !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) {
+ (!strncmp(tmp_pe->ib_name,
+ dev_name(&smcddev->dibs->dev),
+ sizeof(tmp_pe->ib_name)) ||
+ (smcddev->dibs->dev.parent &&
+ !strncmp(tmp_pe->ib_name,
+ dev_name(smcddev->dibs->dev.parent),
+ sizeof(tmp_pe->ib_name))))) {
smc_pnet_apply_smcd(smcddev, tmp_pe->pnet_name);
rc = 0;
break;
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 214ac3cbcf9a..3144b4b1fe29 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -426,6 +426,9 @@ static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len,
int srcchunk, dstchunk;
int rc;
+ if (conn->sndbuf_desc->is_attached)
+ return 0;
+
for (dstchunk = 0; dstchunk < 2; dstchunk++) {
for (srcchunk = 0; srcchunk < 2; srcchunk++) {
void *data = conn->sndbuf_desc->cpu_addr + src_off;
diff --git a/net/socket.c b/net/socket.c
index 9a0e720f0859..e8892b218708 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -276,28 +276,41 @@ int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *k
static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen,
void __user *uaddr, int __user *ulen)
{
- int err;
int len;
BUG_ON(klen > sizeof(struct sockaddr_storage));
- err = get_user(len, ulen);
- if (err)
- return err;
+
+ if (can_do_masked_user_access())
+ ulen = masked_user_access_begin(ulen);
+ else if (!user_access_begin(ulen, 4))
+ return -EFAULT;
+
+ unsafe_get_user(len, ulen, efault_end);
+
if (len > klen)
len = klen;
- if (len < 0)
- return -EINVAL;
+ /*
+ * "fromlen shall refer to the value before truncation.."
+ * 1003.1g
+ */
+ if (len >= 0)
+ unsafe_put_user(klen, ulen, efault_end);
+
+ user_access_end();
+
if (len) {
+ if (len < 0)
+ return -EINVAL;
if (audit_sockaddr(klen, kaddr))
return -ENOMEM;
if (copy_to_user(uaddr, kaddr, len))
return -EFAULT;
}
- /*
- * "fromlen shall refer to the value before truncation.."
- * 1003.1g
- */
- return __put_user(klen, ulen);
+ return 0;
+
+efault_end:
+ user_access_end();
+ return -EFAULT;
}
static struct kmem_cache *sock_inode_cachep __ro_after_init;
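
The rewritten move_addr_to_user() keeps the 1003.1g rule that the reported address length is the length before truncation, now under a single user_access_begin()/unsafe_get_user()/unsafe_put_user() section instead of separate get_user()/__put_user() calls. From userspace the rule is visible through calls such as getpeername(); a minimal caller-side sketch (standard POSIX sockets, not part of this patch):

/* Userspace sketch: the kernel reports the peer address's full length even
 * when it truncates the copy, so truncation is detected by comparing the
 * returned length against the buffer that was passed in. */
#include <sys/socket.h>

static int peer_addr_truncated(int fd)
{
	struct sockaddr sa;		/* deliberately small buffer */
	socklen_t len = sizeof(sa);

	if (getpeername(fd, &sa, &len) < 0)
		return -1;
	/* e.g. an IPv6 peer (struct sockaddr_in6, 28 bytes) does not fit */
	return len > sizeof(sa);
}
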
@@ -592,10 +605,12 @@ static int sockfs_setattr(struct mnt_idmap *idmap,
if (!err && (iattr->ia_valid & ATTR_UID)) {
struct socket *sock = SOCKET_I(d_inode(dentry));
- if (sock->sk)
- sock->sk->sk_uid = iattr->ia_uid;
- else
+ if (sock->sk) {
+ /* Paired with READ_ONCE() in sk_uid() */
+ WRITE_ONCE(sock->sk->sk_uid, iattr->ia_uid);
+ } else {
err = -ENOENT;
+ }
}
return err;
@@ -843,6 +858,52 @@ static void put_ts_pktinfo(struct msghdr *msg, struct sk_buff *skb,
sizeof(ts_pktinfo), &ts_pktinfo);
}
+bool skb_has_tx_timestamp(struct sk_buff *skb, const struct sock *sk)
+{
+ const struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
+ u32 tsflags = READ_ONCE(sk->sk_tsflags);
+
+ if (serr->ee.ee_errno != ENOMSG ||
+ serr->ee.ee_origin != SO_EE_ORIGIN_TIMESTAMPING)
+ return false;
+
+ /* software time stamp available and wanted */
+ if ((tsflags & SOF_TIMESTAMPING_SOFTWARE) && skb->tstamp)
+ return true;
+ /* hardware time stamps available and wanted */
+ return (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
+ skb_hwtstamps(skb)->hwtstamp;
+}
+
+int skb_get_tx_timestamp(struct sk_buff *skb, struct sock *sk,
+ struct timespec64 *ts)
+{
+ u32 tsflags = READ_ONCE(sk->sk_tsflags);
+ ktime_t hwtstamp;
+ int if_index = 0;
+
+ if ((tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
+ ktime_to_timespec64_cond(skb->tstamp, ts))
+ return SOF_TIMESTAMPING_TX_SOFTWARE;
+
+ if (!(tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) ||
+ skb_is_swtx_tstamp(skb, false))
+ return -ENOENT;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_NETDEV)
+ hwtstamp = get_timestamp(sk, skb, &if_index);
+ else
+ hwtstamp = skb_hwtstamps(skb)->hwtstamp;
+
+ if (tsflags & SOF_TIMESTAMPING_BIND_PHC)
+ hwtstamp = ptp_convert_timestamp(&hwtstamp,
+ READ_ONCE(sk->sk_bind_phc));
+ if (!ktime_to_timespec64_cond(hwtstamp, ts))
+ return -ENOENT;
+
+ return SOF_TIMESTAMPING_TX_HARDWARE;
+}
+
/*
* called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
*/
@@ -1128,6 +1189,9 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (sock->type == SOCK_SEQPACKET)
msg.msg_flags |= MSG_EOR;
+ if (iocb->ki_flags & IOCB_NOSIGNAL)
+ msg.msg_flags |= MSG_NOSIGNAL;
+
res = __sock_sendmsg(sock, &msg);
*from = msg.msg_iter;
return res;
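
The skb_has_tx_timestamp()/skb_get_tx_timestamp() helpers added above factor out the check for a pending transmit timestamp and its conversion to a timespec64 (software stamp first, then raw hardware, optionally converted to a bound PHC). On the userspace side such timestamps are requested with SO_TIMESTAMPING and read back from the socket error queue; a hedged sketch using only the long-standing ABI from linux/net_tstamp.h and linux/errqueue.h (error handling trimmed):

/* Userspace sketch: request software TX timestamps and read one back
 * from the error queue as an SCM_TIMESTAMPING control message. */
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int enable_tx_timestamps(int fd)
{
	unsigned int flags = SOF_TIMESTAMPING_TX_SOFTWARE |
			     SOF_TIMESTAMPING_SOFTWARE;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}

static void read_tx_timestamp(int fd)
{
	char data[256], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_SOCKET &&
		    cm->cmsg_type == SCM_TIMESTAMPING) {
			struct scm_timestamping ts;

			memcpy(&ts, CMSG_DATA(cm), sizeof(ts));
			/* ts.ts[0] holds the software transmit timestamp */
			printf("tx sw stamp: %lld.%09ld\n",
			       (long long)ts.ts[0].tv_sec, ts.ts[0].tv_nsec);
		}
	}
}
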
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index d946bfb424c7..43b1f558b33d 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -333,7 +333,7 @@ static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
struct strparser *strp = (struct strparser *)desc->arg.data;
return __strp_recv(desc, orig_skb, orig_offset, orig_len,
- strp->sk->sk_rcvbuf, strp->sk->sk_rcvtimeo);
+ strp->sk->sk_rcvbuf, READ_ONCE(strp->sk->sk_rcvtimeo));
}
static int default_read_sock_done(struct strparser *strp, int err)
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index 2d8b67dac7b5..984e0cf9bf8a 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -18,9 +18,10 @@ config SUNRPC_SWAP
config RPCSEC_GSS_KRB5
tristate "Secure RPC: Kerberos V mechanism"
- depends on SUNRPC && CRYPTO
+ depends on SUNRPC
default y
select SUNRPC_GSS
+ select CRYPTO
select CRYPTO_SKCIPHER
select CRYPTO_HASH
help
@@ -101,6 +102,20 @@ config SUNRPC_DEBUG
If unsure, say Y.
+config SUNRPC_DEBUG_TRACE
+ bool "RPC: Send dfprintk() output to the trace buffer"
+ depends on SUNRPC_DEBUG && TRACING
+ default n
+ help
+ dprintk() output can be voluminous, which can overwhelm the
+ kernel's logging facility as it must be sent to the console.
+ This option causes dprintk() output to go to the trace buffer
+ instead of the kernel log.
+
+ This will cause warnings about trace_printk() being used to be
+ logged at boot time, so say N unless you are debugging a problem
+ with sunrpc-based clients or services.
+
config SUNRPC_XPRT_RDMA
tristate "RPC-over-RDMA transport"
depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 0fa244f16876..5c095cb8cb20 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -887,25 +887,16 @@ static void gss_pipe_dentry_destroy(struct dentry *dir,
struct rpc_pipe_dir_object *pdo)
{
struct gss_pipe *gss_pipe = pdo->pdo_data;
- struct rpc_pipe *pipe = gss_pipe->pipe;
- if (pipe->dentry != NULL) {
- rpc_unlink(pipe->dentry);
- pipe->dentry = NULL;
- }
+ rpc_unlink(gss_pipe->pipe);
}
static int gss_pipe_dentry_create(struct dentry *dir,
struct rpc_pipe_dir_object *pdo)
{
struct gss_pipe *p = pdo->pdo_data;
- struct dentry *dentry;
- dentry = rpc_mkpipe_dentry(dir, p->name, p->clnt, p->pipe);
- if (IS_ERR(dentry))
- return PTR_ERR(dentry);
- p->pipe->dentry = dentry;
- return 0;
+ return rpc_mkpipe_dentry(dir, p->name, p->clnt, p->pipe);
}
static const struct rpc_pipe_dir_object_ops gss_pipe_dir_object_ops = {
@@ -1724,7 +1715,7 @@ gss_validate(struct rpc_task *task, struct xdr_stream *xdr)
maj_stat = gss_validate_seqno_mic(ctx, task->tk_rqstp->rq_seqnos[0], seq, p, len);
/* RFC 2203 5.3.3.1 - compute the checksum of each sequence number in the cache */
while (unlikely(maj_stat == GSS_S_BAD_SIG && i < task->tk_rqstp->rq_seqno_count))
- maj_stat = gss_validate_seqno_mic(ctx, task->tk_rqstp->rq_seqnos[i], seq, p, len);
+ maj_stat = gss_validate_seqno_mic(ctx, task->tk_rqstp->rq_seqnos[i++], seq, p, len);
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
if (maj_stat)
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 8f2d65c1e831..16dcf115de1e 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -875,8 +875,8 @@ out_err:
* krb5_etm_decrypt - Decrypt using the RFC 8009 rules
* @kctx: Kerberos context
* @offset: starting offset of the ciphertext, in bytes
- * @len:
- * @buf:
+ * @len: size of ciphertext to unwrap
+ * @buf: ciphertext to unwrap
* @headskip: OUT: the enctype's confounder length, in octets
* @tailskip: OUT: the enctype's HMAC length, in octets
*
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index cb32ab9a8395..7d2cdc2bd374 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -794,12 +794,12 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
struct gssx_res_accept_sec_context *res = data;
u32 value_follows;
int err;
- struct page *scratch;
+ struct folio *scratch;
- scratch = alloc_page(GFP_KERNEL);
+ scratch = folio_alloc(GFP_KERNEL, 0);
if (!scratch)
return -ENOMEM;
- xdr_set_scratch_page(xdr, scratch);
+ xdr_set_scratch_folio(xdr, scratch);
/* res->status */
err = gssx_dec_status(xdr, &res->status);
@@ -844,6 +844,6 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
err = gssx_dec_option_array(xdr, &res->options);
out_free:
- __free_page(scratch);
+ folio_put(scratch);
return err;
}
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 73a90ad873fb..a8ec30759a18 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -724,7 +724,7 @@ svcauth_gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
rqstp->rq_auth_stat = rpc_autherr_badverf;
return SVC_DENIED;
}
- if (flavor != RPC_AUTH_GSS) {
+ if (flavor != RPC_AUTH_GSS || checksum.len < XDR_UNIT) {
rqstp->rq_auth_stat = rpc_autherr_badverf;
return SVC_DENIED;
}
@@ -1628,7 +1628,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp)
int ret;
struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
- rqstp->rq_auth_stat = rpc_autherr_badcred;
+ rqstp->rq_auth_stat = rpc_autherr_failed;
if (!svcdata)
svcdata = kmalloc(sizeof(*svcdata), GFP_KERNEL);
if (!svcdata)
@@ -1638,6 +1638,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp)
svcdata->rsci = NULL;
gc = &svcdata->clcred;
+ rqstp->rq_auth_stat = rpc_autherr_badcred;
if (!svcauth_gss_decode_credbody(&rqstp->rq_arg_stream, gc, &rpcstart))
goto auth_err;
if (gc->gc_v != RPC_GSS_VERSION)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 21426c3049d3..8ca354ecfd02 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -112,47 +112,46 @@ static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
}
}
-static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
+static int rpc_setup_pipedir_sb(struct super_block *sb,
struct rpc_clnt *clnt)
{
static uint32_t clntid;
const char *dir_name = clnt->cl_program->pipe_dir_name;
char name[15];
- struct dentry *dir, *dentry;
+ struct dentry *dir;
+ int err;
dir = rpc_d_lookup_sb(sb, dir_name);
if (dir == NULL) {
pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
- return dir;
+ return -ENOENT;
}
for (;;) {
snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
name[sizeof(name) - 1] = '\0';
- dentry = rpc_create_client_dir(dir, name, clnt);
- if (!IS_ERR(dentry))
+ err = rpc_create_client_dir(dir, name, clnt);
+ if (!err)
break;
- if (dentry == ERR_PTR(-EEXIST))
+ if (err == -EEXIST)
continue;
printk(KERN_INFO "RPC: Couldn't create pipefs entry"
- " %s/%s, error %ld\n",
- dir_name, name, PTR_ERR(dentry));
+ " %s/%s, error %d\n",
+ dir_name, name, err);
break;
}
dput(dir);
- return dentry;
+ return err;
}
static int
rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
{
- struct dentry *dentry;
-
clnt->pipefs_sb = pipefs_sb;
if (clnt->cl_program->pipe_dir_name != NULL) {
- dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
- if (IS_ERR(dentry))
- return PTR_ERR(dentry);
+ int err = rpc_setup_pipedir_sb(pipefs_sb, clnt);
+ if (err && err != -ENOENT)
+ return err;
}
return 0;
}
@@ -180,16 +179,9 @@ static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
struct super_block *sb)
{
- struct dentry *dentry;
-
switch (event) {
case RPC_PIPEFS_MOUNT:
- dentry = rpc_setup_pipedir_sb(sb, clnt);
- if (!dentry)
- return -ENOENT;
- if (IS_ERR(dentry))
- return PTR_ERR(dentry);
- break;
+ return rpc_setup_pipedir_sb(sb, clnt);
case RPC_PIPEFS_UMOUNT:
__rpc_clnt_remove_pipedir(clnt);
break;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 98f78cd55905..0bd1df2ebb47 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -168,8 +168,9 @@ rpc_inode_setowner(struct inode *inode, void *private)
}
static void
-rpc_close_pipes(struct inode *inode)
+rpc_close_pipes(struct dentry *dentry)
{
+ struct inode *inode = dentry->d_inode;
struct rpc_pipe *pipe = RPC_I(inode)->pipe;
int need_release;
LIST_HEAD(free_list);
@@ -484,60 +485,6 @@ rpc_get_inode(struct super_block *sb, umode_t mode)
return inode;
}
-static int __rpc_create_common(struct inode *dir, struct dentry *dentry,
- umode_t mode,
- const struct file_operations *i_fop,
- void *private)
-{
- struct inode *inode;
-
- d_drop(dentry);
- inode = rpc_get_inode(dir->i_sb, mode);
- if (!inode)
- goto out_err;
- inode->i_ino = iunique(dir->i_sb, 100);
- if (i_fop)
- inode->i_fop = i_fop;
- if (private)
- rpc_inode_setowner(inode, private);
- d_add(dentry, inode);
- return 0;
-out_err:
- printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %pd\n",
- __FILE__, __func__, dentry);
- dput(dentry);
- return -ENOMEM;
-}
-
-static int __rpc_create(struct inode *dir, struct dentry *dentry,
- umode_t mode,
- const struct file_operations *i_fop,
- void *private)
-{
- int err;
-
- err = __rpc_create_common(dir, dentry, S_IFREG | mode, i_fop, private);
- if (err)
- return err;
- fsnotify_create(dir, dentry);
- return 0;
-}
-
-static int __rpc_mkdir(struct inode *dir, struct dentry *dentry,
- umode_t mode,
- const struct file_operations *i_fop,
- void *private)
-{
- int err;
-
- err = __rpc_create_common(dir, dentry, S_IFDIR | mode, i_fop, private);
- if (err)
- return err;
- inc_nlink(dir);
- fsnotify_mkdir(dir, dentry);
- return 0;
-}
-
static void
init_pipe(struct rpc_pipe *pipe)
{
@@ -574,119 +521,60 @@ struct rpc_pipe *rpc_mkpipe_data(const struct rpc_pipe_ops *ops, int flags)
}
EXPORT_SYMBOL_GPL(rpc_mkpipe_data);
-static int __rpc_mkpipe_dentry(struct inode *dir, struct dentry *dentry,
- umode_t mode,
- const struct file_operations *i_fop,
- void *private,
- struct rpc_pipe *pipe)
+static int rpc_new_file(struct dentry *parent,
+ const char *name,
+ umode_t mode,
+ const struct file_operations *i_fop,
+ void *private)
{
- struct rpc_inode *rpci;
- int err;
+ struct dentry *dentry = simple_start_creating(parent, name);
+ struct inode *dir = parent->d_inode;
+ struct inode *inode;
- err = __rpc_create_common(dir, dentry, S_IFIFO | mode, i_fop, private);
- if (err)
- return err;
- rpci = RPC_I(d_inode(dentry));
- rpci->private = private;
- rpci->pipe = pipe;
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ inode = rpc_get_inode(dir->i_sb, S_IFREG | mode);
+ if (unlikely(!inode)) {
+ dput(dentry);
+ inode_unlock(dir);
+ return -ENOMEM;
+ }
+ inode->i_ino = iunique(dir->i_sb, 100);
+ if (i_fop)
+ inode->i_fop = i_fop;
+ rpc_inode_setowner(inode, private);
+ d_instantiate(dentry, inode);
fsnotify_create(dir, dentry);
+ inode_unlock(dir);
return 0;
}
-static int __rpc_rmdir(struct inode *dir, struct dentry *dentry)
-{
- int ret;
-
- dget(dentry);
- ret = simple_rmdir(dir, dentry);
- d_drop(dentry);
- if (!ret)
- fsnotify_rmdir(dir, dentry);
- dput(dentry);
- return ret;
-}
-
-static int __rpc_unlink(struct inode *dir, struct dentry *dentry)
-{
- int ret;
-
- dget(dentry);
- ret = simple_unlink(dir, dentry);
- d_drop(dentry);
- if (!ret)
- fsnotify_unlink(dir, dentry);
- dput(dentry);
- return ret;
-}
-
-static int __rpc_rmpipe(struct inode *dir, struct dentry *dentry)
+static struct dentry *rpc_new_dir(struct dentry *parent,
+ const char *name,
+ umode_t mode)
{
- struct inode *inode = d_inode(dentry);
-
- rpc_close_pipes(inode);
- return __rpc_unlink(dir, dentry);
-}
+ struct dentry *dentry = simple_start_creating(parent, name);
+ struct inode *dir = parent->d_inode;
+ struct inode *inode;
-static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
- const char *name)
-{
- struct qstr q = QSTR(name);
- struct dentry *dentry = try_lookup_noperm(&q, parent);
- if (!dentry) {
- dentry = d_alloc(parent, &q);
- if (!dentry)
- return ERR_PTR(-ENOMEM);
- }
- if (d_really_is_negative(dentry))
+ if (IS_ERR(dentry))
return dentry;
- dput(dentry);
- return ERR_PTR(-EEXIST);
-}
-/*
- * FIXME: This probably has races.
- */
-static void __rpc_depopulate(struct dentry *parent,
- const struct rpc_filelist *files,
- int start, int eof)
-{
- struct inode *dir = d_inode(parent);
- struct dentry *dentry;
- struct qstr name;
- int i;
-
- for (i = start; i < eof; i++) {
- name.name = files[i].name;
- name.len = strlen(files[i].name);
- dentry = try_lookup_noperm(&name, parent);
-
- if (dentry == NULL)
- continue;
- if (d_really_is_negative(dentry))
- goto next;
- switch (d_inode(dentry)->i_mode & S_IFMT) {
- default:
- BUG();
- case S_IFREG:
- __rpc_unlink(dir, dentry);
- break;
- case S_IFDIR:
- __rpc_rmdir(dir, dentry);
- }
-next:
+ inode = rpc_get_inode(dir->i_sb, S_IFDIR | mode);
+ if (unlikely(!inode)) {
dput(dentry);
+ inode_unlock(dir);
+ return ERR_PTR(-ENOMEM);
}
-}
-static void rpc_depopulate(struct dentry *parent,
- const struct rpc_filelist *files,
- int start, int eof)
-{
- struct inode *dir = d_inode(parent);
-
- inode_lock_nested(dir, I_MUTEX_CHILD);
- __rpc_depopulate(parent, files, start, eof);
+ inode->i_ino = iunique(dir->i_sb, 100);
+ inc_nlink(dir);
+ d_instantiate(dentry, inode);
+ fsnotify_mkdir(dir, dentry);
inode_unlock(dir);
+
+ return dentry;
}
static int rpc_populate(struct dentry *parent,
@@ -694,92 +582,39 @@ static int rpc_populate(struct dentry *parent,
int start, int eof,
void *private)
{
- struct inode *dir = d_inode(parent);
struct dentry *dentry;
int i, err;
- inode_lock(dir);
for (i = start; i < eof; i++) {
- dentry = __rpc_lookup_create_exclusive(parent, files[i].name);
- err = PTR_ERR(dentry);
- if (IS_ERR(dentry))
- goto out_bad;
switch (files[i].mode & S_IFMT) {
default:
BUG();
case S_IFREG:
- err = __rpc_create(dir, dentry,
+ err = rpc_new_file(parent,
+ files[i].name,
files[i].mode,
files[i].i_fop,
private);
+ if (err)
+ goto out_bad;
break;
case S_IFDIR:
- err = __rpc_mkdir(dir, dentry,
- files[i].mode,
- NULL,
- private);
+ dentry = rpc_new_dir(parent,
+ files[i].name,
+ files[i].mode);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ goto out_bad;
+ }
}
- if (err != 0)
- goto out_bad;
}
- inode_unlock(dir);
return 0;
out_bad:
- __rpc_depopulate(parent, files, start, eof);
- inode_unlock(dir);
printk(KERN_WARNING "%s: %s failed to populate directory %pd\n",
__FILE__, __func__, parent);
return err;
}
-static struct dentry *rpc_mkdir_populate(struct dentry *parent,
- const char *name, umode_t mode, void *private,
- int (*populate)(struct dentry *, void *), void *args_populate)
-{
- struct dentry *dentry;
- struct inode *dir = d_inode(parent);
- int error;
-
- inode_lock_nested(dir, I_MUTEX_PARENT);
- dentry = __rpc_lookup_create_exclusive(parent, name);
- if (IS_ERR(dentry))
- goto out;
- error = __rpc_mkdir(dir, dentry, mode, NULL, private);
- if (error != 0)
- goto out_err;
- if (populate != NULL) {
- error = populate(dentry, args_populate);
- if (error)
- goto err_rmdir;
- }
-out:
- inode_unlock(dir);
- return dentry;
-err_rmdir:
- __rpc_rmdir(dir, dentry);
-out_err:
- dentry = ERR_PTR(error);
- goto out;
-}
-
-static int rpc_rmdir_depopulate(struct dentry *dentry,
- void (*depopulate)(struct dentry *))
-{
- struct dentry *parent;
- struct inode *dir;
- int error;
-
- parent = dget_parent(dentry);
- dir = d_inode(parent);
- inode_lock_nested(dir, I_MUTEX_PARENT);
- if (depopulate != NULL)
- depopulate(dentry);
- error = __rpc_rmdir(dir, dentry);
- inode_unlock(dir);
- dput(parent);
- return error;
-}
-
/**
* rpc_mkpipe_dentry - make an rpc_pipefs file for kernel<->userspace
* communication
@@ -799,11 +634,13 @@ static int rpc_rmdir_depopulate(struct dentry *dentry,
* The @private argument passed here will be available to all these methods
* from the file pointer, via RPC_I(file_inode(file))->private.
*/
-struct dentry *rpc_mkpipe_dentry(struct dentry *parent, const char *name,
+int rpc_mkpipe_dentry(struct dentry *parent, const char *name,
void *private, struct rpc_pipe *pipe)
{
- struct dentry *dentry;
struct inode *dir = d_inode(parent);
+ struct dentry *dentry;
+ struct inode *inode;
+ struct rpc_inode *rpci;
umode_t umode = S_IFIFO | 0600;
int err;
@@ -812,48 +649,53 @@ struct dentry *rpc_mkpipe_dentry(struct dentry *parent, const char *name,
if (pipe->ops->downcall == NULL)
umode &= ~0222;
- inode_lock_nested(dir, I_MUTEX_PARENT);
- dentry = __rpc_lookup_create_exclusive(parent, name);
- if (IS_ERR(dentry))
- goto out;
- err = __rpc_mkpipe_dentry(dir, dentry, umode, &rpc_pipe_fops,
- private, pipe);
- if (err)
- goto out_err;
-out:
+ dentry = simple_start_creating(parent, name);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ goto failed;
+ }
+
+ inode = rpc_get_inode(dir->i_sb, umode);
+ if (unlikely(!inode)) {
+ dput(dentry);
+ inode_unlock(dir);
+ err = -ENOMEM;
+ goto failed;
+ }
+ inode->i_ino = iunique(dir->i_sb, 100);
+ inode->i_fop = &rpc_pipe_fops;
+ rpci = RPC_I(inode);
+ rpci->private = private;
+ rpci->pipe = pipe;
+ rpc_inode_setowner(inode, private);
+ d_instantiate(dentry, inode);
+ pipe->dentry = dentry;
+ fsnotify_create(dir, dentry);
inode_unlock(dir);
- return dentry;
-out_err:
- dentry = ERR_PTR(err);
- printk(KERN_WARNING "%s: %s() failed to create pipe %pd/%s (errno = %d)\n",
- __FILE__, __func__, parent, name,
- err);
- goto out;
+ return 0;
+
+failed:
+ pr_warn("%s() failed to create pipe %pd/%s (errno = %d)\n",
+ __func__, parent, name, err);
+ return err;
}
EXPORT_SYMBOL_GPL(rpc_mkpipe_dentry);
/**
* rpc_unlink - remove a pipe
- * @dentry: dentry for the pipe, as returned from rpc_mkpipe
+ * @pipe: the pipe to be removed
*
* After this call, lookups will no longer find the pipe, and any
* attempts to read or write using preexisting opens of the pipe will
* return -EPIPE.
*/
-int
-rpc_unlink(struct dentry *dentry)
+void
+rpc_unlink(struct rpc_pipe *pipe)
{
- struct dentry *parent;
- struct inode *dir;
- int error = 0;
-
- parent = dget_parent(dentry);
- dir = d_inode(parent);
- inode_lock_nested(dir, I_MUTEX_PARENT);
- error = __rpc_rmpipe(dir, dentry);
- inode_unlock(dir);
- dput(parent);
- return error;
+ if (pipe->dentry) {
+ simple_recursive_removal(pipe->dentry, rpc_close_pipes);
+ pipe->dentry = NULL;
+ }
}
EXPORT_SYMBOL_GPL(rpc_unlink);
@@ -1010,31 +852,6 @@ rpc_destroy_pipe_dir_objects(struct rpc_pipe_dir_head *pdh)
pdo->pdo_ops->destroy(dir, pdo);
}
-enum {
- RPCAUTH_info,
- RPCAUTH_EOF
-};
-
-static const struct rpc_filelist authfiles[] = {
- [RPCAUTH_info] = {
- .name = "info",
- .i_fop = &rpc_info_operations,
- .mode = S_IFREG | 0400,
- },
-};
-
-static int rpc_clntdir_populate(struct dentry *dentry, void *private)
-{
- return rpc_populate(dentry,
- authfiles, RPCAUTH_info, RPCAUTH_EOF,
- private);
-}
-
-static void rpc_clntdir_depopulate(struct dentry *dentry)
-{
- rpc_depopulate(dentry, authfiles, RPCAUTH_info, RPCAUTH_EOF);
-}
-
/**
* rpc_create_client_dir - Create a new rpc_client directory in rpc_pipefs
* @dentry: the parent of new directory
@@ -1046,19 +863,27 @@ static void rpc_clntdir_depopulate(struct dentry *dentry)
* information about the client, together with any "pipes" that may
* later be created using rpc_mkpipe().
*/
-struct dentry *rpc_create_client_dir(struct dentry *dentry,
- const char *name,
- struct rpc_clnt *rpc_client)
+int rpc_create_client_dir(struct dentry *dentry,
+ const char *name,
+ struct rpc_clnt *rpc_client)
{
struct dentry *ret;
+ int err;
- ret = rpc_mkdir_populate(dentry, name, 0555, NULL,
- rpc_clntdir_populate, rpc_client);
- if (!IS_ERR(ret)) {
- rpc_client->cl_pipedir_objects.pdh_dentry = ret;
- rpc_create_pipe_dir_objects(&rpc_client->cl_pipedir_objects);
+ ret = rpc_new_dir(dentry, name, 0555);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+ err = rpc_new_file(ret, "info", S_IFREG | 0400,
+ &rpc_info_operations, rpc_client);
+ if (err) {
+ pr_warn("%s failed to populate directory %pd\n",
+ __func__, ret);
+ simple_recursive_removal(ret, NULL);
+ return err;
}
- return ret;
+ rpc_client->cl_pipedir_objects.pdh_dentry = ret;
+ rpc_create_pipe_dir_objects(&rpc_client->cl_pipedir_objects);
+ return 0;
}
/**
@@ -1073,7 +898,8 @@ int rpc_remove_client_dir(struct rpc_clnt *rpc_client)
return 0;
rpc_destroy_pipe_dir_objects(&rpc_client->cl_pipedir_objects);
rpc_client->cl_pipedir_objects.pdh_dentry = NULL;
- return rpc_rmdir_depopulate(dentry, rpc_clntdir_depopulate);
+ simple_recursive_removal(dentry, NULL);
+ return 0;
}
static const struct rpc_filelist cache_pipefs_files[3] = {
@@ -1094,28 +920,25 @@ static const struct rpc_filelist cache_pipefs_files[3] = {
},
};
-static int rpc_cachedir_populate(struct dentry *dentry, void *private)
-{
- return rpc_populate(dentry,
- cache_pipefs_files, 0, 3,
- private);
-}
-
-static void rpc_cachedir_depopulate(struct dentry *dentry)
-{
- rpc_depopulate(dentry, cache_pipefs_files, 0, 3);
-}
-
struct dentry *rpc_create_cache_dir(struct dentry *parent, const char *name,
umode_t umode, struct cache_detail *cd)
{
- return rpc_mkdir_populate(parent, name, umode, NULL,
- rpc_cachedir_populate, cd);
+ struct dentry *dentry;
+
+ dentry = rpc_new_dir(parent, name, umode);
+ if (!IS_ERR(dentry)) {
+ int error = rpc_populate(dentry, cache_pipefs_files, 0, 3, cd);
+ if (error) {
+ simple_recursive_removal(dentry, NULL);
+ return ERR_PTR(error);
+ }
+ }
+ return dentry;
}
void rpc_remove_cache_dir(struct dentry *dentry)
{
- rpc_rmdir_depopulate(dentry, rpc_cachedir_depopulate);
+ simple_recursive_removal(dentry, NULL);
}
/*
@@ -1141,7 +964,6 @@ enum {
RPCAUTH_nfsd4_cb,
RPCAUTH_cache,
RPCAUTH_nfsd,
- RPCAUTH_gssd,
RPCAUTH_RootEOF
};
@@ -1178,10 +1000,6 @@ static const struct rpc_filelist files[] = {
.name = "nfsd",
.mode = S_IFDIR | 0555,
},
- [RPCAUTH_gssd] = {
- .name = "gssd",
- .mode = S_IFDIR | 0555,
- },
};
/*
@@ -1241,13 +1059,6 @@ void rpc_put_sb_net(const struct net *net)
}
EXPORT_SYMBOL_GPL(rpc_put_sb_net);
-static const struct rpc_filelist gssd_dummy_clnt_dir[] = {
- [0] = {
- .name = "clntXX",
- .mode = S_IFDIR | 0555,
- },
-};
-
static ssize_t
dummy_downcall(struct file *filp, const char __user *src, size_t len)
{
@@ -1276,14 +1087,6 @@ rpc_dummy_info_show(struct seq_file *m, void *v)
}
DEFINE_SHOW_ATTRIBUTE(rpc_dummy_info);
-static const struct rpc_filelist gssd_dummy_info_file[] = {
- [0] = {
- .name = "info",
- .i_fop = &rpc_dummy_info_fops,
- .mode = S_IFREG | 0400,
- },
-};
-
/**
* rpc_gssd_dummy_populate - create a dummy gssd pipe
* @root: root of the rpc_pipefs filesystem
@@ -1292,69 +1095,32 @@ static const struct rpc_filelist gssd_dummy_info_file[] = {
* Create a dummy set of directories and a pipe that gssd can hold open to
* indicate that it is up and running.
*/
-static struct dentry *
+static int
rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data)
{
- int ret = 0;
- struct dentry *gssd_dentry;
- struct dentry *clnt_dentry = NULL;
- struct dentry *pipe_dentry = NULL;
-
- /* We should never get this far if "gssd" doesn't exist */
- gssd_dentry = try_lookup_noperm(&QSTR(files[RPCAUTH_gssd].name), root);
- if (!gssd_dentry)
- return ERR_PTR(-ENOENT);
-
- ret = rpc_populate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1, NULL);
- if (ret) {
- pipe_dentry = ERR_PTR(ret);
- goto out;
- }
+ struct dentry *gssd_dentry, *clnt_dentry;
+ int err;
- clnt_dentry = try_lookup_noperm(&QSTR(gssd_dummy_clnt_dir[0].name),
- gssd_dentry);
- if (!clnt_dentry) {
- __rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1);
- pipe_dentry = ERR_PTR(-ENOENT);
- goto out;
- }
+ gssd_dentry = rpc_new_dir(root, "gssd", 0555);
+ if (IS_ERR(gssd_dentry))
+ return -ENOENT;
- ret = rpc_populate(clnt_dentry, gssd_dummy_info_file, 0, 1, NULL);
- if (ret) {
- __rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1);
- pipe_dentry = ERR_PTR(ret);
- goto out;
- }
+ clnt_dentry = rpc_new_dir(gssd_dentry, "clntXX", 0555);
+ if (IS_ERR(clnt_dentry))
+ return -ENOENT;
- pipe_dentry = rpc_mkpipe_dentry(clnt_dentry, "gssd", NULL, pipe_data);
- if (IS_ERR(pipe_dentry)) {
- __rpc_depopulate(clnt_dentry, gssd_dummy_info_file, 0, 1);
- __rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1);
- }
-out:
- dput(clnt_dentry);
- dput(gssd_dentry);
- return pipe_dentry;
-}
-
-static void
-rpc_gssd_dummy_depopulate(struct dentry *pipe_dentry)
-{
- struct dentry *clnt_dir = pipe_dentry->d_parent;
- struct dentry *gssd_dir = clnt_dir->d_parent;
-
- dget(pipe_dentry);
- __rpc_rmpipe(d_inode(clnt_dir), pipe_dentry);
- __rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1);
- __rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1);
- dput(pipe_dentry);
+ err = rpc_new_file(clnt_dentry, "info", 0400,
+ &rpc_dummy_info_fops, NULL);
+ if (!err)
+ err = rpc_mkpipe_dentry(clnt_dentry, "gssd", NULL, pipe_data);
+ return err;
}
static int
rpc_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct inode *inode;
- struct dentry *root, *gssd_dentry;
+ struct dentry *root;
struct net *net = sb->s_fs_info;
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
int err;
@@ -1363,7 +1129,7 @@ rpc_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = RPCAUTH_GSSMAGIC;
sb->s_op = &s_ops;
- sb->s_d_op = &simple_dentry_operations;
+ sb->s_d_flags = DCACHE_DONTCACHE;
sb->s_time_gran = 1;
inode = rpc_get_inode(sb, S_IFDIR | 0555);
@@ -1373,11 +1139,9 @@ rpc_fill_super(struct super_block *sb, struct fs_context *fc)
if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL))
return -ENOMEM;
- gssd_dentry = rpc_gssd_dummy_populate(root, sn->gssd_dummy);
- if (IS_ERR(gssd_dentry)) {
- __rpc_depopulate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF);
- return PTR_ERR(gssd_dentry);
- }
+ err = rpc_gssd_dummy_populate(root, sn->gssd_dummy);
+ if (err)
+ return err;
dprintk("RPC: sending pipefs MOUNT notification for net %x%s\n",
net->ns.inum, NET_NAME(net));
@@ -1386,18 +1150,6 @@ rpc_fill_super(struct super_block *sb, struct fs_context *fc)
err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
RPC_PIPEFS_MOUNT,
sb);
- if (err)
- goto err_depopulate;
- mutex_unlock(&sn->pipefs_sb_lock);
- return 0;
-
-err_depopulate:
- rpc_gssd_dummy_depopulate(gssd_dentry);
- blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
- RPC_PIPEFS_UMOUNT,
- sb);
- sn->pipefs_sb = NULL;
- __rpc_depopulate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF);
mutex_unlock(&sn->pipefs_sb_lock);
return err;
}
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 73bc39281ef5..016f16ca5779 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -276,8 +276,6 @@ EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
- if (unlikely(current->flags & PF_EXITING))
- return -EINTR;
schedule();
if (signal_pending_state(mode, current))
return -ERESTARTSYS;
@@ -1076,7 +1074,6 @@ int rpc_malloc(struct rpc_task *task)
rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
return 0;
}
-EXPORT_SYMBOL_GPL(rpc_malloc);
/**
* rpc_free - free RPC buffer resources allocated via rpc_malloc
@@ -1097,7 +1094,6 @@ void rpc_free(struct rpc_task *task)
else
kfree(buf);
}
-EXPORT_SYMBOL_GPL(rpc_free);
/*
* Creation and deletion of RPC task structures
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 1b2b84feeec6..d8d8842c7de5 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -27,135 +27,91 @@
struct xdr_skb_reader {
struct sk_buff *skb;
unsigned int offset;
+ bool need_checksum;
size_t count;
__wsum csum;
};
-typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to,
- size_t len);
-
/**
* xdr_skb_read_bits - copy some data bits from skb to internal buffer
* @desc: sk_buff copy helper
* @to: copy destination
* @len: number of bytes to copy
*
- * Possibly called several times to iterate over an sk_buff and copy
- * data out of it.
+ * Possibly called several times to iterate over an sk_buff and copy data out of
+ * it.
*/
static size_t
xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
{
- if (len > desc->count)
- len = desc->count;
- if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
- return 0;
- desc->count -= len;
- desc->offset += len;
- return len;
-}
+ len = min(len, desc->count);
+
+ if (desc->need_checksum) {
+ __wsum csum;
+
+ csum = skb_copy_and_csum_bits(desc->skb, desc->offset, to, len);
+ desc->csum = csum_block_add(desc->csum, csum, desc->offset);
+ } else {
+ if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
+ return 0;
+ }
-/**
- * xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer
- * @desc: sk_buff copy helper
- * @to: copy destination
- * @len: number of bytes to copy
- *
- * Same as skb_read_bits, but calculate a checksum at the same time.
- */
-static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len)
-{
- unsigned int pos;
- __wsum csum2;
-
- if (len > desc->count)
- len = desc->count;
- pos = desc->offset;
- csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len);
- desc->csum = csum_block_add(desc->csum, csum2, pos);
desc->count -= len;
desc->offset += len;
return len;
}
-/**
- * xdr_partial_copy_from_skb - copy data out of an skb
- * @xdr: target XDR buffer
- * @base: starting offset
- * @desc: sk_buff copy helper
- * @copy_actor: virtual method for copying data
- *
- */
static ssize_t
-xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
+xdr_partial_copy_from_skb(struct xdr_buf *xdr, struct xdr_skb_reader *desc)
{
- struct page **ppage = xdr->pages;
- unsigned int len, pglen = xdr->page_len;
- ssize_t copied = 0;
- size_t ret;
-
- len = xdr->head[0].iov_len;
- if (base < len) {
- len -= base;
- ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
- copied += ret;
- if (ret != len || !desc->count)
- goto out;
- base = 0;
- } else
- base -= len;
-
- if (unlikely(pglen == 0))
- goto copy_tail;
- if (unlikely(base >= pglen)) {
- base -= pglen;
- goto copy_tail;
- }
- if (base || xdr->page_base) {
- pglen -= base;
- base += xdr->page_base;
- ppage += base >> PAGE_SHIFT;
- base &= ~PAGE_MASK;
- }
- do {
+ struct page **ppage = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
+ unsigned int poff = xdr->page_base & ~PAGE_MASK;
+ unsigned int pglen = xdr->page_len;
+ ssize_t copied = 0;
+ size_t ret;
+
+ if (xdr->head[0].iov_len == 0)
+ return 0;
+
+ ret = xdr_skb_read_bits(desc, xdr->head[0].iov_base,
+ xdr->head[0].iov_len);
+ if (ret != xdr->head[0].iov_len || !desc->count)
+ return ret;
+ copied += ret;
+
+ while (pglen) {
+ unsigned int len = min(PAGE_SIZE - poff, pglen);
char *kaddr;
/* ACL likes to be lazy in allocating pages - ACLs
* are small by default but can get huge. */
if ((xdr->flags & XDRBUF_SPARSE_PAGES) && *ppage == NULL) {
- *ppage = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
+ *ppage = alloc_page(GFP_NOWAIT);
if (unlikely(*ppage == NULL)) {
if (copied == 0)
- copied = -ENOMEM;
- goto out;
+ return -ENOMEM;
+ return copied;
}
}
- len = PAGE_SIZE;
kaddr = kmap_atomic(*ppage);
- if (base) {
- len -= base;
- if (pglen < len)
- len = pglen;
- ret = copy_actor(desc, kaddr + base, len);
- base = 0;
- } else {
- if (pglen < len)
- len = pglen;
- ret = copy_actor(desc, kaddr, len);
- }
+ ret = xdr_skb_read_bits(desc, kaddr + poff, len);
flush_dcache_page(*ppage);
kunmap_atomic(kaddr);
+
copied += ret;
if (ret != len || !desc->count)
- goto out;
+ return copied;
ppage++;
- } while ((pglen -= len) != 0);
-copy_tail:
- len = xdr->tail[0].iov_len;
- if (base < len)
- copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
-out:
+ pglen -= len;
+ poff = 0;
+ }
+
+ if (xdr->tail[0].iov_len) {
+ copied += xdr_skb_read_bits(desc, xdr->tail[0].iov_base,
+ xdr->tail[0].iov_len);
+ }
+
return copied;
}
@@ -169,17 +125,22 @@ out:
*/
int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
{
- struct xdr_skb_reader desc;
-
- desc.skb = skb;
- desc.offset = 0;
- desc.count = skb->len - desc.offset;
+ struct xdr_skb_reader desc = {
+ .skb = skb,
+ .count = skb->len - desc.offset,
+ };
- if (skb_csum_unnecessary(skb))
- goto no_checksum;
+ if (skb_csum_unnecessary(skb)) {
+ if (xdr_partial_copy_from_skb(xdr, &desc) < 0)
+ return -1;
+ if (desc.count)
+ return -1;
+ return 0;
+ }
+ desc.need_checksum = true;
desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
- if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0)
+ if (xdr_partial_copy_from_skb(xdr, &desc) < 0)
return -1;
if (desc.offset != skb->len) {
__wsum csum2;
@@ -194,14 +155,7 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
!skb->csum_complete_sw)
netdev_rx_csum_fault(skb->dev, skb);
return 0;
-no_checksum:
- if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
- return -1;
- if (desc.count)
- return -1;
- return 0;
}
-EXPORT_SYMBOL_GPL(csum_partial_copy_to_xdr);
static inline int xprt_sendmsg(struct socket *sock, struct msghdr *msg,
size_t seek)
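
The socklib.c hunks above fold the old checksum copy actor into xdr_skb_read_bits() behind a need_checksum flag and rewrite xdr_partial_copy_from_skb() as a straight walk over the head iovec, the page array and the tail iovec. The sketch below is a minimal userspace analogue of that walk, not the SUNRPC API: it assumes a flat source buffer standing in for the skb and uses a trivial byte sum where the kernel folds a __wsum, purely to show the control flow.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u

struct reader {
	const uint8_t *src;     /* flat source buffer standing in for the skb */
	size_t offset, count;   /* read cursor and bytes remaining */
	int need_checksum;
	unsigned int csum;      /* toy checksum: plain byte sum */
};

/* Copy up to len bytes, optionally folding them into the checksum. */
static size_t read_bits(struct reader *r, void *to, size_t len)
{
	if (len > r->count)
		len = r->count;
	memcpy(to, r->src + r->offset, len);
	if (r->need_checksum)
		for (size_t i = 0; i < len; i++)
			r->csum += ((const uint8_t *)to)[i];
	r->offset += len;
	r->count -= len;
	return len;
}

/* Walk head, then whole pages, then tail - mirroring the new copy loop. */
static size_t copy_to_xdr(struct reader *r, uint8_t *head, size_t head_len,
			  uint8_t *pages, size_t page_len, size_t page_base,
			  uint8_t *tail, size_t tail_len)
{
	size_t copied = 0, poff = page_base & (PAGE_SIZE - 1);

	copied += read_bits(r, head, head_len);
	while (page_len && r->count) {
		size_t len = PAGE_SIZE - poff;

		if (len > page_len)
			len = page_len;
		copied += read_bits(r, pages, len);
		pages += len;
		page_len -= len;
		poff = 0;	/* only the first page is partially filled */
	}
	if (tail_len && r->count)
		copied += read_bits(r, tail, tail_len);
	return copied;
}

int main(void)
{
	uint8_t src[64], head[8], pages[32], tail[24];
	struct reader r = { .src = src, .count = sizeof(src), .need_checksum = 1 };

	memset(src, 0x5a, sizeof(src));
	size_t n = copy_to_xdr(&r, head, sizeof(head), pages, sizeof(pages), 0,
			       tail, sizeof(tail));
	printf("copied %zu bytes, csum %u\n", n, r.csum);
	return 0;
}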
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 939b6239df8a..4704dce7284e 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -352,7 +352,7 @@ static int svc_pool_map_get_node(unsigned int pidx)
if (m->mode == SVC_POOL_PERNODE)
return m->pool_to[pidx];
}
- return NUMA_NO_NODE;
+ return numa_mem_id();
}
/*
* Set the given thread's cpus_allowed mask so that it
@@ -436,7 +436,6 @@ void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
svc_unregister(serv, net);
rpcb_put_local(net);
}
-EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);
static int svc_uses_rpcbind(struct svc_serv *serv)
{
@@ -638,8 +637,6 @@ EXPORT_SYMBOL_GPL(svc_destroy);
static bool
svc_init_buffer(struct svc_rqst *rqstp, const struct svc_serv *serv, int node)
{
- unsigned long ret;
-
rqstp->rq_maxpages = svc_serv_maxpages(serv);
/* rq_pages' last entry is NULL for historical reasons. */
@@ -649,9 +646,7 @@ svc_init_buffer(struct svc_rqst *rqstp, const struct svc_serv *serv, int node)
if (!rqstp->rq_pages)
return false;
- ret = alloc_pages_bulk_node(GFP_KERNEL, node, rqstp->rq_maxpages,
- rqstp->rq_pages);
- return ret == rqstp->rq_maxpages;
+ return true;
}
/*
@@ -674,8 +669,8 @@ svc_rqst_free(struct svc_rqst *rqstp)
folio_batch_release(&rqstp->rq_fbatch);
kfree(rqstp->rq_bvec);
svc_release_buffer(rqstp);
- if (rqstp->rq_scratch_page)
- put_page(rqstp->rq_scratch_page);
+ if (rqstp->rq_scratch_folio)
+ folio_put(rqstp->rq_scratch_folio);
kfree(rqstp->rq_resp);
kfree(rqstp->rq_argp);
kfree(rqstp->rq_auth_data);
@@ -696,8 +691,8 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
rqstp->rq_server = serv;
rqstp->rq_pool = pool;
- rqstp->rq_scratch_page = alloc_pages_node(node, GFP_KERNEL, 0);
- if (!rqstp->rq_scratch_page)
+ rqstp->rq_scratch_folio = __folio_alloc_node(GFP_KERNEL, 0, node);
+ if (!rqstp->rq_scratch_folio)
goto out_enomem;
rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
@@ -755,14 +750,16 @@ void svc_pool_wake_idle_thread(struct svc_pool *pool)
WRITE_ONCE(rqstp->rq_qtime, ktime_get());
if (!task_is_running(rqstp->rq_task)) {
wake_up_process(rqstp->rq_task);
- trace_svc_wake_up(rqstp->rq_task->pid);
+ trace_svc_pool_thread_wake(pool, rqstp->rq_task->pid);
percpu_counter_inc(&pool->sp_threads_woken);
+ } else {
+ trace_svc_pool_thread_running(pool, rqstp->rq_task->pid);
}
rcu_read_unlock();
return;
}
rcu_read_unlock();
-
+ trace_svc_pool_thread_noidle(pool, 0);
}
EXPORT_SYMBOL_GPL(svc_pool_wake_idle_thread);
@@ -1336,6 +1333,9 @@ svc_process_common(struct svc_rqst *rqstp)
int pr, rc;
__be32 *p;
+ /* Reset the accept_stat for the RPC */
+ rqstp->rq_accept_statp = NULL;
+
/* Will be turned off only when NFSv4 Sessions are used */
set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
clear_bit(RQ_DROPME, &rqstp->rq_flags);
@@ -1375,9 +1375,8 @@ svc_process_common(struct svc_rqst *rqstp)
case SVC_OK:
break;
case SVC_GARBAGE:
- goto err_garbage_args;
- case SVC_SYSERR:
- goto err_system_err;
+ rqstp->rq_auth_stat = rpc_autherr_badcred;
+ goto err_bad_auth;
case SVC_DENIED:
goto err_bad_auth;
case SVC_CLOSE:
@@ -1388,7 +1387,8 @@ svc_process_common(struct svc_rqst *rqstp)
goto sendit;
default:
pr_warn_once("Unexpected svc_auth_status (%d)\n", auth_res);
- goto err_system_err;
+ rqstp->rq_auth_stat = rpc_autherr_failed;
+ goto err_bad_auth;
}
if (progp == NULL)
@@ -1425,8 +1425,6 @@ svc_process_common(struct svc_rqst *rqstp)
/* Call the function that processes the request. */
rc = process.dispatch(rqstp);
- if (procp->pc_release)
- procp->pc_release(rqstp);
xdr_finish_decode(xdr);
if (!rc)
@@ -1515,20 +1513,6 @@ err_bad_proc:
serv->sv_stats->rpcbadfmt++;
*rqstp->rq_accept_statp = rpc_proc_unavail;
goto sendit;
-
-err_garbage_args:
- svc_printk(rqstp, "failed to decode RPC header\n");
-
- if (serv->sv_stats)
- serv->sv_stats->rpcbadfmt++;
- *rqstp->rq_accept_statp = rpc_garbage_args;
- goto sendit;
-
-err_system_err:
- if (serv->sv_stats)
- serv->sv_stats->rpcbadfmt++;
- *rqstp->rq_accept_statp = rpc_system_err;
- goto sendit;
}
/*
@@ -1539,6 +1523,14 @@ static void svc_drop(struct svc_rqst *rqstp)
trace_svc_drop(rqstp);
}
+static void svc_release_rqst(struct svc_rqst *rqstp)
+{
+ const struct svc_procedure *procp = rqstp->rq_procinfo;
+
+ if (procp && procp->pc_release)
+ procp->pc_release(rqstp);
+}
+
/**
* svc_process - Execute one RPC transaction
* @rqstp: RPC transaction context
@@ -1578,9 +1570,12 @@ void svc_process(struct svc_rqst *rqstp)
if (unlikely(*p != rpc_call))
goto out_baddir;
- if (!svc_process_common(rqstp))
+ if (!svc_process_common(rqstp)) {
+ svc_release_rqst(rqstp);
goto out_drop;
+ }
svc_send(rqstp);
+ svc_release_rqst(rqstp);
return;
out_baddir:
@@ -1648,6 +1643,7 @@ void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
if (!proc_error) {
/* Processing error: drop the request */
xprt_free_bc_request(req);
+ svc_release_rqst(rqstp);
return;
}
/* Finally, send the reply synchronously */
@@ -1661,6 +1657,7 @@ void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
timeout.to_maxval = timeout.to_initval;
memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
task = rpc_run_bc_task(req, &timeout);
+ svc_release_rqst(rqstp);
if (IS_ERR(task))
return;
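
The svc.c hunks above move the pc_release() call out of svc_process_common() and into a new svc_release_rqst() helper that the callers invoke only after the reply has been sent (or the request dropped), so the reply path can still reference per-procedure state. A schematic of that ordering, with invented names and nothing NFS-specific, assuming only that the release hook must run on every exit path:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	void *private;                     /* per-procedure state */
	void (*release)(struct request *); /* optional cleanup hook */
};

static void proc_release(struct request *rq)
{
	free(rq->private);
	rq->private = NULL;
}

static bool process(struct request *rq)
{
	rq->private = malloc(32);
	rq->release = proc_release;
	return rq->private != NULL;      /* "dispatch succeeded" */
}

static void release_request(struct request *rq)
{
	if (rq->release)
		rq->release(rq);         /* safe to call on every exit path */
}

static void send_reply(struct request *rq)
{
	(void)rq;                        /* reply may still use rq->private */
	printf("reply sent\n");
}

int main(void)
{
	struct request rq = { 0 };

	if (!process(&rq)) {             /* processing failed: drop */
		release_request(&rq);
		return 1;
	}
	send_reply(&rq);
	release_request(&rq);            /* release only after the send */
	return 0;
}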
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 8b1837228799..6973184ff667 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -1014,6 +1014,19 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
struct svc_serv *serv = xprt->xpt_server;
struct svc_deferred_req *dr;
+ /* Unregister with rpcbind when the transport type is TCP or UDP. */
+ if (test_bit(XPT_RPCB_UNREG, &xprt->xpt_flags)) {
+ struct svc_sock *svsk = container_of(xprt, struct svc_sock,
+ sk_xprt);
+ struct socket *sock = svsk->sk_sock;
+
+ if (svc_register(serv, xprt->xpt_net, sock->sk->sk_family,
+ sock->sk->sk_protocol, 0) < 0)
+ pr_warn("failed to unregister %s with rpcbind\n",
+ xprt->xpt_class->xcl_name);
+ }
+
if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
return;
@@ -1102,6 +1115,7 @@ static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
* svc_xprt_destroy_all - Destroy transports associated with @serv
* @serv: RPC service to be shut down
* @net: target network namespace
+ * @unregister: true if it is OK to unregister the destroyed xprts
*
* Server threads may still be running (especially in the case where the
* service is still running in other network namespaces).
@@ -1114,7 +1128,8 @@ static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
* threads, we may need to wait a little while and then check again to
* see if they're done.
*/
-void svc_xprt_destroy_all(struct svc_serv *serv, struct net *net)
+void svc_xprt_destroy_all(struct svc_serv *serv, struct net *net,
+ bool unregister)
{
int delay = 0;
@@ -1124,6 +1139,9 @@ void svc_xprt_destroy_all(struct svc_serv *serv, struct net *net)
svc_clean_up_xprts(serv, net);
msleep(delay++);
}
+
+ if (unregister)
+ svc_rpcb_cleanup(serv, net);
}
EXPORT_SYMBOL_GPL(svc_xprt_destroy_all);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index e1c85123b445..7b90abc5cf0e 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -257,20 +257,47 @@ svc_tcp_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
}
static int
-svc_tcp_sock_recv_cmsg(struct svc_sock *svsk, struct msghdr *msg)
+svc_tcp_sock_recv_cmsg(struct socket *sock, unsigned int *msg_flags)
{
union {
struct cmsghdr cmsg;
u8 buf[CMSG_SPACE(sizeof(u8))];
} u;
- struct socket *sock = svsk->sk_sock;
+ u8 alert[2];
+ struct kvec alert_kvec = {
+ .iov_base = alert,
+ .iov_len = sizeof(alert),
+ };
+ struct msghdr msg = {
+ .msg_flags = *msg_flags,
+ .msg_control = &u,
+ .msg_controllen = sizeof(u),
+ };
+ int ret;
+
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &alert_kvec, 1,
+ alert_kvec.iov_len);
+ ret = sock_recvmsg(sock, &msg, MSG_DONTWAIT);
+ if (ret > 0 &&
+ tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT) {
+ iov_iter_revert(&msg.msg_iter, ret);
+ ret = svc_tcp_sock_process_cmsg(sock, &msg, &u.cmsg, -EAGAIN);
+ }
+ return ret;
+}
+
+static int
+svc_tcp_sock_recvmsg(struct svc_sock *svsk, struct msghdr *msg)
+{
int ret;
+ struct socket *sock = svsk->sk_sock;
- msg->msg_control = &u;
- msg->msg_controllen = sizeof(u);
ret = sock_recvmsg(sock, msg, MSG_DONTWAIT);
- if (unlikely(msg->msg_controllen != sizeof(u)))
- ret = svc_tcp_sock_process_cmsg(sock, msg, &u.cmsg, ret);
+ if (msg->msg_flags & MSG_CTRUNC) {
+ msg->msg_flags &= ~(MSG_CTRUNC | MSG_EOR);
+ if (ret == 0 || ret == -EIO)
+ ret = svc_tcp_sock_recv_cmsg(sock, &msg->msg_flags);
+ }
return ret;
}
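
The new svc_tcp_sock_recvmsg()/svc_tcp_sock_recv_cmsg() pair reads data without supplying a control buffer and only goes back for the control message when the kernel sets MSG_CTRUNC, i.e. when a non-data TLS record arrived. Below is a rough userspace analogue of that "lazy cmsg" pattern over a kTLS socket. It assumes an already connected, TLS-enabled fd, uses the uapi names from <linux/tls.h>, and hard-codes the TLS alert content type (21); it is an illustration of the shape of the code, not of the sunrpc helpers.

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282			/* matches include/linux/socket.h */
#endif

/* Re-read with a control buffer once MSG_CTRUNC told us a cmsg was dropped. */
static ssize_t read_pending_cmsg(int fd)
{
	char ctrl[CMSG_SPACE(sizeof(unsigned char))];
	unsigned char alert[2];
	struct iovec iov = { .iov_base = alert, .iov_len = sizeof(alert) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	ssize_t ret = recvmsg(fd, &msg, MSG_DONTWAIT);

	if (ret > 0) {
		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

		if (cmsg && cmsg->cmsg_level == SOL_TLS &&
		    cmsg->cmsg_type == TLS_GET_RECORD_TYPE &&
		    *(unsigned char *)CMSG_DATA(cmsg) == 21 /* alert */)
			fprintf(stderr, "peer sent TLS alert %u/%u\n",
				alert[0], alert[1]);
	}
	return ret;
}

ssize_t lazy_tls_recv(int fd, void *buf, size_t len)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	ssize_t ret = recvmsg(fd, &msg, MSG_DONTWAIT);

	/* No control buffer was supplied, so a control record sets MSG_CTRUNC. */
	if (msg.msg_flags & MSG_CTRUNC)
		return read_pending_cmsg(fd);
	return ret;
}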
@@ -321,7 +348,7 @@ static ssize_t svc_tcp_read_msg(struct svc_rqst *rqstp, size_t buflen,
iov_iter_advance(&msg.msg_iter, seek);
buflen -= seek;
}
- len = svc_tcp_sock_recv_cmsg(svsk, &msg);
+ len = svc_tcp_sock_recvmsg(svsk, &msg);
if (len > 0)
svc_flush_bvec(bvec, len, seek);
@@ -809,6 +836,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
/* data might have come in before data_ready set up */
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
+ set_bit(XPT_RPCB_UNREG, &svsk->sk_xprt.xpt_flags);
/* make sure we get destination address info */
switch (svsk->sk_sk->sk_family) {
@@ -1018,7 +1046,7 @@ static ssize_t svc_tcp_read_marker(struct svc_sock *svsk,
iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen;
iov.iov_len = want;
iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, want);
- len = svc_tcp_sock_recv_cmsg(svsk, &msg);
+ len = svc_tcp_sock_recvmsg(svsk, &msg);
if (len < 0)
return len;
svsk->sk_tcplen += len;
@@ -1197,7 +1225,7 @@ err_noclose:
* that the pages backing @xdr are unchanging.
*/
static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
- rpc_fraghdr marker, unsigned int *sentp)
+ rpc_fraghdr marker)
{
struct msghdr msg = {
.msg_flags = MSG_SPLICE_PAGES,
@@ -1206,8 +1234,6 @@ static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
void *buf;
int ret;
- *sentp = 0;
-
/* The stream record marker is copied into a temporary page
* fragment buffer so that it can be included in rq_bvec.
*/
@@ -1225,10 +1251,7 @@ static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
1 + count, sizeof(marker) + rqstp->rq_res.len);
ret = sock_sendmsg(svsk->sk_sock, &msg);
page_frag_free(buf);
- if (ret < 0)
- return ret;
- *sentp += ret;
- return 0;
+ return ret;
}
/**
@@ -1247,8 +1270,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
struct xdr_buf *xdr = &rqstp->rq_res;
rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT |
(u32)xdr->len);
- unsigned int sent;
- int err;
+ int sent;
svc_tcp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
rqstp->rq_xprt_ctxt = NULL;
@@ -1256,9 +1278,9 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
mutex_lock(&xprt->xpt_mutex);
if (svc_xprt_is_dead(xprt))
goto out_notconn;
- err = svc_tcp_sendmsg(svsk, rqstp, marker, &sent);
- trace_svcsock_tcp_send(xprt, err < 0 ? (long)err : sent);
- if (err < 0 || sent != (xdr->len + sizeof(marker)))
+ sent = svc_tcp_sendmsg(svsk, rqstp, marker);
+ trace_svcsock_tcp_send(xprt, sent);
+ if (sent < 0 || sent != (xdr->len + sizeof(marker)))
goto out_close;
mutex_unlock(&xprt->xpt_mutex);
return sent;
@@ -1267,10 +1289,10 @@ out_notconn:
mutex_unlock(&xprt->xpt_mutex);
return -ENOTCONN;
out_close:
- pr_notice("rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
+ pr_notice("rpc-srv/tcp: %s: %s %d when sending %zu bytes - shutting down socket\n",
xprt->xpt_server->sv_name,
- (err < 0) ? "got error" : "sent",
- (err < 0) ? err : sent, xdr->len);
+ (sent < 0) ? "got error" : "sent",
+ sent, xdr->len + sizeof(marker));
svc_xprt_deferred_close(xprt);
mutex_unlock(&xprt->xpt_mutex);
return -EAGAIN;
@@ -1329,6 +1351,7 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
if (sk->sk_state == TCP_LISTEN) {
strcpy(svsk->sk_xprt.xpt_remotebuf, "listener");
set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags);
+ set_bit(XPT_RPCB_UNREG, &svsk->sk_xprt.xpt_flags);
sk->sk_data_ready = svc_tcp_listen_data_ready;
set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
} else {
diff --git a/net/sunrpc/sysfs.c b/net/sunrpc/sysfs.c
index 09434e1143c5..8b01b7ae2690 100644
--- a/net/sunrpc/sysfs.c
+++ b/net/sunrpc/sysfs.c
@@ -389,7 +389,7 @@ static ssize_t rpc_sysfs_xprt_dstaddr_store(struct kobject *kobj,
saddr = (struct sockaddr *)&xprt->addr;
port = rpc_get_port(saddr);
- /* buf_len is the len until the first occurence of either
+ /* buf_len is the len until the first occurrence of either
* '\n' or '\0'
*/
buf_len = strcspn(buf, "\n");
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 2ea00e354ba6..70efc727a9cd 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -37,19 +37,6 @@ xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);
-__be32 *
-xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
-{
- unsigned int len;
-
- if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
- return NULL;
- obj->len = len;
- obj->data = (u8 *) p;
- return p + XDR_QUADLEN(len);
-}
-EXPORT_SYMBOL_GPL(xdr_decode_netobj);
-
/**
* xdr_encode_opaque_fixed - Encode fixed length opaque data
* @p: pointer to current position in XDR buffer.
@@ -102,21 +89,6 @@ xdr_encode_string(__be32 *p, const char *string)
}
EXPORT_SYMBOL_GPL(xdr_encode_string);
-__be32 *
-xdr_decode_string_inplace(__be32 *p, char **sp,
- unsigned int *lenp, unsigned int maxlen)
-{
- u32 len;
-
- len = be32_to_cpu(*p++);
- if (len > maxlen)
- return NULL;
- *lenp = len;
- *sp = (char *) p;
- return p + XDR_QUADLEN(len);
-}
-EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
-
/**
* xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
* @buf: XDR buffer where string resides
@@ -993,21 +965,18 @@ EXPORT_SYMBOL_GPL(xdr_init_encode);
* xdr_init_encode_pages - Initialize an xdr_stream for encoding into pages
* @xdr: pointer to xdr_stream struct
* @buf: pointer to XDR buffer into which to encode data
- * @pages: list of pages to decode into
- * @rqst: pointer to controlling rpc_rqst, for debugging
*
*/
-void xdr_init_encode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
- struct page **pages, struct rpc_rqst *rqst)
+void xdr_init_encode_pages(struct xdr_stream *xdr, struct xdr_buf *buf)
{
xdr_reset_scratch_buffer(xdr);
xdr->buf = buf;
- xdr->page_ptr = pages;
+ xdr->page_ptr = buf->pages;
xdr->iov = NULL;
- xdr->p = page_address(*pages);
+ xdr->p = page_address(*xdr->page_ptr);
xdr->end = (void *)xdr->p + min_t(u32, buf->buflen, PAGE_SIZE);
- xdr->rqst = rqst;
+ xdr->rqst = NULL;
}
EXPORT_SYMBOL_GPL(xdr_init_encode_pages);
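
The decode helpers removed earlier in this file (xdr_decode_netobj, xdr_decode_string_inplace) all implement the same XDR rule from RFC 4506: a variable-length opaque is a 4-byte big-endian length, the data bytes, then zero padding to the next 4-byte boundary. A standalone decoder for just that rule, independent of the SUNRPC stream API, looks like this:

#include <stdint.h>
#include <stdio.h>

/* Round a byte count up to the XDR 4-byte alignment (RFC 4506, sec. 4.10). */
static size_t xdr_pad(size_t len)
{
	return (len + 3) & ~(size_t)3;
}

/*
 * Decode one variable-length opaque: 4-byte big-endian length, then the
 * bytes, then zero padding to a 4-byte boundary.  Returns the number of
 * buffer bytes consumed, or 0 if the buffer is too short.
 */
static size_t xdr_decode_opaque(const uint8_t *buf, size_t buflen,
				const uint8_t **data, size_t *len)
{
	uint32_t n;

	if (buflen < 4)
		return 0;
	n = (uint32_t)buf[0] << 24 | (uint32_t)buf[1] << 16 |
	    (uint32_t)buf[2] << 8  | buf[3];
	if (4 + xdr_pad(n) > buflen)
		return 0;
	*data = buf + 4;
	*len = n;
	return 4 + xdr_pad(n);
}

int main(void)
{
	/* "abc" encoded as an XDR opaque: length 3, data, one pad byte. */
	static const uint8_t wire[] = { 0, 0, 0, 3, 'a', 'b', 'c', 0 };
	const uint8_t *data;
	size_t len, used = xdr_decode_opaque(wire, sizeof(wire), &data, &len);

	printf("consumed %zu bytes, opaque len %zu: %.*s\n",
	       used, len, (int)len, data);
	return 0;
}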
@@ -2248,88 +2217,6 @@ out:
EXPORT_SYMBOL_GPL(xdr_process_buf);
/**
- * xdr_stream_decode_opaque - Decode variable length opaque
- * @xdr: pointer to xdr_stream
- * @ptr: location to store opaque data
- * @size: size of storage buffer @ptr
- *
- * Return values:
- * On success, returns size of object stored in *@ptr
- * %-EBADMSG on XDR buffer overflow
- * %-EMSGSIZE on overflow of storage buffer @ptr
- */
-ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size)
-{
- ssize_t ret;
- void *p;
-
- ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
- if (ret <= 0)
- return ret;
- memcpy(ptr, p, ret);
- return ret;
-}
-EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque);
-
-/**
- * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
- * @xdr: pointer to xdr_stream
- * @ptr: location to store pointer to opaque data
- * @maxlen: maximum acceptable object size
- * @gfp_flags: GFP mask to use
- *
- * Return values:
- * On success, returns size of object stored in *@ptr
- * %-EBADMSG on XDR buffer overflow
- * %-EMSGSIZE if the size of the object would exceed @maxlen
- * %-ENOMEM on memory allocation failure
- */
-ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
- size_t maxlen, gfp_t gfp_flags)
-{
- ssize_t ret;
- void *p;
-
- ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
- if (ret > 0) {
- *ptr = kmemdup(p, ret, gfp_flags);
- if (*ptr != NULL)
- return ret;
- ret = -ENOMEM;
- }
- *ptr = NULL;
- return ret;
-}
-EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup);
-
-/**
- * xdr_stream_decode_string - Decode variable length string
- * @xdr: pointer to xdr_stream
- * @str: location to store string
- * @size: size of storage buffer @str
- *
- * Return values:
- * On success, returns length of NUL-terminated string stored in *@str
- * %-EBADMSG on XDR buffer overflow
- * %-EMSGSIZE on overflow of storage buffer @str
- */
-ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size)
-{
- ssize_t ret;
- void *p;
-
- ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
- if (ret > 0) {
- memcpy(str, p, ret);
- str[ret] = '\0';
- return strlen(str);
- }
- *str = '\0';
- return ret;
-}
-EXPORT_SYMBOL_GPL(xdr_stream_decode_string);
-
-/**
* xdr_stream_decode_string_dup - Decode and duplicate variable length string
* @xdr: pointer to xdr_stream
* @str: location to store pointer to string
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 1478c41c7e9d..3aac1456e23e 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -190,7 +190,7 @@ rpcrdma_alloc_sparse_pages(struct xdr_buf *buf)
ppages = buf->pages + (buf->page_base >> PAGE_SHIFT);
while (len > 0) {
if (!*ppages)
- *ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
+ *ppages = alloc_page(GFP_NOWAIT);
if (!*ppages)
return -ENOBUFS;
ppages++;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 04ff66758fc3..3aa987e7f072 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -358,7 +358,7 @@ xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
static int
xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
- struct cmsghdr *cmsg, int ret)
+ unsigned int *msg_flags, struct cmsghdr *cmsg, int ret)
{
u8 content_type = tls_get_record_type(sock->sk, cmsg);
u8 level, description;
@@ -371,7 +371,7 @@ xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
* record, even though there might be more frames
* waiting to be decrypted.
*/
- msg->msg_flags &= ~MSG_EOR;
+ *msg_flags &= ~MSG_EOR;
break;
case TLS_RECORD_TYPE_ALERT:
tls_alert_recv(sock->sk, msg, &level, &description);
@@ -386,19 +386,33 @@ xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
}
static int
-xs_sock_recv_cmsg(struct socket *sock, struct msghdr *msg, int flags)
+xs_sock_recv_cmsg(struct socket *sock, unsigned int *msg_flags, int flags)
{
union {
struct cmsghdr cmsg;
u8 buf[CMSG_SPACE(sizeof(u8))];
} u;
+ u8 alert[2];
+ struct kvec alert_kvec = {
+ .iov_base = alert,
+ .iov_len = sizeof(alert),
+ };
+ struct msghdr msg = {
+ .msg_flags = *msg_flags,
+ .msg_control = &u,
+ .msg_controllen = sizeof(u),
+ };
int ret;
- msg->msg_control = &u;
- msg->msg_controllen = sizeof(u);
- ret = sock_recvmsg(sock, msg, flags);
- if (msg->msg_controllen != sizeof(u))
- ret = xs_sock_process_cmsg(sock, msg, &u.cmsg, ret);
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &alert_kvec, 1,
+ alert_kvec.iov_len);
+ ret = sock_recvmsg(sock, &msg, flags);
+ if (ret > 0) {
+ if (tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT)
+ iov_iter_revert(&msg.msg_iter, ret);
+ ret = xs_sock_process_cmsg(sock, &msg, msg_flags, &u.cmsg,
+ -EAGAIN);
+ }
return ret;
}
@@ -408,7 +422,13 @@ xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek)
ssize_t ret;
if (seek != 0)
iov_iter_advance(&msg->msg_iter, seek);
- ret = xs_sock_recv_cmsg(sock, msg, flags);
+ ret = sock_recvmsg(sock, msg, flags);
+ /* Handle TLS inband control message lazily */
+ if (msg->msg_flags & MSG_CTRUNC) {
+ msg->msg_flags &= ~(MSG_CTRUNC | MSG_EOR);
+ if (ret == 0 || ret == -EIO)
+ ret = xs_sock_recv_cmsg(sock, &msg->msg_flags, flags);
+ }
return ret > 0 ? ret + seek : ret;
}
@@ -434,7 +454,7 @@ xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
size_t count)
{
iov_iter_discard(&msg->msg_iter, ITER_DEST, count);
- return xs_sock_recv_cmsg(sock, msg, flags);
+ return xs_sock_recvmsg(sock, msg, flags, 0);
}
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index fd0796269eed..6f5c54cbf8d9 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -79,7 +79,7 @@ void tipc_set_node_addr(struct net *net, u32 addr)
pr_info("Node number set to %u\n", addr);
}
-char *tipc_nodeid2string(char *str, u8 *id)
+int tipc_nodeid2string(char *str, u8 *id)
{
int i;
u8 c;
@@ -109,7 +109,7 @@ char *tipc_nodeid2string(char *str, u8 *id)
if (i == NODE_ID_LEN) {
memcpy(str, id, NODE_ID_LEN);
str[NODE_ID_LEN] = 0;
- return str;
+ return i;
}
/* Translate to hex string */
@@ -120,5 +120,5 @@ char *tipc_nodeid2string(char *str, u8 *id)
for (i = NODE_ID_STR_LEN - 2; str[i] == '0'; i--)
str[i] = 0;
- return str;
+ return i + 1;
}
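
With this change tipc_nodeid2string() reports the length of the string it produced, so the link.c callers below can compare the return value against NODE_ID_LEN instead of calling strlen() on the buffer. The sketch that follows is a simplified standalone version of that conversion; NODE_ID_LEN (16) and the 33-byte string buffer are assumed to match the TIPC header values, and the printable-string test is reduced to "printable prefix, NUL padding after it".

#include <ctype.h>
#include <stdio.h>
#include <string.h>

#define NODE_ID_LEN	16			/* assumed: net/tipc/addr.h */
#define NODE_ID_STR_LEN	(NODE_ID_LEN * 2 + 1)

/*
 * Render a 128-bit node id either as the literal string it contains or,
 * failing that, as trimmed hex.  Returns the resulting string length so
 * callers can test "len > NODE_ID_LEN" instead of calling strlen().
 */
static int nodeid2string(char *str, const unsigned char *id)
{
	int i, end;

	/* Accept a printable prefix followed only by NUL padding. */
	for (end = 0; end < NODE_ID_LEN && id[end]; end++)
		if (!isprint(id[end]))
			goto hex;
	for (i = end; i < NODE_ID_LEN; i++)
		if (id[i])
			goto hex;

	memcpy(str, id, NODE_ID_LEN);
	str[NODE_ID_LEN] = '\0';
	return (int)strlen(str);

hex:
	for (i = 0; i < NODE_ID_LEN; i++)
		sprintf(&str[i * 2], "%02x", id[i]);
	for (i = NODE_ID_STR_LEN - 2; i > 0 && str[i] == '0'; i--)
		str[i] = '\0';		/* trim trailing zero digits */
	return i + 1;
}

int main(void)
{
	unsigned char text_id[NODE_ID_LEN] = "node-1";
	unsigned char raw_id[NODE_ID_LEN] = { 0xde, 0xad, 0xbe, 0xef };
	char buf[NODE_ID_STR_LEN];

	printf("%d %s\n", nodeid2string(buf, text_id), buf);
	printf("%d %s\n", nodeid2string(buf, raw_id), buf);
	return 0;
}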
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index 93f82398283d..a113cf7e1f89 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -130,6 +130,6 @@ static inline int in_own_node(struct net *net, u32 addr)
bool tipc_in_scope(bool legacy_format, u32 domain, u32 addr);
void tipc_set_node_id(struct net *net, u8 *id);
void tipc_set_node_addr(struct net *net, u32 addr);
-char *tipc_nodeid2string(char *str, u8 *id);
+int tipc_nodeid2string(char *str, u8 *id);
#endif
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index ea5bb131ebd0..751904f10aab 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -1797,7 +1797,7 @@ exit:
* @b: bearer where the message has been received
*
* If the decryption is successful, the decrypted skb is returned directly or
- * as the callback, the encryption header and auth tag will be trimed out
+ * as the callback, the encryption header and auth tag will be trimmed out
* before forwarding to tipc_rcv() via the tipc_crypto_rcv_complete().
* Otherwise, the skb will be freed!
* Note: RX key(s) can be re-aligned, or in case of no key suitable, TX
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 3ee44d731700..931f55f781a1 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -495,11 +495,9 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
/* Set link name for unicast links only */
if (peer_id) {
- tipc_nodeid2string(self_str, tipc_own_id(net));
- if (strlen(self_str) > 16)
+ if (tipc_nodeid2string(self_str, tipc_own_id(net)) > NODE_ID_LEN)
sprintf(self_str, "%x", self);
- tipc_nodeid2string(peer_str, peer_id);
- if (strlen(peer_str) > 16)
+ if (tipc_nodeid2string(peer_str, peer_id) > NODE_ID_LEN)
sprintf(peer_str, "%x", peer);
}
/* Peer i/f name will be completed by reset/activate message */
@@ -570,8 +568,7 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, u8 *peer_id,
if (peer_id) {
char peer_str[NODE_ID_STR_LEN] = {0,};
- tipc_nodeid2string(peer_str, peer_id);
- if (strlen(peer_str) > 16)
+ if (tipc_nodeid2string(peer_str, peer_id) > NODE_ID_LEN)
sprintf(peer_str, "%x", peer);
/* Broadcast receiver link name: "broadcast-link:<peer>" */
snprintf(l->name, sizeof(l->name), "%s:%s", tipc_bclink_name,
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 7c61d47ea208..1574a83384f8 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2366,7 +2366,7 @@ static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
"err_overload2!");
- atomic_inc(&sk->sk_drops);
+ sk_drops_inc(sk);
err = TIPC_ERR_OVERLOAD;
}
@@ -2458,7 +2458,7 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
/* Overload => reject message back to sender */
onode = tipc_own_addr(sock_net(sk));
- atomic_inc(&sk->sk_drops);
+ sk_drops_inc(sk);
if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
"@sk_enqueue!");
@@ -3642,7 +3642,7 @@ int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
nla_put_u32(skb, TIPC_NLA_SOCK_UID,
from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
- sock_i_uid(sk))) ||
+ sk_uid(sk))) ||
nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
tipc_diag_gen_cookie(sk),
TIPC_NLA_SOCK_PAD))
@@ -3657,7 +3657,7 @@ int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
skb_queue_len(&sk->sk_write_queue)) ||
nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
- atomic_read(&sk->sk_drops)))
+ sk_drops_read(sk)))
goto stat_msg_cancel;
if (tsk->cong_link_cnt &&
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index 8ee0c07d00e9..aad7f96b6009 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -57,7 +57,7 @@
* @conn_idr: identifier set of connection
* @idr_lock: protect the connection identifier set
* @idr_in_use: amount of allocated identifier entry
- * @net: network namspace instance
+ * @net: network namespace instance
* @awork: accept work item
* @rcv_wq: receive workqueue
* @send_wq: send workqueue
@@ -83,7 +83,7 @@ struct tipc_topsrv {
* @sock: socket handler associated with connection
* @flags: indicates connection state
* @server: pointer to connected server
- * @sub_list: lsit to all pertaing subscriptions
+ * @sub_list: list to all pertaining subscriptions
* @sub_lock: lock protecting the subscription list
* @rwork: receive work item
* @outqueue: pointer to first outbound message in queue
@@ -704,8 +704,10 @@ static void tipc_topsrv_stop(struct net *net)
for (id = 0; srv->idr_in_use; id++) {
con = idr_find(&srv->conn_idr, id);
if (con) {
+ conn_get(con);
spin_unlock_bh(&srv->idr_lock);
tipc_conn_close(con);
+ conn_put(con);
spin_lock_bh(&srv->idr_lock);
}
}
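
The topsrv.c hunk takes a reference on the connection (conn_get()) before dropping idr_lock to call tipc_conn_close(), so a concurrent release cannot free the object while the lock is not held. The same pattern in miniature, with a plain C11 atomic counter standing in for the kernel's kref and a mutex standing in for the spinlock; the names and table are invented for illustration only.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
	atomic_int refcnt;
	int id;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct conn *table[4];		/* toy id -> connection table */

static void conn_get(struct conn *c) { atomic_fetch_add(&c->refcnt, 1); }

static void conn_put(struct conn *c)
{
	if (atomic_fetch_sub(&c->refcnt, 1) == 1)
		free(c);
}

static void conn_close(struct conn *c)
{
	/* may sleep, so must run without table_lock held */
	printf("closing conn %d\n", c->id);
	conn_put(c);			/* drop the table's own reference */
}

static void shutdown_all(void)
{
	pthread_mutex_lock(&table_lock);
	for (int id = 0; id < 4; id++) {
		struct conn *c = table[id];

		if (!c)
			continue;
		table[id] = NULL;
		conn_get(c);			/* pin before dropping the lock */
		pthread_mutex_unlock(&table_lock);
		conn_close(c);			/* may drop the last "real" ref */
		conn_put(c);			/* now it is safe to let go */
		pthread_mutex_lock(&table_lock);
	}
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	for (int id = 0; id < 4; id++) {
		table[id] = calloc(1, sizeof(*table[id]));
		table[id]->id = id;
		atomic_init(&table[id]->refcnt, 1);
	}
	shutdown_all();
	return 0;
}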
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 108a4cc2e001..b85ab0fb3b8c 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -172,7 +172,7 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
struct udp_media_addr *dst, struct dst_cache *cache)
{
struct dst_entry *ndst;
- int ttl, err = 0;
+ int ttl, err;
local_bh_disable();
ndst = dst_cache_get(cache);
@@ -197,7 +197,7 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
ttl = ip4_dst_hoplimit(&rt->dst);
udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb, src->ipv4.s_addr,
dst->ipv4.s_addr, 0, ttl, 0, src->port,
- dst->port, false, true);
+ dst->port, false, true, 0);
#if IS_ENABLED(CONFIG_IPV6)
} else {
if (!ndst) {
@@ -217,13 +217,13 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
dst_cache_set_ip6(cache, ndst, &fl6.saddr);
}
ttl = ip6_dst_hoplimit(ndst);
- err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, NULL,
- &src->ipv6, &dst->ipv6, 0, ttl, 0,
- src->port, dst->port, false);
+ udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, NULL,
+ &src->ipv6, &dst->ipv6, 0, ttl, 0,
+ src->port, dst->port, false, 0);
#endif
}
local_bh_enable();
- return err;
+ return 0;
tx_error:
local_bh_enable();
@@ -489,7 +489,7 @@ int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb)
rtnl_lock();
b = tipc_bearer_find(net, bname);
- if (!b) {
+ if (!b || b->bcast_addr.media_id != TIPC_MEDIA_TYPE_UDP) {
rtnl_unlock();
return -EINVAL;
}
@@ -500,7 +500,7 @@ int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb)
rtnl_lock();
b = rtnl_dereference(tn->bearer_list[bid]);
- if (!b) {
+ if (!b || b->bcast_addr.media_id != TIPC_MEDIA_TYPE_UDP) {
rtnl_unlock();
return -EINVAL;
}
diff --git a/net/tls/tls.h b/net/tls/tls.h
index 774859b63f0d..2f86baeb71fc 100644
--- a/net/tls/tls.h
+++ b/net/tls/tls.h
@@ -128,8 +128,9 @@ struct tls_rec {
char aad_space[TLS_AAD_SPACE_SIZE];
u8 iv_data[TLS_MAX_IV_SIZE];
+
+ /* Must be last --ends in a flexible-array member. */
struct aead_request aead_req;
- u8 aead_req_ctx[];
};
int __net_init tls_proc_init(struct net *net);
@@ -141,6 +142,7 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx);
int wait_on_pending_writer(struct sock *sk, long *timeo);
void tls_err_abort(struct sock *sk, int err);
+void tls_strp_abort_strp(struct tls_strparser *strp, int err);
int init_prot_info(struct tls_prot_info *prot,
const struct tls_crypto_info *crypto_info,
@@ -196,7 +198,7 @@ void tls_strp_msg_done(struct tls_strparser *strp);
int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb);
void tls_rx_msg_ready(struct tls_strparser *strp);
-void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh);
+bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh);
int tls_strp_msg_cow(struct tls_sw_context_rx *ctx);
struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx);
int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst);
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index f672a62a9a52..a64ae15b1a60 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -123,17 +123,19 @@ static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
- struct dst_entry *dst = sk_dst_get(sk);
- struct net_device *netdev = NULL;
+ struct net_device *dev, *lowest_dev = NULL;
+ struct dst_entry *dst;
- if (likely(dst)) {
- netdev = netdev_sk_get_lowest_dev(dst->dev, sk);
- dev_hold(netdev);
+ rcu_read_lock();
+ dst = __sk_dst_get(sk);
+ dev = dst ? dst_dev_rcu(dst) : NULL;
+ if (likely(dev)) {
+ lowest_dev = netdev_sk_get_lowest_dev(dev, sk);
+ dev_hold(lowest_dev);
}
+ rcu_read_unlock();
- dst_release(dst);
-
- return netdev;
+ return lowest_dev;
}
static void destroy_record(struct tls_record_info *record)
@@ -1410,7 +1412,7 @@ int __init tls_device_init(void)
if (!dummy_page)
return -ENOMEM;
- destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0);
+ destruct_wq = alloc_workqueue("ktls_device_destruct", WQ_PERCPU, 0);
if (!destruct_wq) {
err = -ENOMEM;
goto err_free_dummy;
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index a3ccb3135e51..39a2ab47fe72 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -255,12 +255,9 @@ int tls_process_cmsg(struct sock *sk, struct msghdr *msg,
if (msg->msg_flags & MSG_MORE)
return -EINVAL;
- rc = tls_handle_open_record(sk, msg->msg_flags);
- if (rc)
- return rc;
-
*record_type = *(unsigned char *)CMSG_DATA(cmsg);
- rc = 0;
+
+ rc = tls_handle_open_record(sk, msg->msg_flags);
break;
default:
return -EINVAL;
diff --git a/net/tls/tls_proc.c b/net/tls/tls_proc.c
index 367666aa07b8..4012c4372d4c 100644
--- a/net/tls/tls_proc.c
+++ b/net/tls/tls_proc.c
@@ -27,17 +27,19 @@ static const struct snmp_mib tls_mib_list[] = {
SNMP_MIB_ITEM("TlsTxRekeyOk", LINUX_MIB_TLSTXREKEYOK),
SNMP_MIB_ITEM("TlsTxRekeyError", LINUX_MIB_TLSTXREKEYERROR),
SNMP_MIB_ITEM("TlsRxRekeyReceived", LINUX_MIB_TLSRXREKEYRECEIVED),
- SNMP_MIB_SENTINEL
};
static int tls_statistics_seq_show(struct seq_file *seq, void *v)
{
- unsigned long buf[LINUX_MIB_TLSMAX] = {};
+ unsigned long buf[ARRAY_SIZE(tls_mib_list)];
+ const int cnt = ARRAY_SIZE(tls_mib_list);
struct net *net = seq->private;
int i;
- snmp_get_cpu_field_batch(buf, tls_mib_list, net->mib.tls_statistics);
- for (i = 0; tls_mib_list[i].name; i++)
+ memset(buf, 0, sizeof(buf));
+ snmp_get_cpu_field_batch_cnt(buf, tls_mib_list, cnt,
+ net->mib.tls_statistics);
+ for (i = 0; i < cnt; i++)
seq_printf(seq, "%-32s\t%lu\n", tls_mib_list[i].name, buf[i]);
return 0;
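
The tls_proc.c change drops the SNMP_MIB_SENTINEL terminator and sizes both the snapshot buffer and the loop with ARRAY_SIZE() of the table itself, so the buffer can never be smaller than what the loop touches. A condensed illustration of the idiom in plain C, with a made-up counter table and nothing TLS-specific:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct mib_entry {
	const char *name;
	int index;
};

/* No sentinel entry: the table length is the single source of truth. */
static const struct mib_entry mib_list[] = {
	{ "CounterA", 0 },
	{ "CounterB", 1 },
	{ "CounterC", 2 },
};

int main(void)
{
	unsigned long buf[ARRAY_SIZE(mib_list)] = { 0 };

	/* fill buf[] from per-CPU counters here ... */
	for (size_t i = 0; i < ARRAY_SIZE(mib_list); i++)
		printf("%-32s\t%lu\n", mib_list[i].name, buf[i]);
	return 0;
}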
diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
index 65b0da6fdf6a..98e12f0ff57e 100644
--- a/net/tls/tls_strp.c
+++ b/net/tls/tls_strp.c
@@ -13,7 +13,7 @@
static struct workqueue_struct *tls_strp_wq;
-static void tls_strp_abort_strp(struct tls_strparser *strp, int err)
+void tls_strp_abort_strp(struct tls_strparser *strp, int err)
{
if (strp->stopped)
return;
@@ -211,11 +211,17 @@ static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
struct sk_buff *in_skb, unsigned int offset,
size_t in_len)
{
+ unsigned int nfrag = skb->len / PAGE_SIZE;
size_t len, chunk;
skb_frag_t *frag;
int sz;
- frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];
+ if (unlikely(nfrag >= skb_shinfo(skb)->nr_frags)) {
+ DEBUG_NET_WARN_ON_ONCE(1);
+ return -EMSGSIZE;
+ }
+
+ frag = &skb_shinfo(skb)->frags[nfrag];
len = in_len;
/* First make sure we got the header */
@@ -475,7 +481,7 @@ static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len)
strp->stm.offset = offset;
}
-void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
+bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
{
struct strp_msg *rxm;
struct tls_msg *tlm;
@@ -484,8 +490,11 @@ void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len);
if (!strp->copy_mode && force_refresh) {
- if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len))
- return;
+ if (unlikely(tcp_inq(strp->sk) < strp->stm.full_len)) {
+ WRITE_ONCE(strp->msg_ready, 0);
+ memset(&strp->stm, 0, sizeof(strp->stm));
+ return false;
+ }
tls_strp_load_anchor_with_queue(strp, strp->stm.full_len);
}
@@ -495,6 +504,8 @@ void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
rxm->offset = strp->stm.offset;
tlm = tls_msg(strp->anchor);
tlm->control = strp->mark;
+
+ return true;
}
/* Called with lock held on lower socket */
@@ -512,14 +523,11 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
if (inq < strp->stm.full_len)
return tls_strp_read_copy(strp, true);
+ tls_strp_load_anchor_with_queue(strp, inq);
if (!strp->stm.full_len) {
- tls_strp_load_anchor_with_queue(strp, inq);
-
sz = tls_rx_msg_size(strp, strp->anchor);
- if (sz < 0) {
- tls_strp_abort_strp(strp, sz);
+ if (sz < 0)
return sz;
- }
strp->stm.full_len = sz;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index fc88e34b7f33..d17135369980 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -872,6 +872,19 @@ more_data:
delta = msg->sg.size;
psock->eval = sk_psock_msg_verdict(sk, psock, msg);
delta -= msg->sg.size;
+
+ if ((s32)delta > 0) {
+ /* A positive delta means bpf_msg_pop_data() ran and shrank the
+ * plaintext, so the already-reserved ciphertext buffer must
+ * shrink by the same amount. Subtracting delta is enough to
+ * compute the new ciphertext length because ktls does not use
+ * block encryption.
+ */
+ struct sk_msg *enc = &ctx->open_rec->msg_encrypted;
+
+ sk_msg_trim(sk, enc, enc->sg.size - delta);
+ }
}
if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
!enospc && !full_record) {
@@ -1041,7 +1054,7 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
if (ret == -EINPROGRESS)
num_async++;
else if (ret != -EAGAIN)
- goto send_end;
+ goto end;
}
}
@@ -1099,8 +1112,11 @@ alloc_encrypted:
goto send_end;
tls_ctx->pending_open_record_frags = true;
- if (sk_msg_full(msg_pl))
+ if (sk_msg_full(msg_pl)) {
full_record = true;
+ sk_msg_trim(sk, msg_en,
+ msg_pl->sg.size + prot->overhead_size);
+ }
if (full_record || eor)
goto copied;
@@ -1136,6 +1152,13 @@ alloc_encrypted:
} else if (ret != -EAGAIN)
goto send_end;
}
+
+ /* Transmit if any encryptions have completed */
+ if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
+ cancel_delayed_work(&ctx->tx_work.work);
+ tls_tx_records(sk, msg->msg_flags);
+ }
+
continue;
rollback_iter:
copied -= try_to_copy;
@@ -1191,6 +1214,12 @@ copied:
goto send_end;
}
}
+
+ /* Transmit if any encryptions have completed */
+ if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
+ cancel_delayed_work(&ctx->tx_work.work);
+ tls_tx_records(sk, msg->msg_flags);
+ }
}
continue;
@@ -1210,8 +1239,9 @@ trim_sgl:
goto alloc_encrypted;
}
+send_end:
if (!num_async) {
- goto send_end;
+ goto end;
} else if (num_zc || eor) {
int err;
@@ -1229,7 +1259,7 @@ trim_sgl:
tls_tx_records(sk, msg->msg_flags);
}
-send_end:
+end:
ret = sk_stream_error(sk, msg->msg_flags, ret);
return copied > 0 ? copied : ret;
}
@@ -1371,7 +1401,8 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
return sock_intr_errno(timeo);
}
- tls_strp_msg_load(&ctx->strp, released);
+ if (unlikely(!tls_strp_msg_load(&ctx->strp, released)))
+ return tls_rx_rec_wait(sk, psock, nonblock, false);
return 1;
}
@@ -1623,8 +1654,10 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
if (unlikely(darg->async)) {
err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
- if (err)
- __skb_queue_tail(&ctx->async_hold, darg->skb);
+ if (err) {
+ err = tls_decrypt_async_wait(ctx);
+ darg->async = false;
+ }
return err;
}
@@ -1794,6 +1827,9 @@ int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
return tls_decrypt_sg(sk, NULL, sgout, &darg);
}
+/* All records returned from a recvmsg() call must have the same type.
+ * 0 is not a valid content type. Use it as "no type reported, yet".
+ */
static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
u8 *control)
{
@@ -2037,8 +2073,10 @@ int tls_sw_recvmsg(struct sock *sk,
if (err < 0)
goto end;
+ /* process_rx_list() will set @control if it processed any records */
copied = err;
- if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more)
+ if (len <= copied || rx_more ||
+ (control && control != TLS_RECORD_TYPE_DATA))
goto end;
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
@@ -2455,8 +2493,7 @@ int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
return data_len + TLS_HEADER_SIZE;
read_failure:
- tls_err_abort(strp->sk, ret);
-
+ tls_strp_abort_strp(strp, ret);
return ret;
}
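
In the first tls_sw.c hunk of this section, a positive delta after the BPF verdict means the plaintext scatterlist shrank, and the ciphertext reservation is trimmed by exactly that delta because the AEAD ciphers used by ktls add a fixed per-record overhead rather than padding to a block size. The arithmetic in isolation, with made-up sizes and an assumed overhead value:

#include <stdio.h>

int main(void)
{
	unsigned int overhead = 22;		/* per-record overhead (example) */
	unsigned int plain_before = 4096;
	unsigned int cipher = plain_before + overhead;

	/* bpf_msg_pop_data() removed some payload bytes */
	unsigned int plain_after = 3000;
	unsigned int delta = plain_before - plain_after;

	/* no block padding: ciphertext shrinks by exactly the same delta */
	cipher -= delta;
	printf("ciphertext now %u bytes (plaintext %u + overhead %u)\n",
	       cipher, plain_after, overhead);
	return 0;
}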
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 22e170fb5dda..768098dec231 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -646,9 +646,6 @@ static void unix_sock_destructor(struct sock *sk)
return;
}
- if (sk->sk_peer_pid)
- pidfs_put_pid(sk->sk_peer_pid);
-
if (u->addr)
unix_release_addr(u->addr);
@@ -660,6 +657,11 @@ static void unix_sock_destructor(struct sock *sk)
#endif
}
+static unsigned int unix_skb_len(const struct sk_buff *skb)
+{
+ return skb->len - UNIXCB(skb).consumed;
+}
+
static void unix_release_sock(struct sock *sk, int embrion)
{
struct unix_sock *u = unix_sk(sk);
@@ -694,10 +696,16 @@ static void unix_release_sock(struct sock *sk, int embrion)
if (skpair != NULL) {
if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
+ struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
+
+#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+ if (skb && !unix_skb_len(skb))
+ skb = skb_peek_next(skb, &sk->sk_receive_queue);
+#endif
unix_state_lock(skpair);
/* No more writes */
WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
- if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)
+ if (skb || embrion)
WRITE_ONCE(skpair->sk_err, ECONNRESET);
unix_state_unlock(skpair);
skpair->sk_state_change(skpair);
@@ -769,7 +777,6 @@ static void drop_peercred(struct unix_peercred *peercred)
swap(peercred->peer_pid, pid);
swap(peercred->peer_cred, cred);
- pidfs_put_pid(pid);
put_pid(pid);
put_cred(cred);
}
@@ -802,7 +809,6 @@ static void copy_peercred(struct sock *sk, struct sock *peersk)
spin_lock(&sk->sk_peer_lock);
sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
- pidfs_get_pid(sk->sk_peer_pid);
sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
spin_unlock(&sk->sk_peer_lock);
}
@@ -923,6 +929,52 @@ static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
#define unix_show_fdinfo NULL
#endif
+static bool unix_custom_sockopt(int optname)
+{
+ switch (optname) {
+ case SO_INQ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int unix_setsockopt(struct socket *sock, int level, int optname,
+ sockptr_t optval, unsigned int optlen)
+{
+ struct unix_sock *u = unix_sk(sock->sk);
+ struct sock *sk = sock->sk;
+ int val;
+
+ if (level != SOL_SOCKET)
+ return -EOPNOTSUPP;
+
+ if (!unix_custom_sockopt(optname))
+ return sock_setsockopt(sock, level, optname, optval, optlen);
+
+ if (optlen != sizeof(int))
+ return -EINVAL;
+
+ if (copy_from_sockptr(&val, optval, sizeof(val)))
+ return -EFAULT;
+
+ switch (optname) {
+ case SO_INQ:
+ if (sk->sk_type != SOCK_STREAM)
+ return -EINVAL;
+
+ if (val > 1 || val < 0)
+ return -EINVAL;
+
+ WRITE_ONCE(u->recvmsg_inq, val);
+ break;
+ default:
+ return -ENOPROTOOPT;
+ }
+
+ return 0;
+}
+
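
The unix_setsockopt() added above handles only the options it treats as "custom" (SO_INQ here) and forwards everything else at SOL_SOCKET to the generic sock_setsockopt(). The standalone sketch below shows that dispatch shape with invented option numbers and a stand-in socket struct, to make clear where the fallback sits; it is not the AF_UNIX code itself.

#include <errno.h>
#include <stddef.h>
#include <string.h>

#define MY_SOL_SOCKET	1
#define MY_SO_INQ	0x1000		/* hypothetical option number */

struct my_sock {
	int recvmsg_inq;		/* 0/1: report queued bytes on recvmsg */
};

/* Stand-in for the generic socket-level option handler. */
static int generic_setsockopt(struct my_sock *sk, int optname,
			      const void *optval, size_t optlen)
{
	(void)sk; (void)optname; (void)optval; (void)optlen;
	return 0;
}

static int is_custom_option(int optname)
{
	return optname == MY_SO_INQ;
}

int my_setsockopt(struct my_sock *sk, int level, int optname,
		  const void *optval, size_t optlen)
{
	int val;

	if (level != MY_SOL_SOCKET)
		return -EOPNOTSUPP;
	if (!is_custom_option(optname))
		return generic_setsockopt(sk, optname, optval, optlen);

	if (optlen != sizeof(int))
		return -EINVAL;
	memcpy(&val, optval, sizeof(val));

	switch (optname) {
	case MY_SO_INQ:
		if (val < 0 || val > 1)
			return -EINVAL;
		sk->recvmsg_inq = val;
		return 0;
	default:
		return -ENOPROTOOPT;
	}
}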
static const struct proto_ops unix_stream_ops = {
.family = PF_UNIX,
.owner = THIS_MODULE,
@@ -939,6 +991,7 @@ static const struct proto_ops unix_stream_ops = {
#endif
.listen = unix_listen,
.shutdown = unix_shutdown,
+ .setsockopt = unix_setsockopt,
.sendmsg = unix_stream_sendmsg,
.recvmsg = unix_stream_recvmsg,
.read_skb = unix_stream_read_skb,
@@ -1105,6 +1158,7 @@ static int unix_create(struct net *net, struct socket *sock, int protocol,
switch (sock->type) {
case SOCK_STREAM:
+ set_bit(SOCK_CUSTOM_SOCKOPT, &sock->flags);
sock->ops = &unix_stream_ops;
break;
/*
@@ -1333,7 +1387,7 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
* Get the parent directory, calculate the hash for last
* component.
*/
- dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
+ dentry = start_creating_path(AT_FDCWD, addr->name->sun_path, &parent, 0);
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
goto out;
@@ -1363,7 +1417,7 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
unix_table_double_unlock(net, old_hash, new_hash);
unix_insert_bsd_socket(sk);
mutex_unlock(&u->bindlock);
- done_path_create(&parent, dentry);
+ end_creating_path(&parent, dentry);
return 0;
out_unlock:
@@ -1373,7 +1427,7 @@ out_unlink:
/* failed after successful mknod? unlink what we'd created... */
vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
out_path:
- done_path_create(&parent, dentry);
+ end_creating_path(&parent, dentry);
out:
unix_release_addr(addr);
return err == -EEXIST ? -EADDRINUSE : err;
@@ -1836,6 +1890,9 @@ static int unix_accept(struct socket *sock, struct socket *newsock,
skb_free_datagram(sk, skb);
wake_up_interruptible(&unix_sk(sk)->peer_wait);
+ if (tsk->sk_type == SOCK_STREAM)
+ set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags);
+
/* attach accepted sock to socket */
unix_state_lock(tsk);
unix_update_edges(unix_sk(tsk));
@@ -1934,7 +1991,7 @@ static void unix_destruct_scm(struct sk_buff *skb)
struct scm_cookie scm;
memset(&scm, 0, sizeof(scm));
- scm.pid = UNIXCB(skb).pid;
+ scm.pid = UNIXCB(skb).pid;
if (UNIXCB(skb).fp)
unix_detach_fds(&scm, skb);
@@ -1960,22 +2017,46 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen
return err;
}
-/*
+static void unix_skb_to_scm(struct sk_buff *skb, struct scm_cookie *scm)
+{
+ scm_set_cred(scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
+ unix_set_secdata(scm, skb);
+}
+
+/**
+ * unix_maybe_add_creds() - Adds current task uid/gid and struct pid to skb if needed.
+ * @skb: skb to attach creds to.
+ * @sk: Sender sock.
+ * @other: Receiver sock.
+ *
 * Some apps rely on write() giving SCM_CREDENTIALS.
* We include credentials if source or destination socket
* asserted SOCK_PASSCRED.
+ *
+ * Context: May sleep.
+ * Return: On success zero, on error a negative error code is returned.
*/
-static void unix_maybe_add_creds(struct sk_buff *skb, const struct sock *sk,
- const struct sock *other)
+static int unix_maybe_add_creds(struct sk_buff *skb, const struct sock *sk,
+ const struct sock *other)
{
if (UNIXCB(skb).pid)
- return;
+ return 0;
if (unix_may_passcred(sk) || unix_may_passcred(other) ||
!other->sk_socket) {
- UNIXCB(skb).pid = get_pid(task_tgid(current));
+ struct pid *pid;
+ int err;
+
+ pid = task_tgid(current);
+ err = pidfs_register_pid(pid);
+ if (unlikely(err))
+ return err;
+
+ UNIXCB(skb).pid = get_pid(pid);
current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
}
+
+ return 0;
}
static bool unix_skb_scm_eq(struct sk_buff *skb,
@@ -2110,6 +2191,10 @@ lookup:
goto out_sock_put;
}
+ err = unix_maybe_add_creds(skb, sk, other);
+ if (err)
+ goto out_sock_put;
+
restart:
sk_locked = 0;
unix_state_lock(other);
@@ -2218,7 +2303,6 @@ restart_locked:
if (sock_flag(other, SOCK_RCVTSTAMP))
__net_timestamp(skb);
- unix_maybe_add_creds(skb, sk, other);
scm_stat_add(other, skb);
skb_queue_tail(&other->sk_receive_queue, skb);
unix_state_unlock(other);
@@ -2262,6 +2346,10 @@ static int queue_oob(struct sock *sk, struct msghdr *msg, struct sock *other,
if (err < 0)
goto out;
+ err = unix_maybe_add_creds(skb, sk, other);
+ if (err)
+ goto out;
+
skb_put(skb, 1);
err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
@@ -2281,11 +2369,11 @@ static int queue_oob(struct sock *sk, struct msghdr *msg, struct sock *other,
goto out_unlock;
}
- unix_maybe_add_creds(skb, sk, other);
scm_stat_add(other, skb);
spin_lock(&other->sk_receive_queue.lock);
WRITE_ONCE(ousk->oob_skb, skb);
+ WRITE_ONCE(ousk->inq_len, ousk->inq_len + 1);
__skb_queue_tail(&other->sk_receive_queue, skb);
spin_unlock(&other->sk_receive_queue.lock);
@@ -2308,6 +2396,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
struct sock *sk = sock->sk;
struct sk_buff *skb = NULL;
struct sock *other = NULL;
+ struct unix_sock *otheru;
struct scm_cookie scm;
bool fds_sent = false;
int err, sent = 0;
@@ -2331,14 +2420,16 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
if (msg->msg_namelen) {
err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
goto out_err;
- } else {
- other = unix_peer(sk);
- if (!other) {
- err = -ENOTCONN;
- goto out_err;
- }
}
+ other = unix_peer(sk);
+ if (!other) {
+ err = -ENOTCONN;
+ goto out_err;
+ }
+
+ otheru = unix_sk(other);
+
if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
goto out_pipe;
@@ -2375,10 +2466,13 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
fds_sent = true;
+ err = unix_maybe_add_creds(skb, sk, other);
+ if (err)
+ goto out_free;
+
if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- err = skb_splice_from_iter(skb, &msg->msg_iter, size,
- sk->sk_allocation);
+ err = skb_splice_from_iter(skb, &msg->msg_iter, size);
if (err < 0)
goto out_free;
@@ -2405,9 +2499,13 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
goto out_free;
}
- unix_maybe_add_creds(skb, sk, other);
scm_stat_add(other, skb);
- skb_queue_tail(&other->sk_receive_queue, skb);
+
+ spin_lock(&other->sk_receive_queue.lock);
+ WRITE_ONCE(otheru->inq_len, otheru->inq_len + skb->len);
+ __skb_queue_tail(&other->sk_receive_queue, skb);
+ spin_unlock(&other->sk_receive_queue.lock);
+
unix_state_unlock(other);
other->sk_data_ready(other);
sent += size;
@@ -2517,12 +2615,10 @@ int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
&err, &timeo, last));
if (!skb) { /* implies iolock unlocked */
- unix_state_lock(sk);
/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
- (sk->sk_shutdown & RCV_SHUTDOWN))
+ (READ_ONCE(sk->sk_shutdown) & RCV_SHUTDOWN))
err = 0;
- unix_state_unlock(sk);
goto out;
}
@@ -2553,8 +2649,7 @@ int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
memset(&scm, 0, sizeof(scm));
- scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
- unix_set_secdata(&scm, skb);
+ unix_skb_to_scm(skb, &scm);
if (!(flags & MSG_PEEK)) {
if (UNIXCB(skb).fp)
@@ -2661,11 +2756,6 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
return timeo;
}
-static unsigned int unix_skb_len(const struct sk_buff *skb)
-{
- return skb->len - UNIXCB(skb).consumed;
-}
-
struct unix_stream_read_state {
int (*recv_actor)(struct sk_buff *, int, int,
struct unix_stream_read_state *);
@@ -2680,11 +2770,11 @@ struct unix_stream_read_state {
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
static int unix_stream_recv_urg(struct unix_stream_read_state *state)
{
+ struct sk_buff *oob_skb, *read_skb = NULL;
struct socket *sock = state->socket;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
int chunk = 1;
- struct sk_buff *oob_skb;
mutex_lock(&u->iolock);
unix_state_lock(sk);
@@ -2699,8 +2789,16 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
oob_skb = u->oob_skb;
- if (!(state->flags & MSG_PEEK))
+ if (!(state->flags & MSG_PEEK)) {
WRITE_ONCE(u->oob_skb, NULL);
+ WRITE_ONCE(u->inq_len, u->inq_len - 1);
+
+ if (oob_skb->prev != (struct sk_buff *)&sk->sk_receive_queue &&
+ !unix_skb_len(oob_skb->prev)) {
+ read_skb = oob_skb->prev;
+ __skb_unlink(read_skb, &sk->sk_receive_queue);
+ }
+ }
spin_unlock(&sk->sk_receive_queue.lock);
unix_state_unlock(sk);
@@ -2712,6 +2810,8 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
mutex_unlock(&u->iolock);
+ consume_skb(read_skb);
+
if (chunk < 0)
return -EFAULT;
@@ -2774,6 +2874,7 @@ unlock:
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
+ struct sk_buff_head *queue = &sk->sk_receive_queue;
struct unix_sock *u = unix_sk(sk);
struct sk_buff *skb;
int err;
@@ -2781,60 +2882,57 @@ static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
return -ENOTCONN;
- mutex_lock(&u->iolock);
- skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
- mutex_unlock(&u->iolock);
- if (!skb)
+ err = sock_error(sk);
+ if (err)
return err;
-#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
- if (unlikely(skb == READ_ONCE(u->oob_skb))) {
- bool drop = false;
-
- unix_state_lock(sk);
+ mutex_lock(&u->iolock);
+ spin_lock(&queue->lock);
- if (sock_flag(sk, SOCK_DEAD)) {
- unix_state_unlock(sk);
- kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
- return -ECONNRESET;
- }
+ skb = __skb_dequeue(queue);
+ if (!skb) {
+ spin_unlock(&queue->lock);
+ mutex_unlock(&u->iolock);
+ return -EAGAIN;
+ }
- spin_lock(&sk->sk_receive_queue.lock);
- if (likely(skb == u->oob_skb)) {
- WRITE_ONCE(u->oob_skb, NULL);
- drop = true;
- }
- spin_unlock(&sk->sk_receive_queue.lock);
+ WRITE_ONCE(u->inq_len, u->inq_len - skb->len);
- unix_state_unlock(sk);
+#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+ if (skb == u->oob_skb) {
+ WRITE_ONCE(u->oob_skb, NULL);
+ spin_unlock(&queue->lock);
+ mutex_unlock(&u->iolock);
- if (drop) {
- kfree_skb_reason(skb, SKB_DROP_REASON_UNIX_SKIP_OOB);
- return -EAGAIN;
- }
+ kfree_skb_reason(skb, SKB_DROP_REASON_UNIX_SKIP_OOB);
+ return -EAGAIN;
}
#endif
+ spin_unlock(&queue->lock);
+ mutex_unlock(&u->iolock);
+
return recv_actor(sk, skb);
}
static int unix_stream_read_generic(struct unix_stream_read_state *state,
bool freezable)
{
- struct scm_cookie scm;
+ int noblock = state->flags & MSG_DONTWAIT;
struct socket *sock = state->socket;
+ struct msghdr *msg = state->msg;
struct sock *sk = sock->sk;
- struct unix_sock *u = unix_sk(sk);
- int copied = 0;
+ size_t size = state->size;
int flags = state->flags;
- int noblock = flags & MSG_DONTWAIT;
bool check_creds = false;
- int target;
+ struct scm_cookie scm;
+ unsigned int last_len;
+ struct unix_sock *u;
+ int copied = 0;
int err = 0;
long timeo;
+ int target;
int skip;
- size_t size = state->size;
- unsigned int last_len;
if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
err = -EINVAL;
@@ -2854,6 +2952,8 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
memset(&scm, 0, sizeof(scm));
+ u = unix_sk(sk);
+
/* Lock the socket to prevent queue disordering
* while sleeps in memcpy_tomsg
*/
@@ -2939,20 +3039,17 @@ unlock:
break;
} else if (unix_may_passcred(sk)) {
/* Copy credentials */
- scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
- unix_set_secdata(&scm, skb);
+ unix_skb_to_scm(skb, &scm);
check_creds = true;
}
/* Copy address just once */
- if (state->msg && state->msg->msg_name) {
- DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
- state->msg->msg_name);
- unix_copy_addr(state->msg, skb->sk);
+ if (msg && msg->msg_name) {
+ DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
- BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk,
- state->msg->msg_name,
- &state->msg->msg_namelen);
+ unix_copy_addr(msg, skb->sk);
+ BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, msg->msg_name,
+ &msg->msg_namelen);
sunaddr = NULL;
}
@@ -2981,7 +3078,11 @@ unlock:
if (unix_skb_len(skb))
break;
- skb_unlink(skb, &sk->sk_receive_queue);
+ spin_lock(&sk->sk_receive_queue.lock);
+ WRITE_ONCE(u->inq_len, u->inq_len - skb->len);
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ spin_unlock(&sk->sk_receive_queue.lock);
+
consume_skb(skb);
if (scm.fp)
@@ -3010,10 +3111,17 @@ unlock:
} while (size);
mutex_unlock(&u->iolock);
- if (state->msg)
- scm_recv_unix(sock, state->msg, &scm, flags);
- else
+ if (msg) {
+ scm_recv_unix(sock, msg, &scm, flags);
+
+ if (READ_ONCE(u->recvmsg_inq) || msg->msg_get_inq) {
+ msg->msg_inq = READ_ONCE(u->inq_len);
+ put_cmsg(msg, SOL_SOCKET, SCM_INQ,
+ sizeof(msg->msg_inq), &msg->msg_inq);
+ }
+ } else {
scm_destroy(&scm);
+ }
out:
return copied ? : err;
}
@@ -3152,9 +3260,11 @@ long unix_inq_len(struct sock *sk)
if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
return -EINVAL;
+ if (sk->sk_type == SOCK_STREAM)
+ return READ_ONCE(unix_sk(sk)->inq_len);
+
spin_lock(&sk->sk_receive_queue.lock);
- if (sk->sk_type == SOCK_STREAM ||
- sk->sk_type == SOCK_SEQPACKET) {
+ if (sk->sk_type == SOCK_SEQPACKET) {
skb_queue_walk(&sk->sk_receive_queue, skb)
amount += unix_skb_len(skb);
} else {
@@ -3176,7 +3286,6 @@ EXPORT_SYMBOL_GPL(unix_outq_len);
static int unix_open_file(struct sock *sk)
{
- struct path path;
struct file *f;
int fd;
@@ -3186,27 +3295,20 @@ static int unix_open_file(struct sock *sk)
if (!smp_load_acquire(&unix_sk(sk)->addr))
return -ENOENT;
- path = unix_sk(sk)->path;
- if (!path.dentry)
+ if (!unix_sk(sk)->path.dentry)
return -ENOENT;
- path_get(&path);
-
fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0)
- goto out;
+ return fd;
- f = dentry_open(&path, O_PATH, current_cred());
+ f = dentry_open(&unix_sk(sk)->path, O_PATH, current_cred());
if (IS_ERR(f)) {
put_unused_fd(fd);
- fd = PTR_ERR(f);
- goto out;
+ return PTR_ERR(f);
}
fd_install(fd, f);
-out:
- path_put(&path);
-
return fd;
}
@@ -3682,7 +3784,7 @@ static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
goto unlock;
}
- uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
+ uid = from_kuid_munged(seq_user_ns(seq), sk_uid(sk));
meta.seq = seq;
prog = bpf_iter_get_info(&meta, false);
ret = unix_prog_seq_show(prog, &meta, v, uid);
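The af_unix hunks above track queued bytes in u->inq_len and surface them to userspace as an SCM_INQ control message when recvmsg_inq is enabled; a minimal userspace sketch of consuming that cmsg follows. The socket option that sets u->recvmsg_inq is not part of this hunk, so the SCM_INQ fallback define below is only a labeled placeholder assumption, not the real value.

/*
 * Sketch only, not part of the patch: read data plus the SCM_INQ hint
 * from an AF_UNIX stream socket.
 */
#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/socket.h>

#ifndef SCM_INQ
#define SCM_INQ 0	/* assumption: placeholder, not the real constant */
#endif

static ssize_t recv_with_inq(int fd, void *buf, size_t len, int *inq)
{
        char cbuf[CMSG_SPACE(sizeof(int))];
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct msghdr msg = {
                .msg_iov = &iov,
                .msg_iovlen = 1,
                .msg_control = cbuf,
                .msg_controllen = sizeof(cbuf),
        };
        struct cmsghdr *cm;
        ssize_t n = recvmsg(fd, &msg, 0);

        *inq = -1;	/* -1 means "no SCM_INQ cmsg seen" */
        if (n < 0)
                return n;

        for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
                if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_INQ)
                        memcpy(inq, CMSG_DATA(cm), sizeof(*inq));

        return n;
}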
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 79b182d0e62a..ca3473026151 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -106,7 +106,7 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb,
struct user_namespace *user_ns)
{
- uid_t uid = from_kuid_munged(user_ns, sock_i_uid(sk));
+ uid_t uid = from_kuid_munged(user_ns, sk_uid(sk));
return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid);
}
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 01e2b9452c75..684ab03137b6 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -592,7 +592,7 @@ static DECLARE_WORK(unix_gc_work, __unix_gc);
void unix_gc(void)
{
WRITE_ONCE(gc_in_progress, true);
- queue_work(system_unbound_wq, &unix_gc_work);
+ queue_work(system_dfl_wq, &unix_gc_work);
}
#define UNIX_INFLIGHT_TRIGGER_GC 16000
diff --git a/net/vmw_vsock/Kconfig b/net/vmw_vsock/Kconfig
index 56356d2980c8..8e803c4828c4 100644
--- a/net/vmw_vsock/Kconfig
+++ b/net/vmw_vsock/Kconfig
@@ -72,7 +72,7 @@ config VIRTIO_VSOCKETS_COMMON
config HYPERV_VSOCKETS
tristate "Hyper-V transport for Virtual Sockets"
- depends on VSOCKETS && HYPERV
+ depends on VSOCKETS && HYPERV_VMBUS
help
This module implements a Hyper-V transport for Virtual Sockets.
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 2e7a3034e965..76763247a377 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -407,6 +407,8 @@ EXPORT_SYMBOL_GPL(vsock_enqueue_accept);
static bool vsock_use_local_transport(unsigned int remote_cid)
{
+ lockdep_assert_held(&vsock_register_mutex);
+
if (!transport_local)
return false;
@@ -464,6 +466,8 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
remote_flags = vsk->remote_addr.svm_flags;
+ mutex_lock(&vsock_register_mutex);
+
switch (sk->sk_type) {
case SOCK_DGRAM:
new_transport = transport_dgram;
@@ -479,13 +483,30 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
new_transport = transport_h2g;
break;
default:
- return -ESOCKTNOSUPPORT;
+ ret = -ESOCKTNOSUPPORT;
+ goto err;
}
- if (vsk->transport) {
- if (vsk->transport == new_transport)
- return 0;
+ if (vsk->transport && vsk->transport == new_transport) {
+ ret = 0;
+ goto err;
+ }
+
+ /* We increase the module refcnt to prevent the transport from being
+ * unloaded while there are open sockets assigned to it.
+ */
+ if (!new_transport || !try_module_get(new_transport->module)) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* It's safe to release the mutex after a successful try_module_get().
+ * Whichever transport `new_transport` points at, it won't go away until
+ * the last module_put() below or in vsock_deassign_transport().
+ */
+ mutex_unlock(&vsock_register_mutex);
+ if (vsk->transport) {
/* transport->release() must be called with sock lock acquired.
* This path can only be taken during vsock_connect(), where we
* have already held the sock lock. In the other cases, this
@@ -505,12 +526,6 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
vsk->peer_shutdown = 0;
}
- /* We increase the module refcnt to prevent the transport unloading
- * while there are open sockets assigned to it.
- */
- if (!new_transport || !try_module_get(new_transport->module))
- return -ENODEV;
-
if (sk->sk_type == SOCK_SEQPACKET) {
if (!new_transport->seqpacket_allow ||
!new_transport->seqpacket_allow(remote_cid)) {
@@ -528,12 +543,31 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
vsk->transport = new_transport;
return 0;
+err:
+ mutex_unlock(&vsock_register_mutex);
+ return ret;
}
EXPORT_SYMBOL_GPL(vsock_assign_transport);
+/*
+ * Provide safe access to static transport_{h2g,g2h,dgram,local} callbacks.
+ * Otherwise we may race with module removal. Do not use on `vsk->transport`.
+ */
+static u32 vsock_registered_transport_cid(const struct vsock_transport **transport)
+{
+ u32 cid = VMADDR_CID_ANY;
+
+ mutex_lock(&vsock_register_mutex);
+ if (*transport)
+ cid = (*transport)->get_local_cid();
+ mutex_unlock(&vsock_register_mutex);
+
+ return cid;
+}
+
bool vsock_find_cid(unsigned int cid)
{
- if (transport_g2h && cid == transport_g2h->get_local_cid())
+ if (cid == vsock_registered_transport_cid(&transport_g2h))
return true;
if (transport_h2g && cid == VMADDR_CID_HOST)
@@ -655,7 +689,8 @@ static int __vsock_bind_connectible(struct vsock_sock *vsk,
unsigned int i;
for (i = 0; i < MAX_PORT_RETRIES; i++) {
- if (port <= LAST_RESERVED_PORT)
+ if (port == VMADDR_PORT_ANY ||
+ port <= LAST_RESERVED_PORT)
port = LAST_RESERVED_PORT + 1;
new_addr.svm_port = port++;
@@ -994,17 +1029,7 @@ static int vsock_getname(struct socket *sock,
vm_addr = &vsk->local_addr;
}
- if (!vm_addr) {
- err = -EINVAL;
- goto out;
- }
-
- /* sys_getsockname() and sys_getpeername() pass us a
- * MAX_SOCK_ADDR-sized buffer and don't set addr_len. Unfortunately
- * that macro is defined in socket.c instead of .h, so we hardcode its
- * value here.
- */
- BUILD_BUG_ON(sizeof(*vm_addr) > 128);
+ BUILD_BUG_ON(sizeof(*vm_addr) > sizeof(struct sockaddr_storage));
memcpy(addr, vm_addr, sizeof(*vm_addr));
err = sizeof(*vm_addr);
@@ -1389,6 +1414,28 @@ static int vsock_do_ioctl(struct socket *sock, unsigned int cmd,
vsk = vsock_sk(sk);
switch (cmd) {
+ case SIOCINQ: {
+ ssize_t n_bytes;
+
+ if (!vsk->transport) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (sock_type_connectible(sk->sk_type) &&
+ sk->sk_state == TCP_LISTEN) {
+ ret = -EINVAL;
+ break;
+ }
+
+ n_bytes = vsock_stream_has_data(vsk);
+ if (n_bytes < 0) {
+ ret = n_bytes;
+ break;
+ }
+ ret = put_user(n_bytes, arg);
+ break;
+ }
case SIOCOUTQ: {
ssize_t n_bytes;
@@ -1602,7 +1649,7 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
* reschedule it, then ungrab the socket refcount to
* keep it balanced.
*/
- if (mod_delayed_work(system_wq, &vsk->connect_work,
+ if (mod_delayed_work(system_percpu_wq, &vsk->connect_work,
timeout))
sock_put(sk);
@@ -2536,18 +2583,19 @@ static long vsock_dev_do_ioctl(struct file *filp,
unsigned int cmd, void __user *ptr)
{
u32 __user *p = ptr;
- u32 cid = VMADDR_CID_ANY;
int retval = 0;
+ u32 cid;
switch (cmd) {
case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
/* To be compatible with the VMCI behavior, we prioritize the
* guest CID instead of well-know host CID (VMADDR_CID_HOST).
*/
- if (transport_g2h)
- cid = transport_g2h->get_local_cid();
- else if (transport_h2g)
- cid = transport_h2g->get_local_cid();
+ cid = vsock_registered_transport_cid(&transport_g2h);
+ if (cid == VMADDR_CID_ANY)
+ cid = vsock_registered_transport_cid(&transport_h2g);
+ if (cid == VMADDR_CID_ANY)
+ cid = vsock_registered_transport_cid(&transport_local);
if (put_user(cid, p) != 0)
retval = -EFAULT;
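The SIOCINQ handler added to vsock_do_ioctl() above reports the unread byte count on a connectible AF_VSOCK socket (listeners get -EINVAL, sockets without a transport -EOPNOTSUPP). A minimal userspace sketch, assuming a connected SOCK_STREAM vsock descriptor:

/* Sketch only, not part of the patch. */
#include <sys/ioctl.h>
#include <linux/sockios.h>	/* SIOCINQ */

static int vsock_unread_bytes(int fd)
{
        int unread = 0;

        if (ioctl(fd, SIOCINQ, &unread) < 0)
                return -1;	/* errno set by ioctl() */

        return unread;
}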
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index 31342ab502b4..432fcbbd14d4 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -694,15 +694,26 @@ out:
static s64 hvs_stream_has_data(struct vsock_sock *vsk)
{
struct hvsock *hvs = vsk->trans;
+ bool need_refill;
s64 ret;
if (hvs->recv_data_len > 0)
- return 1;
+ return hvs->recv_data_len;
switch (hvs_channel_readable_payload(hvs->chan)) {
case 1:
- ret = 1;
- break;
+ need_refill = !hvs->recv_desc;
+ if (!need_refill)
+ return -EIO;
+
+ hvs->recv_desc = hv_pkt_iter_first(hvs->chan);
+ if (!hvs->recv_desc)
+ return -ENOBUFS;
+
+ ret = hvs_update_recv_data(hvs);
+ if (ret)
+ return ret;
+ return hvs->recv_data_len;
case 0:
vsk->peer_shutdown |= SEND_SHUTDOWN;
ret = 0;
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index f0e48e6911fc..8c867023a2e5 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -307,7 +307,7 @@ out_rcu:
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
- int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM;
+ int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
struct scatterlist pkt, *p;
struct virtqueue *vq;
struct sk_buff *skb;
@@ -316,7 +316,7 @@ static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
vq = vsock->vqs[VSOCK_VQ_RX];
do {
- skb = virtio_vsock_alloc_skb(total_len, GFP_KERNEL);
+ skb = virtio_vsock_alloc_linear_skb(total_len, GFP_KERNEL);
if (!skb)
break;
@@ -624,8 +624,9 @@ static void virtio_transport_rx_work(struct work_struct *work)
do {
virtqueue_disable_cb(vq);
for (;;) {
+ unsigned int len, payload_len;
+ struct virtio_vsock_hdr *hdr;
struct sk_buff *skb;
- unsigned int len;
if (!virtio_transport_more_replies(vsock)) {
/* Stop rx until the device processes already
@@ -642,13 +643,22 @@ static void virtio_transport_rx_work(struct work_struct *work)
vsock->rx_buf_nr--;
/* Drop short/long packets */
- if (unlikely(len < sizeof(struct virtio_vsock_hdr) ||
+ if (unlikely(len < sizeof(*hdr) ||
len > virtio_vsock_skb_len(skb))) {
kfree_skb(skb);
continue;
}
- virtio_vsock_skb_rx_put(skb);
+ hdr = virtio_vsock_hdr(skb);
+ payload_len = le32_to_cpu(hdr->len);
+ if (unlikely(payload_len > len - sizeof(*hdr))) {
+ kfree_skb(skb);
+ continue;
+ }
+
+ if (payload_len)
+ virtio_vsock_skb_put(skb, payload_len);
+
virtio_transport_deliver_tap_pkt(skb);
virtio_transport_recv_pkt(&virtio_transport, skb);
}
@@ -916,7 +926,7 @@ static int __init virtio_vsock_init(void)
{
int ret;
- virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
+ virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", WQ_PERCPU, 0);
if (!virtio_vsock_workqueue)
return -ENOMEM;
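The rx_work hunk above now validates the device-supplied header length before exposing payload bytes. A standalone sketch of that bound check, using simplified stand-in types (the kernel uses struct virtio_vsock_hdr with a little-endian length field):

/* Sketch only, not part of the patch. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct vsock_hdr_sketch {
        uint32_t len;		/* payload length claimed by the device */
};

/* Accept the buffer only if the claimed payload fits in what was received. */
static bool rx_len_is_sane(const struct vsock_hdr_sketch *hdr,
                           size_t received_len)
{
        if (received_len < sizeof(*hdr))
                return false;	/* short packet, no complete header */

        return hdr->len <= received_len - sizeof(*hdr);
}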
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 1b5d9896edae..dcc8a1d5851e 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -105,11 +105,14 @@ static int virtio_transport_fill_skb(struct sk_buff *skb,
size_t len,
bool zcopy)
{
+ struct msghdr *msg = info->msg;
+
if (zcopy)
- return __zerocopy_sg_from_iter(info->msg, NULL, skb,
- &info->msg->msg_iter, len, NULL);
+ return __zerocopy_sg_from_iter(msg, NULL, skb,
+ &msg->msg_iter, len, NULL);
- return memcpy_from_msg(skb_put(skb, len), info->msg, len);
+ virtio_vsock_skb_put(skb, len);
+ return skb_copy_datagram_from_iter_full(skb, 0, &msg->msg_iter, len);
}
static void virtio_transport_init_hdr(struct sk_buff *skb,
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index b370070194fa..7eccd6708d66 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -119,6 +119,8 @@ vmci_transport_packet_init(struct vmci_transport_packet *pkt,
u16 proto,
struct vmci_handle handle)
{
+ memset(pkt, 0, sizeof(*pkt));
+
/* We register the stream control handler as an any cid handle so we
* must always send from a source address of VMADDR_CID_ANY
*/
@@ -131,8 +133,6 @@ vmci_transport_packet_init(struct vmci_transport_packet *pkt,
pkt->type = type;
pkt->src_port = src->svm_port;
pkt->dst_port = dst->svm_port;
- memset(&pkt->proto, 0, sizeof(pkt->proto));
- memset(&pkt->_reserved2, 0, sizeof(pkt->_reserved2));
switch (pkt->type) {
case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
diff --git a/net/vmw_vsock/vsock_loopback.c b/net/vmw_vsock/vsock_loopback.c
index 6e78927a598e..bc2ff918b315 100644
--- a/net/vmw_vsock/vsock_loopback.c
+++ b/net/vmw_vsock/vsock_loopback.c
@@ -139,7 +139,7 @@ static int __init vsock_loopback_init(void)
struct vsock_loopback *vsock = &the_vsock_loopback;
int ret;
- vsock->workqueue = alloc_workqueue("vsock-loopback", 0, 0);
+ vsock->workqueue = alloc_workqueue("vsock-loopback", WQ_PERCPU, 0);
if (!vsock->workqueue)
return -ENOMEM;
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 193734b7f9dc..68221b1ab45e 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -100,6 +100,11 @@ static u32 cfg80211_get_end_freq(const struct cfg80211_chan_def *chandef,
punctured = 0) : (punctured >>= 1))) \
if (!(punctured & 1))
+#define for_each_s1g_subchan(chandef, freq_khz) \
+ for (freq_khz = cfg80211_s1g_get_start_freq_khz(chandef); \
+ freq_khz <= cfg80211_s1g_get_end_freq_khz(chandef); \
+ freq_khz += MHZ_TO_KHZ(1))
+
struct cfg80211_per_bw_puncturing_values {
u8 len;
const u16 *valid_values;
@@ -336,8 +341,7 @@ static bool cfg80211_valid_center_freq(u32 center,
bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
{
- u32 control_freq, oper_freq;
- int oper_width, control_width;
+ u32 control_freq, control_freq_khz, start_khz, end_khz;
if (!chandef->chan)
return false;
@@ -363,27 +367,16 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
case NL80211_CHAN_WIDTH_4:
case NL80211_CHAN_WIDTH_8:
case NL80211_CHAN_WIDTH_16:
- if (chandef->chan->band != NL80211_BAND_S1GHZ)
- return false;
-
- control_freq = ieee80211_channel_to_khz(chandef->chan);
- oper_freq = ieee80211_chandef_to_khz(chandef);
- control_width = nl80211_chan_width_to_mhz(
- ieee80211_s1g_channel_width(
- chandef->chan));
- oper_width = cfg80211_chandef_get_width(chandef);
-
- if (oper_width < 0 || control_width < 0)
+ if (!cfg80211_chandef_is_s1g(chandef))
return false;
if (chandef->center_freq2)
return false;
- if (control_freq + MHZ_TO_KHZ(control_width) / 2 >
- oper_freq + MHZ_TO_KHZ(oper_width) / 2)
- return false;
+ control_freq_khz = ieee80211_channel_to_khz(chandef->chan);
+ start_khz = cfg80211_s1g_get_start_freq_khz(chandef);
+ end_khz = cfg80211_s1g_get_end_freq_khz(chandef);
- if (control_freq - MHZ_TO_KHZ(control_width) / 2 <
- oper_freq - MHZ_TO_KHZ(oper_width) / 2)
+ if (control_freq_khz < start_khz || control_freq_khz > end_khz)
return false;
break;
case NL80211_CHAN_WIDTH_80P80:
@@ -461,6 +454,9 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
!cfg80211_edmg_chandef_valid(chandef))
return false;
+ if (!cfg80211_chandef_is_s1g(chandef) && chandef->s1g_primary_2mhz)
+ return false;
+
return valid_puncturing_bitmap(chandef);
}
EXPORT_SYMBOL(cfg80211_chandef_valid);
@@ -725,6 +721,10 @@ static int cfg80211_get_chans_dfs_required(struct wiphy *wiphy,
{
struct ieee80211_channel *c;
+ /* DFS is not required for S1G */
+ if (cfg80211_chandef_is_s1g(chandef))
+ return 0;
+
for_each_subchan(chandef, freq, cf) {
c = ieee80211_get_channel_khz(wiphy, freq);
if (!c)
@@ -1130,6 +1130,55 @@ static bool cfg80211_edmg_usable(struct wiphy *wiphy, u8 edmg_channels,
return true;
}
+static bool cfg80211_s1g_usable(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef)
+{
+ u32 freq_khz;
+ const struct ieee80211_channel *chan;
+ u32 pri_khz = ieee80211_channel_to_khz(chandef->chan);
+ u32 end_khz = cfg80211_s1g_get_end_freq_khz(chandef);
+ u32 start_khz = cfg80211_s1g_get_start_freq_khz(chandef);
+ int width_mhz = cfg80211_chandef_get_width(chandef);
+ u32 prohibited_flags = IEEE80211_CHAN_DISABLED;
+
+ if (width_mhz >= 16)
+ prohibited_flags |= IEEE80211_CHAN_NO_16MHZ;
+ if (width_mhz >= 8)
+ prohibited_flags |= IEEE80211_CHAN_NO_8MHZ;
+ if (width_mhz >= 4)
+ prohibited_flags |= IEEE80211_CHAN_NO_4MHZ;
+
+ if (chandef->chan->flags & IEEE80211_CHAN_S1G_NO_PRIMARY)
+ return false;
+
+ if (pri_khz < start_khz || pri_khz > end_khz)
+ return false;
+
+ for_each_s1g_subchan(chandef, freq_khz) {
+ chan = ieee80211_get_channel_khz(wiphy, freq_khz);
+ if (!chan || (chan->flags & prohibited_flags))
+ return false;
+ }
+
+ if (chandef->s1g_primary_2mhz) {
+ u32 sib_khz;
+ const struct ieee80211_channel *sibling;
+
+ sibling = cfg80211_s1g_get_primary_sibling(wiphy, chandef);
+ if (!sibling)
+ return false;
+
+ if (sibling->flags & IEEE80211_CHAN_S1G_NO_PRIMARY)
+ return false;
+
+ sib_khz = ieee80211_channel_to_khz(sibling);
+ if (sib_khz < start_khz || sib_khz > end_khz)
+ return false;
+ }
+
+ return true;
+}
+
bool _cfg80211_chandef_usable(struct wiphy *wiphy,
const struct cfg80211_chan_def *chandef,
u32 prohibited_flags,
@@ -1154,6 +1203,9 @@ bool _cfg80211_chandef_usable(struct wiphy *wiphy,
ext_nss_cap = __le16_to_cpu(vht_cap->vht_mcs.tx_highest) &
IEEE80211_VHT_EXT_NSS_BW_CAPABLE;
+ if (cfg80211_chandef_is_s1g(chandef))
+ return cfg80211_s1g_usable(wiphy, chandef);
+
if (edmg_cap->channels &&
!cfg80211_edmg_usable(wiphy,
chandef->edmg.channels,
@@ -1165,21 +1217,6 @@ bool _cfg80211_chandef_usable(struct wiphy *wiphy,
control_freq = chandef->chan->center_freq;
switch (chandef->width) {
- case NL80211_CHAN_WIDTH_1:
- width = 1;
- break;
- case NL80211_CHAN_WIDTH_2:
- width = 2;
- break;
- case NL80211_CHAN_WIDTH_4:
- width = 4;
- break;
- case NL80211_CHAN_WIDTH_8:
- width = 8;
- break;
- case NL80211_CHAN_WIDTH_16:
- width = 16;
- break;
case NL80211_CHAN_WIDTH_5:
width = 5;
break;
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 5c3c72df0591..797f9f2004a6 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -239,7 +239,7 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
rdev->opencount--;
- if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
+ if (rdev->scan_req && rdev->scan_req->req.wdev == wdev) {
if (WARN_ON(!rdev->scan_req->notified &&
(!rdev->int_scan_req ||
!rdev->int_scan_req->notified)))
@@ -995,11 +995,38 @@ int wiphy_register(struct wiphy *wiphy)
wiphy->max_num_akm_suites > CFG80211_MAX_NUM_AKM_SUITES)
return -EINVAL;
+ /* Allocate radio configuration space for multi-radio wiphy */
+ if (wiphy->n_radio > 0) {
+ int idx;
+
+ wiphy->radio_cfg = kcalloc(wiphy->n_radio,
+ sizeof(*wiphy->radio_cfg),
+ GFP_KERNEL);
+ if (!wiphy->radio_cfg)
+ return -ENOMEM;
+ /*
+ * Initialize wiphy radio parameters to IEEE 802.11
+ * MIB default values. RTS threshold is disabled by
+ * default with the special -1 value.
+ */
+ for (idx = 0; idx < wiphy->n_radio; idx++)
+ wiphy->radio_cfg[idx].rts_threshold = (u32)-1;
+ }
+
/* check and set up bitrates */
ieee80211_set_bitrate_flags(wiphy);
rdev->wiphy.features |= NL80211_FEATURE_SCAN_FLUSH;
+ if (rdev->wiphy.bss_param_support & WIPHY_BSS_PARAM_P2P_CTWINDOW)
+ rdev->wiphy.features |= NL80211_FEATURE_P2P_GO_CTWIN;
+ else if (rdev->wiphy.features & NL80211_FEATURE_P2P_GO_CTWIN)
+ rdev->wiphy.bss_param_support |= WIPHY_BSS_PARAM_P2P_CTWINDOW;
+ if (rdev->wiphy.bss_param_support & WIPHY_BSS_PARAM_P2P_OPPPS)
+ rdev->wiphy.features |= NL80211_FEATURE_P2P_GO_OPPPS;
+ else if (rdev->wiphy.features & NL80211_FEATURE_P2P_GO_OPPPS)
+ rdev->wiphy.bss_param_support |= WIPHY_BSS_PARAM_P2P_OPPPS;
+
rtnl_lock();
wiphy_lock(&rdev->wiphy);
res = device_add(&rdev->wiphy.dev);
@@ -1222,6 +1249,7 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
void wiphy_free(struct wiphy *wiphy)
{
+ kfree(wiphy->radio_cfg);
put_device(&wiphy->dev);
}
EXPORT_SYMBOL(wiphy_free);
@@ -1555,7 +1583,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
case NETDEV_DOWN:
wiphy_lock(&rdev->wiphy);
cfg80211_update_iface_num(rdev, wdev->iftype, -1);
- if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
+ if (rdev->scan_req && rdev->scan_req->req.wdev == wdev) {
if (WARN_ON(!rdev->scan_req->notified &&
(!rdev->int_scan_req ||
!rdev->int_scan_req->notified)))
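wiphy_register() above seeds every per-radio config with (u32)-1, i.e. RTS/CTS disabled by default. A small sketch of how a consumer might test that sentinel, using a stand-in struct rather than the real struct wiphy_radio_cfg:

/* Sketch only, not part of the patch. */
#include <stdbool.h>
#include <stdint.h>

#define RTS_DISABLED_SENTINEL ((uint32_t)-1)

struct radio_cfg_sketch {
        uint32_t rts_threshold;
};

static bool radio_rts_enabled(const struct radio_cfg_sketch *cfg)
{
        return cfg->rts_threshold != RTS_DISABLED_SENTINEL;
}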
diff --git a/net/wireless/core.h b/net/wireless/core.h
index c56a35040caa..b6bd7f4d6385 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -21,6 +21,13 @@
#define WIPHY_IDX_INVALID -1
+struct cfg80211_scan_request_int {
+ struct cfg80211_scan_info info;
+ bool notified;
+ /* must be last - variable members */
+ struct cfg80211_scan_request req;
+};
+
struct cfg80211_registered_device {
const struct cfg80211_ops *ops;
struct list_head list;
@@ -70,8 +77,8 @@ struct cfg80211_registered_device {
struct rb_root bss_tree;
u32 bss_generation;
u32 bss_entries;
- struct cfg80211_scan_request *scan_req; /* protected by RTNL */
- struct cfg80211_scan_request *int_scan_req;
+ struct cfg80211_scan_request_int *scan_req; /* protected by RTNL */
+ struct cfg80211_scan_request_int *int_scan_req;
struct sk_buff *scan_msg;
struct list_head sched_scan_req_list;
time64_t suspend_at;
diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
index 2613d6ac0fda..46e4317cbd7e 100644
--- a/net/wireless/ethtool.c
+++ b/net/wireless/ethtool.c
@@ -23,7 +23,7 @@ void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
else
strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
- strscpy(info->bus_info, dev_name(wiphy_dev(wdev->wiphy)),
+ strscpy(info->bus_info, dev_name(pdev),
sizeof(info->bus_info));
}
EXPORT_SYMBOL(cfg80211_get_drvinfo);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 05d44a443518..46394eb2086f 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -352,8 +352,25 @@ cfg80211_mlme_check_mlo_compat(const struct ieee80211_multi_link_elem *mle_a,
return -EINVAL;
}
- if (ieee80211_mle_get_ext_mld_capa_op((const u8 *)mle_a) !=
- ieee80211_mle_get_ext_mld_capa_op((const u8 *)mle_b)) {
+ /*
+ * Only verify the values in Extended MLD Capabilities that are
+ * not reserved when transmitted by an AP (and expected to remain the
+ * same over time).
+ * The Recommended Max Simultaneous Links subfield in particular is
+ * reserved when included in a unicast Probe Response frame and may
+ * also change when the AP adds/removes links. The BTM MLD
+ * Recommendation For Multiple APs Support subfield is reserved when
+ * transmitted by an AP. All other bits are currently reserved.
+ * See IEEE P802.11be/D7.0, Table 9-417o.
+ */
+ if ((ieee80211_mle_get_ext_mld_capa_op((const u8 *)mle_a) &
+ (IEEE80211_EHT_ML_EXT_MLD_CAPA_OP_PARAM_UPDATE |
+ IEEE80211_EHT_ML_EXT_MLD_CAPA_NSTR_UPDATE |
+ IEEE80211_EHT_ML_EXT_MLD_CAPA_EMLSR_ENA_ON_ONE_LINK)) !=
+ (ieee80211_mle_get_ext_mld_capa_op((const u8 *)mle_b) &
+ (IEEE80211_EHT_ML_EXT_MLD_CAPA_OP_PARAM_UPDATE |
+ IEEE80211_EHT_ML_EXT_MLD_CAPA_NSTR_UPDATE |
+ IEEE80211_EHT_ML_EXT_MLD_CAPA_EMLSR_ENA_ON_ONE_LINK))) {
NL_SET_ERR_MSG(extack,
"extended link MLD capabilities/ops mismatch");
return -EINVAL;
@@ -850,7 +867,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
mgmt = (const struct ieee80211_mgmt *)params->buf;
- if (!ieee80211_is_mgmt(mgmt->frame_control))
+ if (!ieee80211_is_mgmt(mgmt->frame_control) ||
+ ieee80211_has_order(mgmt->frame_control))
return -EINVAL;
stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE;
@@ -1331,7 +1349,8 @@ void cfg80211_mlo_reconf_add_done(struct net_device *dev,
lockdep_assert_wiphy(wiphy);
trace_cfg80211_mlo_reconf_add_done(dev, data->added_links,
- data->buf, data->len);
+ data->buf, data->len,
+ data->driver_initiated);
if (WARN_ON(!wdev->valid_links))
return;
@@ -1361,11 +1380,16 @@ void cfg80211_mlo_reconf_add_done(struct net_device *dev,
wdev->links[link_id].client.current_bss =
bss_from_pub(bss);
+ if (data->driver_initiated)
+ cfg80211_hold_bss(bss_from_pub(bss));
+
memcpy(wdev->links[link_id].addr,
data->links[link_id].addr,
ETH_ALEN);
} else {
- cfg80211_unhold_bss(bss_from_pub(bss));
+ if (!data->driver_initiated)
+ cfg80211_unhold_bss(bss_from_pub(bss));
+
cfg80211_put_bss(wiphy, bss);
}
}
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 85f139016da2..346dfd2bd987 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -229,6 +229,7 @@ static int validate_beacon_head(const struct nlattr *attr,
unsigned int len = nla_len(attr);
const struct element *elem;
const struct ieee80211_mgmt *mgmt = (void *)data;
+ const struct ieee80211_ext *ext;
unsigned int fixedlen, hdrlen;
bool s1g_bcn;
@@ -237,8 +238,10 @@ static int validate_beacon_head(const struct nlattr *attr,
s1g_bcn = ieee80211_is_s1g_beacon(mgmt->frame_control);
if (s1g_bcn) {
- fixedlen = offsetof(struct ieee80211_ext,
- u.s1g_beacon.variable);
+ ext = (struct ieee80211_ext *)mgmt;
+ fixedlen =
+ offsetof(struct ieee80211_ext, u.s1g_beacon.variable) +
+ ieee80211_s1g_optional_len(ext->frame_control);
hdrlen = offsetof(struct ieee80211_ext, u.s1g_beacon);
} else {
fixedlen = offsetof(struct ieee80211_mgmt,
@@ -309,6 +312,26 @@ static int validate_supported_selectors(const struct nlattr *attr,
return 0;
}
+static int validate_nan_cluster_id(const struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ const u8 *data = nla_data(attr);
+ unsigned int len = nla_len(attr);
+ static const u8 cluster_id_prefix[4] = {0x50, 0x6f, 0x9a, 0x1};
+
+ if (len != ETH_ALEN) {
+ NL_SET_ERR_MSG_ATTR(extack, attr, "bad cluster id length");
+ return -EINVAL;
+ }
+
+ if (memcmp(data, cluster_id_prefix, sizeof(cluster_id_prefix))) {
+ NL_SET_ERR_MSG_ATTR(extack, attr, "invalid cluster id prefix");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* policy for the attributes */
static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR];
@@ -408,6 +431,14 @@ static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
[NL80211_TXRATE_HE_LTF] = NLA_POLICY_RANGE(NLA_U8,
NL80211_RATE_INFO_HE_1XLTF,
NL80211_RATE_INFO_HE_4XLTF),
+ [NL80211_TXRATE_EHT] = NLA_POLICY_EXACT_LEN(sizeof(struct nl80211_txrate_eht)),
+ [NL80211_TXRATE_EHT_GI] = NLA_POLICY_RANGE(NLA_U8,
+ NL80211_RATE_INFO_EHT_GI_0_8,
+ NL80211_RATE_INFO_EHT_GI_3_2),
+ [NL80211_TXRATE_EHT_LTF] = NLA_POLICY_RANGE(NLA_U8,
+ NL80211_RATE_INFO_EHT_1XLTF,
+ NL80211_RATE_INFO_EHT_8XLTF),
+
};
static const struct nla_policy
@@ -479,6 +510,46 @@ nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] = {
[NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
};
+static const struct nla_policy
+nl80211_s1g_short_beacon[NL80211_S1G_SHORT_BEACON_ATTR_MAX + 1] = {
+ [NL80211_S1G_SHORT_BEACON_ATTR_HEAD] =
+ NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_beacon_head,
+ IEEE80211_MAX_DATA_LEN),
+ [NL80211_S1G_SHORT_BEACON_ATTR_TAIL] =
+ NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr,
+ IEEE80211_MAX_DATA_LEN),
+};
+
+static const struct nla_policy
+nl80211_nan_band_conf_policy[NL80211_NAN_BAND_CONF_ATTR_MAX + 1] = {
+ [NL80211_NAN_BAND_CONF_BAND] = NLA_POLICY_MAX(NLA_U8,
+ NUM_NL80211_BANDS - 1),
+ [NL80211_NAN_BAND_CONF_FREQ] = { .type = NLA_U16 },
+ [NL80211_NAN_BAND_CONF_RSSI_CLOSE] = NLA_POLICY_MIN(NLA_S8, -59),
+ [NL80211_NAN_BAND_CONF_RSSI_MIDDLE] = NLA_POLICY_MIN(NLA_S8, -74),
+ [NL80211_NAN_BAND_CONF_WAKE_DW] = NLA_POLICY_MAX(NLA_U8, 5),
+ [NL80211_NAN_BAND_CONF_DISABLE_SCAN] = { .type = NLA_FLAG },
+};
+
+static const struct nla_policy
+nl80211_nan_conf_policy[NL80211_NAN_CONF_ATTR_MAX + 1] = {
+ [NL80211_NAN_CONF_CLUSTER_ID] =
+ NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_nan_cluster_id,
+ ETH_ALEN),
+ [NL80211_NAN_CONF_EXTRA_ATTRS] = { .type = NLA_BINARY,
+ .len = IEEE80211_MAX_DATA_LEN},
+ [NL80211_NAN_CONF_VENDOR_ELEMS] =
+ NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr,
+ IEEE80211_MAX_DATA_LEN),
+ [NL80211_NAN_CONF_BAND_CONFIGS] =
+ NLA_POLICY_NESTED_ARRAY(nl80211_nan_band_conf_policy),
+ [NL80211_NAN_CONF_SCAN_PERIOD] = { .type = NLA_U16 },
+ [NL80211_NAN_CONF_SCAN_DWELL_TIME] = NLA_POLICY_RANGE(NLA_U16, 50, 512),
+ [NL80211_NAN_CONF_DISCOVERY_BEACON_INTERVAL] =
+ NLA_POLICY_RANGE(NLA_U8, 50, 200),
+ [NL80211_NAN_CONF_NOTIFY_DW] = { .type = NLA_FLAG },
+};
+
static const struct netlink_range_validation nl80211_punct_bitmap_range = {
.min = 0,
.max = 0xffff,
@@ -748,6 +819,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN),
[NL80211_ATTR_NAN_MASTER_PREF] = NLA_POLICY_MIN(NLA_U8, 1),
[NL80211_ATTR_BANDS] = { .type = NLA_U32 },
+ [NL80211_ATTR_NAN_CONFIG] = NLA_POLICY_NESTED(nl80211_nan_conf_policy),
[NL80211_ATTR_NAN_FUNC] = { .type = NLA_NESTED },
[NL80211_ATTR_FILS_KEK] = { .type = NLA_BINARY,
.len = FILS_MAX_KEK_LEN },
@@ -854,6 +926,12 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_MLO_RECONF_REM_LINKS] = { .type = NLA_U16 },
[NL80211_ATTR_EPCS] = { .type = NLA_FLAG },
[NL80211_ATTR_ASSOC_MLD_EXT_CAPA_OPS] = { .type = NLA_U16 },
+ [NL80211_ATTR_WIPHY_RADIO_INDEX] = { .type = NLA_U8 },
+ [NL80211_ATTR_S1G_LONG_BEACON_PERIOD] = NLA_POLICY_MIN(NLA_U8, 2),
+ [NL80211_ATTR_S1G_SHORT_BEACON] =
+ NLA_POLICY_NESTED(nl80211_s1g_short_beacon),
+ [NL80211_ATTR_BSS_PARAM] = { .type = NLA_FLAG },
+ [NL80211_ATTR_S1G_PRIMARY_2MHZ] = { .type = NLA_FLAG },
};
/* policy for the key attributes */
@@ -1202,21 +1280,6 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
if ((chan->flags & IEEE80211_CHAN_NO_HE) &&
nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HE))
goto nla_put_failure;
- if ((chan->flags & IEEE80211_CHAN_1MHZ) &&
- nla_put_flag(msg, NL80211_FREQUENCY_ATTR_1MHZ))
- goto nla_put_failure;
- if ((chan->flags & IEEE80211_CHAN_2MHZ) &&
- nla_put_flag(msg, NL80211_FREQUENCY_ATTR_2MHZ))
- goto nla_put_failure;
- if ((chan->flags & IEEE80211_CHAN_4MHZ) &&
- nla_put_flag(msg, NL80211_FREQUENCY_ATTR_4MHZ))
- goto nla_put_failure;
- if ((chan->flags & IEEE80211_CHAN_8MHZ) &&
- nla_put_flag(msg, NL80211_FREQUENCY_ATTR_8MHZ))
- goto nla_put_failure;
- if ((chan->flags & IEEE80211_CHAN_16MHZ) &&
- nla_put_flag(msg, NL80211_FREQUENCY_ATTR_16MHZ))
- goto nla_put_failure;
if ((chan->flags & IEEE80211_CHAN_NO_320MHZ) &&
nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_320MHZ))
goto nla_put_failure;
@@ -1242,6 +1305,15 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
nla_put_flag(msg,
NL80211_FREQUENCY_ATTR_ALLOW_20MHZ_ACTIVITY))
goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_NO_4MHZ) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_4MHZ))
+ goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_NO_8MHZ) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_8MHZ))
+ goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_NO_16MHZ) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_16MHZ))
+ goto nla_put_failure;
}
if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
@@ -2446,6 +2518,7 @@ fail:
static int nl80211_put_radio(struct wiphy *wiphy, struct sk_buff *msg, int idx)
{
const struct wiphy_radio *r = &wiphy->radio[idx];
+ const struct wiphy_radio_cfg *rcfg = &wiphy->radio_cfg[idx];
struct nlattr *radio, *freq;
int i;
@@ -2456,6 +2529,11 @@ static int nl80211_put_radio(struct wiphy *wiphy, struct sk_buff *msg, int idx)
if (nla_put_u32(msg, NL80211_WIPHY_RADIO_ATTR_INDEX, idx))
goto nla_put_failure;
+ if (rcfg->rts_threshold &&
+ nla_put_u32(msg, NL80211_WIPHY_RADIO_ATTR_RTS_THRESHOLD,
+ rcfg->rts_threshold))
+ goto nla_put_failure;
+
if (r->antenna_mask &&
nla_put_u32(msg, NL80211_WIPHY_RADIO_ATTR_ANTENNA_MASK,
r->antenna_mask))
@@ -2522,6 +2600,41 @@ fail:
return -ENOBUFS;
}
+static int nl80211_put_nan_capa(struct wiphy *wiphy, struct sk_buff *msg)
+{
+ struct nlattr *nan_caps;
+
+ nan_caps = nla_nest_start(msg, NL80211_ATTR_NAN_CAPABILITIES);
+ if (!nan_caps)
+ return -ENOBUFS;
+
+ if (wiphy->nan_capa.flags & WIPHY_NAN_FLAGS_CONFIGURABLE_SYNC &&
+ nla_put_flag(msg, NL80211_NAN_CAPA_CONFIGURABLE_SYNC))
+ goto fail;
+
+ if ((wiphy->nan_capa.flags & WIPHY_NAN_FLAGS_USERSPACE_DE) &&
+ nla_put_flag(msg, NL80211_NAN_CAPA_USERSPACE_DE))
+ goto fail;
+
+ if (nla_put_u8(msg, NL80211_NAN_CAPA_OP_MODE,
+ wiphy->nan_capa.op_mode) ||
+ nla_put_u8(msg, NL80211_NAN_CAPA_NUM_ANTENNAS,
+ wiphy->nan_capa.n_antennas) ||
+ nla_put_u16(msg, NL80211_NAN_CAPA_MAX_CHANNEL_SWITCH_TIME,
+ wiphy->nan_capa.max_channel_switch_time) ||
+ nla_put_u8(msg, NL80211_NAN_CAPA_CAPABILITIES,
+ wiphy->nan_capa.dev_capabilities))
+ goto fail;
+
+ nla_nest_end(msg, nan_caps);
+
+ return 0;
+
+fail:
+ nla_nest_cancel(msg, nan_caps);
+ return -ENOBUFS;
+}
+
struct nl80211_dump_wiphy_state {
s64 filter_wiphy;
long start;
@@ -2639,7 +2752,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
u32 tx_ant = 0, rx_ant = 0;
int res;
- res = rdev_get_antenna(rdev, &tx_ant, &rx_ant);
+ res = rdev_get_antenna(rdev, -1, &tx_ant, &rx_ant);
if (!res) {
if (nla_put_u32(msg,
NL80211_ATTR_WIPHY_ANTENNA_TX,
@@ -2996,6 +3109,40 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
rdev->wiphy.ext_features))
goto nla_put_failure;
+ if (rdev->wiphy.bss_param_support) {
+ struct nlattr *nested;
+ u32 parsup = rdev->wiphy.bss_param_support;
+
+ nested = nla_nest_start(msg, NL80211_ATTR_BSS_PARAM);
+ if (!nested)
+ goto nla_put_failure;
+
+ if ((parsup & WIPHY_BSS_PARAM_CTS_PROT) &&
+ nla_put_flag(msg, NL80211_ATTR_BSS_CTS_PROT))
+ goto nla_put_failure;
+ if ((parsup & WIPHY_BSS_PARAM_SHORT_PREAMBLE) &&
+ nla_put_flag(msg, NL80211_ATTR_BSS_SHORT_PREAMBLE))
+ goto nla_put_failure;
+ if ((parsup & WIPHY_BSS_PARAM_SHORT_SLOT_TIME) &&
+ nla_put_flag(msg, NL80211_ATTR_BSS_SHORT_SLOT_TIME))
+ goto nla_put_failure;
+ if ((parsup & WIPHY_BSS_PARAM_BASIC_RATES) &&
+ nla_put_flag(msg, NL80211_ATTR_BSS_BASIC_RATES))
+ goto nla_put_failure;
+ if ((parsup & WIPHY_BSS_PARAM_AP_ISOLATE) &&
+ nla_put_flag(msg, NL80211_ATTR_AP_ISOLATE))
+ goto nla_put_failure;
+ if ((parsup & WIPHY_BSS_PARAM_HT_OPMODE) &&
+ nla_put_flag(msg, NL80211_ATTR_BSS_HT_OPMODE))
+ goto nla_put_failure;
+ if ((parsup & WIPHY_BSS_PARAM_P2P_CTWINDOW) &&
+ nla_put_flag(msg, NL80211_ATTR_P2P_CTWINDOW))
+ goto nla_put_failure;
+ if ((parsup & WIPHY_BSS_PARAM_P2P_OPPPS) &&
+ nla_put_flag(msg, NL80211_ATTR_P2P_OPPPS))
+ goto nla_put_failure;
+ nla_nest_end(msg, nested);
+ }
if (rdev->wiphy.bss_select_support) {
struct nlattr *nested;
u32 bss_select_support = rdev->wiphy.bss_select_support;
@@ -3140,6 +3287,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
if (nl80211_put_radios(&rdev->wiphy, msg))
goto nla_put_failure;
+ state->split_start++;
+ break;
+ case 18:
+ if (nl80211_put_nan_capa(&rdev->wiphy, msg))
+ goto nla_put_failure;
+
/* done */
state->split_start = 0;
break;
@@ -3383,6 +3536,7 @@ static int _nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
chandef->center_freq1 = KHZ_TO_MHZ(control_freq);
chandef->freq1_offset = control_freq % 1000;
chandef->center_freq2 = 0;
+ chandef->s1g_primary_2mhz = false;
if (!chandef->chan) {
NL_SET_ERR_MSG_ATTR(extack, attrs[NL80211_ATTR_WIPHY_FREQ],
@@ -3426,27 +3580,20 @@ static int _nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
return -EINVAL;
}
} else if (attrs[NL80211_ATTR_CHANNEL_WIDTH]) {
- chandef->width =
- nla_get_u32(attrs[NL80211_ATTR_CHANNEL_WIDTH]);
- if (chandef->chan->band == NL80211_BAND_S1GHZ) {
- /* User input error for channel width doesn't match channel */
- if (chandef->width != ieee80211_s1g_channel_width(chandef->chan)) {
- NL_SET_ERR_MSG_ATTR(extack,
- attrs[NL80211_ATTR_CHANNEL_WIDTH],
- "bad channel width");
- return -EINVAL;
- }
- }
+ chandef->width = nla_get_u32(attrs[NL80211_ATTR_CHANNEL_WIDTH]);
if (attrs[NL80211_ATTR_CENTER_FREQ1]) {
chandef->center_freq1 =
nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ1]);
- chandef->freq1_offset =
- nla_get_u32_default(attrs[NL80211_ATTR_CENTER_FREQ1_OFFSET],
- 0);
+ chandef->freq1_offset = nla_get_u32_default(
+ attrs[NL80211_ATTR_CENTER_FREQ1_OFFSET], 0);
}
+
if (attrs[NL80211_ATTR_CENTER_FREQ2])
chandef->center_freq2 =
nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ2]);
+
+ chandef->s1g_primary_2mhz = nla_get_flag(
+ attrs[NL80211_ATTR_S1G_PRIMARY_2MHZ]);
}
if (info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]) {
@@ -3608,6 +3755,33 @@ static int nl80211_set_channel(struct sk_buff *skb, struct genl_info *info)
return __nl80211_set_channel(rdev, netdev, info, link_id);
}
+static int nl80211_set_wiphy_radio(struct genl_info *info,
+ struct cfg80211_registered_device *rdev,
+ int radio_idx)
+{
+ u32 rts_threshold = 0, old_rts, changed = 0;
+ int result;
+
+ if (!rdev->ops->set_wiphy_params)
+ return -EOPNOTSUPP;
+
+ if (info->attrs[NL80211_ATTR_WIPHY_RTS_THRESHOLD]) {
+ rts_threshold = nla_get_u32(
+ info->attrs[NL80211_ATTR_WIPHY_RTS_THRESHOLD]);
+ changed |= WIPHY_PARAM_RTS_THRESHOLD;
+ }
+
+ old_rts = rdev->wiphy.radio_cfg[radio_idx].rts_threshold;
+
+ rdev->wiphy.radio_cfg[radio_idx].rts_threshold = rts_threshold;
+
+ result = rdev_set_wiphy_params(rdev, radio_idx, changed);
+ if (result)
+ rdev->wiphy.radio_cfg[radio_idx].rts_threshold = old_rts;
+
+ return result;
+}
+
static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = NULL;
@@ -3620,6 +3794,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
u32 frag_threshold = 0, rts_threshold = 0;
u8 coverage_class = 0;
u32 txq_limit = 0, txq_memory_limit = 0, txq_quantum = 0;
+ int radio_idx = -1;
rtnl_lock();
/*
@@ -3670,6 +3845,19 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
if (result)
return result;
+ if (info->attrs[NL80211_ATTR_WIPHY_RADIO_INDEX]) {
+ /* Radio idx is not expected for non-multi radio wiphy */
+ if (rdev->wiphy.n_radio <= 0)
+ return -EINVAL;
+
+ radio_idx = nla_get_u8(
+ info->attrs[NL80211_ATTR_WIPHY_RADIO_INDEX]);
+ if (radio_idx >= rdev->wiphy.n_radio)
+ return -EINVAL;
+
+ return nl80211_set_wiphy_radio(info, rdev, radio_idx);
+ }
+
if (info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS]) {
struct ieee80211_txq_params txq_params;
struct nlattr *tb[NL80211_TXQ_ATTR_MAX + 1];
@@ -3759,7 +3947,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
mbm = nla_get_u32(info->attrs[idx]);
}
- result = rdev_set_tx_power(rdev, txp_wdev, type, mbm);
+ result = rdev_set_tx_power(rdev, txp_wdev, radio_idx, type,
+ mbm);
if (result)
return result;
}
@@ -3785,7 +3974,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
tx_ant = tx_ant & rdev->wiphy.available_antennas_tx;
rx_ant = rx_ant & rdev->wiphy.available_antennas_rx;
- result = rdev_set_antenna(rdev, tx_ant, rx_ant);
+ result = rdev_set_antenna(rdev, radio_idx, tx_ant, rx_ant);
if (result)
return result;
}
@@ -3879,16 +4068,30 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
if (changed) {
u8 old_retry_short, old_retry_long;
u32 old_frag_threshold, old_rts_threshold;
- u8 old_coverage_class;
+ u8 old_coverage_class, i;
u32 old_txq_limit, old_txq_memory_limit, old_txq_quantum;
+ u32 *old_radio_rts_threshold = NULL;
if (!rdev->ops->set_wiphy_params)
return -EOPNOTSUPP;
+ if (rdev->wiphy.n_radio) {
+ old_radio_rts_threshold = kcalloc(rdev->wiphy.n_radio,
+ sizeof(u32),
+ GFP_KERNEL);
+ if (!old_radio_rts_threshold)
+ return -ENOMEM;
+ }
+
old_retry_short = rdev->wiphy.retry_short;
old_retry_long = rdev->wiphy.retry_long;
old_frag_threshold = rdev->wiphy.frag_threshold;
old_rts_threshold = rdev->wiphy.rts_threshold;
+ if (old_radio_rts_threshold) {
+ for (i = 0; i < rdev->wiphy.n_radio; i++)
+ old_radio_rts_threshold[i] =
+ rdev->wiphy.radio_cfg[i].rts_threshold;
+ }
old_coverage_class = rdev->wiphy.coverage_class;
old_txq_limit = rdev->wiphy.txq_limit;
old_txq_memory_limit = rdev->wiphy.txq_memory_limit;
@@ -3900,8 +4103,13 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
rdev->wiphy.retry_long = retry_long;
if (changed & WIPHY_PARAM_FRAG_THRESHOLD)
rdev->wiphy.frag_threshold = frag_threshold;
- if (changed & WIPHY_PARAM_RTS_THRESHOLD)
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
rdev->wiphy.rts_threshold = rts_threshold;
+ for (i = 0; i < rdev->wiphy.n_radio; i++)
+ rdev->wiphy.radio_cfg[i].rts_threshold =
+ rdev->wiphy.rts_threshold;
+ }
if (changed & WIPHY_PARAM_COVERAGE_CLASS)
rdev->wiphy.coverage_class = coverage_class;
if (changed & WIPHY_PARAM_TXQ_LIMIT)
@@ -3911,18 +4119,26 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
if (changed & WIPHY_PARAM_TXQ_QUANTUM)
rdev->wiphy.txq_quantum = txq_quantum;
- result = rdev_set_wiphy_params(rdev, changed);
+ result = rdev_set_wiphy_params(rdev, radio_idx, changed);
if (result) {
rdev->wiphy.retry_short = old_retry_short;
rdev->wiphy.retry_long = old_retry_long;
rdev->wiphy.frag_threshold = old_frag_threshold;
rdev->wiphy.rts_threshold = old_rts_threshold;
+ if (old_radio_rts_threshold) {
+ for (i = 0; i < rdev->wiphy.n_radio; i++)
+ rdev->wiphy.radio_cfg[i].rts_threshold =
+ old_radio_rts_threshold[i];
+ }
rdev->wiphy.coverage_class = old_coverage_class;
rdev->wiphy.txq_limit = old_txq_limit;
rdev->wiphy.txq_memory_limit = old_txq_memory_limit;
rdev->wiphy.txq_quantum = old_txq_quantum;
- return result;
}
+
+ kfree(old_radio_rts_threshold);
+ return result;
}
return 0;
@@ -4012,7 +4228,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
if (rdev->ops->get_tx_power && !wdev->valid_links) {
int dbm, ret;
- ret = rdev_get_tx_power(rdev, wdev, 0, &dbm);
+ ret = rdev_get_tx_power(rdev, wdev, -1, 0, &dbm);
if (ret == 0 &&
nla_put_u32(msg, NL80211_ATTR_WIPHY_TX_POWER_LEVEL,
DBM_TO_MBM(dbm)))
@@ -4084,7 +4300,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
if (rdev->ops->get_tx_power) {
int dbm, ret;
- ret = rdev_get_tx_power(rdev, wdev, link_id, &dbm);
+ ret = rdev_get_tx_power(rdev, wdev, -1, link_id, &dbm);
if (ret == 0 &&
nla_put_u32(msg, NL80211_ATTR_WIPHY_TX_POWER_LEVEL,
DBM_TO_MBM(dbm)))
@@ -5301,6 +5517,164 @@ static bool he_set_mcs_mask(struct genl_info *info,
return true;
}
+static void eht_build_mcs_mask(struct genl_info *info,
+ const struct ieee80211_sta_eht_cap *eht_cap,
+ u8 mcs_nss_len, u16 *mcs_mask)
+{
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ u8 nss, mcs_7 = 0, mcs_9 = 0, mcs_11 = 0, mcs_13 = 0;
+ unsigned int link_id = nl80211_link_id(info->attrs);
+
+ if (mcs_nss_len == 4) {
+ const struct ieee80211_eht_mcs_nss_supp_20mhz_only *mcs =
+ &eht_cap->eht_mcs_nss_supp.only_20mhz;
+
+ mcs_7 = u8_get_bits(mcs->rx_tx_mcs7_max_nss,
+ IEEE80211_EHT_MCS_NSS_TX);
+ mcs_9 = u8_get_bits(mcs->rx_tx_mcs9_max_nss,
+ IEEE80211_EHT_MCS_NSS_TX);
+ mcs_11 = u8_get_bits(mcs->rx_tx_mcs11_max_nss,
+ IEEE80211_EHT_MCS_NSS_TX);
+ mcs_13 = u8_get_bits(mcs->rx_tx_mcs13_max_nss,
+ IEEE80211_EHT_MCS_NSS_TX);
+
+ } else {
+ const struct ieee80211_eht_mcs_nss_supp_bw *mcs;
+ enum nl80211_chan_width width;
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ width = wdev->u.ibss.chandef.width;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ width = wdev->u.mesh.chandef.width;
+ break;
+ case NL80211_IFTYPE_OCB:
+ width = wdev->u.ocb.chandef.width;
+ break;
+ default:
+ if (wdev->valid_links)
+ width = wdev->links[link_id].ap.chandef.width;
+ else
+ width = wdev->u.ap.preset_chandef.width;
+ break;
+ }
+
+ switch (width) {
+ case NL80211_CHAN_WIDTH_320:
+ mcs = &eht_cap->eht_mcs_nss_supp.bw._320;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ mcs = &eht_cap->eht_mcs_nss_supp.bw._160;
+ break;
+ default:
+ mcs = &eht_cap->eht_mcs_nss_supp.bw._80;
+ break;
+ }
+
+ mcs_7 = u8_get_bits(mcs->rx_tx_mcs9_max_nss,
+ IEEE80211_EHT_MCS_NSS_TX);
+ mcs_9 = u8_get_bits(mcs->rx_tx_mcs9_max_nss,
+ IEEE80211_EHT_MCS_NSS_TX);
+ mcs_11 = u8_get_bits(mcs->rx_tx_mcs11_max_nss,
+ IEEE80211_EHT_MCS_NSS_TX);
+ mcs_13 = u8_get_bits(mcs->rx_tx_mcs13_max_nss,
+ IEEE80211_EHT_MCS_NSS_TX);
+ }
+
+ /* Enable MCS 14 for NSS 0 */
+ if (eht_cap->eht_cap_elem.phy_cap_info[6] &
+ IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP)
+ mcs_mask[0] |= 0x4000;
+
+ /* Enable MCS 15 for NSS 0 */
+ mcs_mask[0] |= 0x8000;
+
+ for (nss = 0; nss < NL80211_EHT_NSS_MAX; nss++) {
+ if (!mcs_7)
+ continue;
+ mcs_mask[nss] |= 0x00FF;
+ mcs_7--;
+
+ if (!mcs_9)
+ continue;
+ mcs_mask[nss] |= 0x0300;
+ mcs_9--;
+
+ if (!mcs_11)
+ continue;
+ mcs_mask[nss] |= 0x0C00;
+ mcs_11--;
+
+ if (!mcs_13)
+ continue;
+ mcs_mask[nss] |= 0x3000;
+ mcs_13--;
+ }
+}
+
+static bool eht_set_mcs_mask(struct genl_info *info, struct wireless_dev *wdev,
+ struct ieee80211_supported_band *sband,
+ struct nl80211_txrate_eht *txrate,
+ u16 mcs[NL80211_EHT_NSS_MAX])
+{
+ const struct ieee80211_sta_he_cap *he_cap;
+ const struct ieee80211_sta_eht_cap *eht_cap;
+ u16 tx_mcs_mask[NL80211_EHT_NSS_MAX] = { 0 };
+ u8 i, mcs_nss_len;
+
+ he_cap = ieee80211_get_he_iftype_cap(sband, wdev->iftype);
+ if (!he_cap)
+ return false;
+
+ eht_cap = ieee80211_get_eht_iftype_cap(sband, wdev->iftype);
+ if (!eht_cap)
+ return false;
+
+ /* Checks for MCS 14 */
+ if (txrate->mcs[0] & 0x4000) {
+ if (sband->band != NL80211_BAND_6GHZ)
+ return false;
+
+ if (!(eht_cap->eht_cap_elem.phy_cap_info[6] &
+ IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP))
+ return false;
+ }
+
+ mcs_nss_len = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
+ &eht_cap->eht_cap_elem,
+ wdev->iftype ==
+ NL80211_IFTYPE_STATION);
+
+ if (mcs_nss_len == 3) {
+ /* Supported iftypes for setting non-20 MHZ only EHT MCS */
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_OCB:
+ break;
+ default:
+ return false;
+ }
+ }
+
+ /* Build eht_mcs_mask from EHT and HE capabilities */
+ eht_build_mcs_mask(info, eht_cap, mcs_nss_len, tx_mcs_mask);
+
+ memset(mcs, 0, sizeof(u16) * NL80211_EHT_NSS_MAX);
+ for (i = 0; i < NL80211_EHT_NSS_MAX; i++) {
+ if ((tx_mcs_mask[i] & txrate->mcs[i]) == txrate->mcs[i])
+ mcs[i] = txrate->mcs[i];
+ else
+ return false;
+ }
+
+ return true;
+}
+
static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
struct nlattr *attrs[],
enum nl80211_attrs attr,
@@ -5321,6 +5695,8 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
/* Default to all rates enabled */
for (i = 0; i < NUM_NL80211_BANDS; i++) {
const struct ieee80211_sta_he_cap *he_cap;
+ const struct ieee80211_sta_eht_cap *eht_cap;
+ u8 mcs_nss_len;
if (!default_all_enabled)
break;
@@ -5349,6 +5725,21 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
mask->control[i].he_gi = 0xFF;
mask->control[i].he_ltf = 0xFF;
+
+ eht_cap = ieee80211_get_eht_iftype_cap(sband, wdev->iftype);
+ if (!eht_cap)
+ continue;
+
+ mcs_nss_len = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
+ &eht_cap->eht_cap_elem,
+ wdev->iftype ==
+ NL80211_IFTYPE_STATION);
+
+ eht_build_mcs_mask(info, eht_cap, mcs_nss_len,
+ mask->control[i].eht_mcs);
+
+ mask->control[i].eht_gi = 0xFF;
+ mask->control[i].eht_ltf = 0xFF;
}
/* if no rates are given set it back to the defaults */
@@ -5420,13 +5811,27 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
mask->control[band].he_ltf =
nla_get_u8(tb[NL80211_TXRATE_HE_LTF]);
+ if (tb[NL80211_TXRATE_EHT] &&
+ !eht_set_mcs_mask(info, wdev, sband,
+ nla_data(tb[NL80211_TXRATE_EHT]),
+ mask->control[band].eht_mcs))
+ return -EINVAL;
+
+ if (tb[NL80211_TXRATE_EHT_GI])
+ mask->control[band].eht_gi =
+ nla_get_u8(tb[NL80211_TXRATE_EHT_GI]);
+ if (tb[NL80211_TXRATE_EHT_LTF])
+ mask->control[band].eht_ltf =
+ nla_get_u8(tb[NL80211_TXRATE_EHT_LTF]);
+
if (mask->control[band].legacy == 0) {
- /* don't allow empty legacy rates if HT, VHT or HE
+ /* don't allow empty legacy rates if HT, VHT, HE or EHT
* are not even supported.
*/
if (!(rdev->wiphy.bands[band]->ht_cap.ht_supported ||
rdev->wiphy.bands[band]->vht_cap.vht_supported ||
- ieee80211_get_he_iftype_cap(sband, wdev->iftype)))
+ ieee80211_get_he_iftype_cap(sband, wdev->iftype) ||
+ ieee80211_get_eht_iftype_cap(sband, wdev->iftype)))
return -EINVAL;
for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
@@ -5441,6 +5846,10 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
if (mask->control[band].he_mcs[i])
goto out;
+ for (i = 0; i < NL80211_EHT_NSS_MAX; i++)
+ if (mask->control[band].eht_mcs[i])
+ goto out;
+
/* legacy and mcs rates may not be both empty */
return -EINVAL;
}
@@ -5454,7 +5863,7 @@ static int validate_beacon_tx_rate(struct cfg80211_registered_device *rdev,
enum nl80211_band band,
struct cfg80211_bitrate_mask *beacon_rate)
{
- u32 count_ht, count_vht, count_he, i;
+ u32 count_ht, count_vht, count_he, count_eht, i;
u32 rate = beacon_rate->control[band].legacy;
/* Allow only one rate */
@@ -5500,8 +5909,21 @@ static int validate_beacon_tx_rate(struct cfg80211_registered_device *rdev,
return -EINVAL;
}
- if ((count_ht && count_vht && count_he) ||
- (!rate && !count_ht && !count_vht && !count_he))
+ count_eht = 0;
+ for (i = 0; i < NL80211_EHT_NSS_MAX; i++) {
+ if (hweight16(beacon_rate->control[band].eht_mcs[i]) > 1) {
+ return -EINVAL;
+ } else if (beacon_rate->control[band].eht_mcs[i]) {
+ count_eht++;
+ if (count_eht > 1)
+ return -EINVAL;
+ }
+ if (count_eht && rate)
+ return -EINVAL;
+ }
+
+ if ((count_ht && count_vht && count_he && count_eht) ||
+ (!rate && !count_ht && !count_vht && !count_he && !count_eht))
return -EINVAL;
if (rate &&
@@ -5521,6 +5943,11 @@ static int validate_beacon_tx_rate(struct cfg80211_registered_device *rdev,
NL80211_EXT_FEATURE_BEACON_RATE_HE))
return -EINVAL;
+ if (count_eht &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_BEACON_RATE_EHT))
+ return -EINVAL;
+
return 0;
}
@@ -6123,6 +6550,41 @@ static int nl80211_validate_ap_phy_operation(struct cfg80211_ap_settings *params
return 0;
}
+static int
+nl80211_parse_s1g_short_beacon(struct cfg80211_registered_device *rdev,
+ struct nlattr *attrs,
+ struct cfg80211_s1g_short_beacon *sb)
+{
+ struct nlattr *tb[NL80211_S1G_SHORT_BEACON_ATTR_MAX + 1];
+ int ret;
+
+ if (!rdev->wiphy.bands[NL80211_BAND_S1GHZ])
+ return -EINVAL;
+
+ ret = nla_parse_nested(tb, NL80211_S1G_SHORT_BEACON_ATTR_MAX, attrs,
+ NULL, NULL);
+ if (ret)
+ return ret;
+
+ /* Short beacon tail is optional (i.e. it might only include the TIM) */
+ if (!tb[NL80211_S1G_SHORT_BEACON_ATTR_HEAD])
+ return -EINVAL;
+
+ sb->short_head = nla_data(tb[NL80211_S1G_SHORT_BEACON_ATTR_HEAD]);
+ sb->short_head_len = nla_len(tb[NL80211_S1G_SHORT_BEACON_ATTR_HEAD]);
+ sb->short_tail_len = 0;
+
+ if (tb[NL80211_S1G_SHORT_BEACON_ATTR_TAIL]) {
+ sb->short_tail =
+ nla_data(tb[NL80211_S1G_SHORT_BEACON_ATTR_TAIL]);
+ sb->short_tail_len =
+ nla_len(tb[NL80211_S1G_SHORT_BEACON_ATTR_TAIL]);
+ }
+
+ sb->update = true;
+ return 0;
+}
+
static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -6363,6 +6825,22 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
goto out;
}
+ if (info->attrs[NL80211_ATTR_S1G_SHORT_BEACON]) {
+ if (!info->attrs[NL80211_ATTR_S1G_LONG_BEACON_PERIOD]) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ params->s1g_long_beacon_period = nla_get_u8(
+ info->attrs[NL80211_ATTR_S1G_LONG_BEACON_PERIOD]);
+
+ err = nl80211_parse_s1g_short_beacon(
+ rdev, info->attrs[NL80211_ATTR_S1G_SHORT_BEACON],
+ &params->s1g_short_beacon);
+ if (err)
+ goto out;
+ }
+
err = nl80211_calculate_ap_params(params);
if (err)
goto out;
@@ -6471,6 +6949,14 @@ static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info)
goto out;
}
+ attr = info->attrs[NL80211_ATTR_S1G_SHORT_BEACON];
+ if (attr) {
+ err = nl80211_parse_s1g_short_beacon(rdev, attr,
+ &params->s1g_short_beacon);
+ if (err)
+ goto out;
+ }
+
err = rdev_change_beacon(rdev, dev, params);
out:
@@ -6728,14 +7214,197 @@ static bool nl80211_put_signal(struct sk_buff *msg, u8 mask, s8 *signal,
return true;
}
+static int nl80211_fill_link_station(struct sk_buff *msg,
+ struct cfg80211_registered_device *rdev,
+ struct link_station_info *link_sinfo)
+{
+ struct nlattr *bss_param, *link_sinfoattr;
+
+#define PUT_LINK_SINFO(attr, memb, type) do { \
+ BUILD_BUG_ON(sizeof(type) == sizeof(u64)); \
+ if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_ ## attr) && \
+ nla_put_ ## type(msg, NL80211_STA_INFO_ ## attr, \
+ link_sinfo->memb)) \
+ goto nla_put_failure; \
+ } while (0)
+#define PUT_LINK_SINFO_U64(attr, memb) do { \
+ if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_ ## attr) && \
+ nla_put_u64_64bit(msg, NL80211_STA_INFO_ ## attr, \
+ link_sinfo->memb, NL80211_STA_INFO_PAD)) \
+ goto nla_put_failure; \
+ } while (0)
+
+ link_sinfoattr = nla_nest_start_noflag(msg, NL80211_ATTR_STA_INFO);
+ if (!link_sinfoattr)
+ goto nla_put_failure;
+
+ PUT_LINK_SINFO(INACTIVE_TIME, inactive_time, u32);
+
+ if (link_sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_RX_BYTES64)) &&
+ nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES,
+ (u32)link_sinfo->rx_bytes))
+ goto nla_put_failure;
+
+ if (link_sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_TX_BYTES64)) &&
+ nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES,
+ (u32)link_sinfo->tx_bytes))
+ goto nla_put_failure;
+
+ PUT_LINK_SINFO_U64(RX_BYTES64, rx_bytes);
+ PUT_LINK_SINFO_U64(TX_BYTES64, tx_bytes);
+ PUT_LINK_SINFO_U64(RX_DURATION, rx_duration);
+ PUT_LINK_SINFO_U64(TX_DURATION, tx_duration);
+
+ if (wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
+ PUT_LINK_SINFO(AIRTIME_WEIGHT, airtime_weight, u16);
+
+ switch (rdev->wiphy.signal_type) {
+ case CFG80211_SIGNAL_TYPE_MBM:
+ PUT_LINK_SINFO(SIGNAL, signal, u8);
+ PUT_LINK_SINFO(SIGNAL_AVG, signal_avg, u8);
+ break;
+ default:
+ break;
+ }
+ if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) {
+ if (!nl80211_put_signal(msg, link_sinfo->chains,
+ link_sinfo->chain_signal,
+ NL80211_STA_INFO_CHAIN_SIGNAL))
+ goto nla_put_failure;
+ }
+ if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) {
+ if (!nl80211_put_signal(msg, link_sinfo->chains,
+ link_sinfo->chain_signal_avg,
+ NL80211_STA_INFO_CHAIN_SIGNAL_AVG))
+ goto nla_put_failure;
+ }
+ if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) {
+ if (!nl80211_put_sta_rate(msg, &link_sinfo->txrate,
+ NL80211_STA_INFO_TX_BITRATE))
+ goto nla_put_failure;
+ }
+ if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) {
+ if (!nl80211_put_sta_rate(msg, &link_sinfo->rxrate,
+ NL80211_STA_INFO_RX_BITRATE))
+ goto nla_put_failure;
+ }
+
+ PUT_LINK_SINFO(RX_PACKETS, rx_packets, u32);
+ PUT_LINK_SINFO(TX_PACKETS, tx_packets, u32);
+ PUT_LINK_SINFO(TX_RETRIES, tx_retries, u32);
+ PUT_LINK_SINFO(TX_FAILED, tx_failed, u32);
+ PUT_LINK_SINFO(EXPECTED_THROUGHPUT, expected_throughput, u32);
+ PUT_LINK_SINFO(BEACON_LOSS, beacon_loss_count, u32);
+
+ if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_BSS_PARAM)) {
+ bss_param = nla_nest_start_noflag(msg,
+ NL80211_STA_INFO_BSS_PARAM);
+ if (!bss_param)
+ goto nla_put_failure;
+
+ if (((link_sinfo->bss_param.flags &
+ BSS_PARAM_FLAGS_CTS_PROT) &&
+ nla_put_flag(msg, NL80211_STA_BSS_PARAM_CTS_PROT)) ||
+ ((link_sinfo->bss_param.flags &
+ BSS_PARAM_FLAGS_SHORT_PREAMBLE) &&
+ nla_put_flag(msg,
+ NL80211_STA_BSS_PARAM_SHORT_PREAMBLE)) ||
+ ((link_sinfo->bss_param.flags &
+ BSS_PARAM_FLAGS_SHORT_SLOT_TIME) &&
+ nla_put_flag(msg,
+ NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME)) ||
+ nla_put_u8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD,
+ link_sinfo->bss_param.dtim_period) ||
+ nla_put_u16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL,
+ link_sinfo->bss_param.beacon_interval))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, bss_param);
+ }
+
+ PUT_LINK_SINFO_U64(RX_DROP_MISC, rx_dropped_misc);
+ PUT_LINK_SINFO_U64(BEACON_RX, rx_beacon);
+ PUT_LINK_SINFO(BEACON_SIGNAL_AVG, rx_beacon_signal_avg, u8);
+ PUT_LINK_SINFO(RX_MPDUS, rx_mpdu_count, u32);
+ PUT_LINK_SINFO(FCS_ERROR_COUNT, fcs_err_count, u32);
+ if (wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT)) {
+ PUT_LINK_SINFO(ACK_SIGNAL, ack_signal, u8);
+ PUT_LINK_SINFO(ACK_SIGNAL_AVG, avg_ack_signal, s8);
+ }
+
+#undef PUT_LINK_SINFO
+#undef PUT_LINK_SINFO_U64
+
+ if (link_sinfo->pertid) {
+ struct nlattr *tidsattr;
+ int tid;
+
+ tidsattr = nla_nest_start_noflag(msg,
+ NL80211_STA_INFO_TID_STATS);
+ if (!tidsattr)
+ goto nla_put_failure;
+
+ for (tid = 0; tid < IEEE80211_NUM_TIDS + 1; tid++) {
+ struct cfg80211_tid_stats *tidstats;
+ struct nlattr *tidattr;
+
+ tidstats = &link_sinfo->pertid[tid];
+
+ if (!tidstats->filled)
+ continue;
+
+ tidattr = nla_nest_start_noflag(msg, tid + 1);
+ if (!tidattr)
+ goto nla_put_failure;
+
+#define PUT_TIDVAL_U64(attr, memb) do { \
+ if (tidstats->filled & BIT(NL80211_TID_STATS_ ## attr) && \
+ nla_put_u64_64bit(msg, NL80211_TID_STATS_ ## attr, \
+ tidstats->memb, NL80211_TID_STATS_PAD)) \
+ goto nla_put_failure; \
+ } while (0)
+
+ PUT_TIDVAL_U64(RX_MSDU, rx_msdu);
+ PUT_TIDVAL_U64(TX_MSDU, tx_msdu);
+ PUT_TIDVAL_U64(TX_MSDU_RETRIES, tx_msdu_retries);
+ PUT_TIDVAL_U64(TX_MSDU_FAILED, tx_msdu_failed);
+
+#undef PUT_TIDVAL_U64
+ if ((tidstats->filled &
+ BIT(NL80211_TID_STATS_TXQ_STATS)) &&
+ !nl80211_put_txq_stats(msg, &tidstats->txq_stats,
+ NL80211_TID_STATS_TXQ_STATS))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, tidattr);
+ }
+
+ nla_nest_end(msg, tidsattr);
+ }
+
+ nla_nest_end(msg, link_sinfoattr);
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
u32 seq, int flags,
struct cfg80211_registered_device *rdev,
struct net_device *dev,
- const u8 *mac_addr, struct station_info *sinfo)
+ const u8 *mac_addr, struct station_info *sinfo,
+ bool link_stats)
{
void *hdr;
struct nlattr *sinfoattr, *bss_param;
+ struct link_station_info *link_sinfo;
+ struct nlattr *links, *link;
+ int link_id;
hdr = nl80211hdr_put(msg, portid, seq, flags, cmd);
if (!hdr) {
@@ -6950,6 +7619,40 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
goto nla_put_failure;
}
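+ /* When requested, append nested per-link statistics for each valid MLO link */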
+ if (link_stats && sinfo->valid_links) {
+ links = nla_nest_start(msg, NL80211_ATTR_MLO_LINKS);
+ if (!links)
+ goto nla_put_failure;
+
+ for_each_valid_link(sinfo, link_id) {
+ link_sinfo = sinfo->links[link_id];
+
+ if (WARN_ON_ONCE(!link_sinfo))
+ continue;
+
+ if (!is_valid_ether_addr(link_sinfo->addr))
+ continue;
+
+ link = nla_nest_start(msg, link_id + 1);
+ if (!link)
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID,
+ link_id))
+ goto nla_put_failure;
+
+ if (nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN,
+ link_sinfo->addr))
+ goto nla_put_failure;
+
+ if (nl80211_fill_link_station(msg, rdev, link_sinfo))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, link);
+ }
+ nla_nest_end(msg, links);
+ }
+
cfg80211_sinfo_release_content(sinfo);
genlmsg_end(msg, hdr);
return 0;
@@ -6960,6 +7663,194 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
return -EMSGSIZE;
}
+static void cfg80211_sta_set_mld_sinfo(struct station_info *sinfo)
+{
+ struct link_station_info *link_sinfo;
+ int link_id, init = 0;
+ u32 link_inactive_time;
+
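+ /* Seed with a very low value so the strongest per-link signal wins below */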
+ sinfo->signal = -99;
+ sinfo->signal_avg = -99;
+
+ for_each_valid_link(sinfo, link_id) {
+ link_sinfo = sinfo->links[link_id];
+ if (!link_sinfo)
+ continue;
+
+ if ((link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) {
+ sinfo->tx_packets += link_sinfo->tx_packets;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
+ }
+
+ if ((link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) {
+ sinfo->rx_packets += link_sinfo->rx_packets;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
+ }
+
+ if (link_sinfo->filled &
+ (BIT_ULL(NL80211_STA_INFO_TX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_TX_BYTES64))) {
+ sinfo->tx_bytes += link_sinfo->tx_bytes;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES);
+ }
+
+ if (link_sinfo->filled &
+ (BIT_ULL(NL80211_STA_INFO_RX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_TX_BYTES64))) {
+ sinfo->rx_bytes += link_sinfo->rx_bytes;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES);
+ }
+
+ if (link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_TX_RETRIES)) {
+ sinfo->tx_retries += link_sinfo->tx_retries;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
+ }
+
+ if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED)) {
+ sinfo->tx_failed += link_sinfo->tx_failed;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
+ }
+
+ if (link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC)) {
+ sinfo->rx_dropped_misc += link_sinfo->rx_dropped_misc;
+ sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC);
+ }
+
+ if (link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_BEACON_LOSS)) {
+ sinfo->beacon_loss_count +=
+ link_sinfo->beacon_loss_count;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS);
+ }
+
+ if (link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT)) {
+ sinfo->expected_throughput +=
+ link_sinfo->expected_throughput;
+ sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT);
+ }
+
+ if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_MPDUS)) {
+ sinfo->rx_mpdu_count += link_sinfo->rx_mpdu_count;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_MPDUS);
+ }
+
+ if (link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_FCS_ERROR_COUNT)) {
+ sinfo->fcs_err_count += link_sinfo->fcs_err_count;
+ sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_FCS_ERROR_COUNT);
+ }
+
+ if (link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_BEACON_RX)) {
+ sinfo->rx_beacon += link_sinfo->rx_beacon;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX);
+ }
+
+ /* Update MLO signal, signal_avg as best among links */
+ if ((link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL)) &&
+ link_sinfo->signal > sinfo->signal) {
+ sinfo->signal = link_sinfo->signal;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
+ }
+
+ if ((link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG)) &&
+ link_sinfo->signal_avg > sinfo->signal_avg) {
+ sinfo->signal_avg = link_sinfo->signal_avg;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+ }
+
+ /* For MLO inactive_time and bss_param, report the smallest
+ * value found across the links.
+ */
+ if ((link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME)) &&
+ (!init ||
+ link_inactive_time > link_sinfo->inactive_time)) {
+ link_inactive_time = link_sinfo->inactive_time;
+ sinfo->inactive_time = link_sinfo->inactive_time;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME);
+ }
+
+ if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_BSS_PARAM) &&
+ (!init ||
+ sinfo->bss_param.dtim_period >
+ link_sinfo->bss_param.dtim_period)) {
+ sinfo->bss_param.dtim_period =
+ link_sinfo->bss_param.dtim_period;
+ sinfo->bss_param.beacon_interval =
+ link_sinfo->bss_param.beacon_interval;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM);
+ }
+
+ /* Report MLO rates from the most recently active link */
+ if ((link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) &&
+ (!init ||
+ link_inactive_time > link_sinfo->inactive_time)) {
+ sinfo->txrate = link_sinfo->txrate;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ }
+ if ((link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) &&
+ (!init ||
+ link_inactive_time > link_sinfo->inactive_time)) {
+ sinfo->rxrate = link_sinfo->rxrate;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
+ }
+
+ if (link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_TX_DURATION) &&
+ (!init ||
+ link_inactive_time > link_sinfo->inactive_time)) {
+ sinfo->tx_duration += link_sinfo->tx_duration;
+ sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_TX_DURATION);
+ }
+ if (link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_RX_DURATION) &&
+ (!init ||
+ link_inactive_time > link_sinfo->inactive_time)) {
+ sinfo->rx_duration += link_sinfo->rx_duration;
+ sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_RX_DURATION);
+ }
+ init++;
+
+ /* Accumulate the per-TID rx/tx MSDU counters across links */
+ if (sinfo->pertid) {
+ sinfo->pertid->rx_msdu +=
+ link_sinfo->pertid->rx_msdu;
+ sinfo->pertid->tx_msdu +=
+ link_sinfo->pertid->tx_msdu;
+ sinfo->pertid->tx_msdu_retries +=
+ link_sinfo->pertid->tx_msdu_retries;
+ sinfo->pertid->tx_msdu_failed +=
+ link_sinfo->pertid->tx_msdu_failed;
+
+ sinfo->pertid->filled |=
+ BIT(NL80211_TID_STATS_RX_MSDU) |
+ BIT(NL80211_TID_STATS_TX_MSDU) |
+ BIT(NL80211_TID_STATS_TX_MSDU_RETRIES) |
+ BIT(NL80211_TID_STATS_TX_MSDU_FAILED);
+ }
+ }
+
+ /* Reset sinfo->filled bits to exclude fields which don't make
+ * much sense at the MLO level.
+ */
+ sinfo->filled &= ~BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
+ sinfo->filled &= ~BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
+}
+
static int nl80211_dump_station(struct sk_buff *skb,
struct netlink_callback *cb)
{
@@ -6968,7 +7859,8 @@ static int nl80211_dump_station(struct sk_buff *skb,
struct wireless_dev *wdev;
u8 mac_addr[ETH_ALEN];
int sta_idx = cb->args[2];
- int err;
+ bool sinfo_alloc = false;
+ int err, i;
err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL);
if (err)
@@ -6988,6 +7880,17 @@ static int nl80211_dump_station(struct sk_buff *skb,
while (1) {
memset(&sinfo, 0, sizeof(sinfo));
+
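+ /* Pre-allocate per-link info so the driver can fill MLO link statistics */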
+ for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+ sinfo.links[i] =
+ kzalloc(sizeof(*sinfo.links[0]), GFP_KERNEL);
+ if (!sinfo.links[i]) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+ sinfo_alloc = true;
+ }
+
err = rdev_dump_station(rdev, wdev->netdev, sta_idx,
mac_addr, &sinfo);
if (err == -ENOENT)
@@ -6995,11 +7898,19 @@ static int nl80211_dump_station(struct sk_buff *skb,
if (err)
goto out_err;
+ if (sinfo.valid_links)
+ cfg80211_sta_set_mld_sinfo(&sinfo);
+
+ /* reset the sinfo_alloc flag as nl80211_send_station()
+ * always releases sinfo
+ */
+ sinfo_alloc = false;
+
if (nl80211_send_station(skb, NL80211_CMD_NEW_STATION,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
rdev, wdev->netdev, mac_addr,
- &sinfo) < 0)
+ &sinfo, false) < 0)
goto out;
sta_idx++;
@@ -7009,6 +7920,8 @@ static int nl80211_dump_station(struct sk_buff *skb,
cb->args[2] = sta_idx;
err = skb->len;
out_err:
+ if (sinfo_alloc)
+ cfg80211_sinfo_release_content(&sinfo);
wiphy_unlock(&rdev->wiphy);
return err;
@@ -7021,7 +7934,7 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
struct station_info sinfo;
struct sk_buff *msg;
u8 *mac_addr = NULL;
- int err;
+ int err, i;
memset(&sinfo, 0, sizeof(sinfo));
@@ -7033,9 +7946,19 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->get_station)
return -EOPNOTSUPP;
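+ /* Pre-allocate per-link info; cfg80211_sinfo_release_content() frees it */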
+ for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+ sinfo.links[i] = kzalloc(sizeof(*sinfo.links[0]), GFP_KERNEL);
+ if (!sinfo.links[i]) {
+ cfg80211_sinfo_release_content(&sinfo);
+ return -ENOMEM;
+ }
+ }
+
err = rdev_get_station(rdev, dev, mac_addr, &sinfo);
- if (err)
+ if (err) {
+ cfg80211_sinfo_release_content(&sinfo);
return err;
+ }
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
@@ -7043,9 +7966,12 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
}
+ if (sinfo.valid_links)
+ cfg80211_sta_set_mld_sinfo(&sinfo);
+
if (nl80211_send_station(msg, NL80211_CMD_NEW_STATION,
info->snd_portid, info->snd_seq, 0,
- rdev, dev, mac_addr, &sinfo) < 0) {
+ rdev, dev, mac_addr, &sinfo, false) < 0) {
nlmsg_free(msg);
return -ENOBUFS;
}
@@ -7349,6 +8275,10 @@ static int nl80211_set_station_tdls(struct genl_info *info,
}
}
+ if (info->attrs[NL80211_ATTR_S1G_CAPABILITY])
+ params->link_sta_params.s1g_capa =
+ nla_data(info->attrs[NL80211_ATTR_S1G_CAPABILITY]);
+
err = nl80211_parse_sta_channel_info(info, params);
if (err)
return err;
@@ -7675,6 +8605,10 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
params.link_sta_params.he_6ghz_capa =
nla_data(info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]);
+ if (info->attrs[NL80211_ATTR_S1G_CAPABILITY])
+ params.link_sta_params.s1g_capa =
+ nla_data(info->attrs[NL80211_ATTR_S1G_CAPABILITY]);
+
if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) {
params.link_sta_params.opmode_notif_used = true;
params.link_sta_params.opmode_notif =
@@ -8231,6 +9165,9 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct bss_parameters params;
+ u32 bss_param_support = rdev->wiphy.bss_param_support;
+ u32 changed = 0;
+ bool strict;
memset(&params, 0, sizeof(params));
params.link_id = nl80211_link_id_or_invalid(info->attrs);
@@ -8243,26 +9180,54 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
params.p2p_ctwindow = -1;
params.p2p_opp_ps = -1;
- if (info->attrs[NL80211_ATTR_BSS_CTS_PROT])
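+ /* With NL80211_ATTR_BSS_PARAM set, reject any attribute that the
+ * wiphy does not advertise support for.
+ */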
+ strict = nla_get_flag(info->attrs[NL80211_ATTR_BSS_PARAM]);
+ if (info->attrs[NL80211_ATTR_BSS_CTS_PROT]) {
+ if (strict && !(bss_param_support & WIPHY_BSS_PARAM_CTS_PROT))
+ return -EINVAL;
params.use_cts_prot =
nla_get_u8(info->attrs[NL80211_ATTR_BSS_CTS_PROT]);
- if (info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE])
+ changed |= WIPHY_BSS_PARAM_CTS_PROT;
+ }
+ if (info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]) {
+ if (strict &&
+ !(bss_param_support & WIPHY_BSS_PARAM_SHORT_PREAMBLE))
+ return -EINVAL;
params.use_short_preamble =
nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]);
- if (info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME])
+ changed |= WIPHY_BSS_PARAM_SHORT_PREAMBLE;
+ }
+ if (info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]) {
+ if (strict &&
+ !(bss_param_support & WIPHY_BSS_PARAM_SHORT_SLOT_TIME))
+ return -EINVAL;
params.use_short_slot_time =
nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]);
+ changed |= WIPHY_BSS_PARAM_SHORT_SLOT_TIME;
+ }
if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) {
+ if (strict &&
+ !(bss_param_support & WIPHY_BSS_PARAM_BASIC_RATES))
+ return -EINVAL;
params.basic_rates =
nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
params.basic_rates_len =
nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
+ changed |= WIPHY_BSS_PARAM_BASIC_RATES;
}
- if (info->attrs[NL80211_ATTR_AP_ISOLATE])
- params.ap_isolate = !!nla_get_u8(info->attrs[NL80211_ATTR_AP_ISOLATE]);
- if (info->attrs[NL80211_ATTR_BSS_HT_OPMODE])
+ if (info->attrs[NL80211_ATTR_AP_ISOLATE]) {
+ if (strict && !(bss_param_support & WIPHY_BSS_PARAM_AP_ISOLATE))
+ return -EINVAL;
+ params.ap_isolate =
+ !!nla_get_u8(info->attrs[NL80211_ATTR_AP_ISOLATE]);
+ changed |= WIPHY_BSS_PARAM_AP_ISOLATE;
+ }
+ if (info->attrs[NL80211_ATTR_BSS_HT_OPMODE]) {
+ if (strict && !(bss_param_support & WIPHY_BSS_PARAM_HT_OPMODE))
+ return -EINVAL;
params.ht_opmode =
nla_get_u16(info->attrs[NL80211_ATTR_BSS_HT_OPMODE]);
+ changed |= WIPHY_BSS_PARAM_HT_OPMODE;
+ }
if (info->attrs[NL80211_ATTR_P2P_CTWINDOW]) {
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
@@ -8270,8 +9235,9 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
params.p2p_ctwindow =
nla_get_u8(info->attrs[NL80211_ATTR_P2P_CTWINDOW]);
if (params.p2p_ctwindow != 0 &&
- !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_CTWIN))
+ !(bss_param_support & WIPHY_BSS_PARAM_P2P_CTWINDOW))
return -EINVAL;
+ changed |= WIPHY_BSS_PARAM_P2P_CTWINDOW;
}
if (info->attrs[NL80211_ATTR_P2P_OPPPS]) {
@@ -8280,9 +9246,11 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
return -EINVAL;
tmp = nla_get_u8(info->attrs[NL80211_ATTR_P2P_OPPPS]);
+ if (tmp && !(bss_param_support & WIPHY_BSS_PARAM_P2P_OPPPS))
+ return -EINVAL;
params.p2p_opp_ps = tmp;
if (params.p2p_opp_ps &&
- !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_OPPPS))
+ !(rdev->wiphy.bss_param_support & WIPHY_BSS_PARAM_P2P_OPPPS))
return -EINVAL;
}
@@ -8293,6 +9261,10 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
return -EOPNOTSUPP;
+ changed &= rdev->wiphy.bss_param_support;
+ if (!changed)
+ return 0;
+
return rdev_change_bss(rdev, dev, &params);
}
@@ -9243,6 +10215,7 @@ static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev,
{
unsigned int link_id;
bool all_ok = true;
+ int radio_idx;
lockdep_assert_wiphy(wdev->wiphy);
@@ -9252,8 +10225,10 @@ static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev,
if (!cfg80211_beaconing_iface_active(wdev))
return true;
+ radio_idx = cfg80211_get_radio_idx_by_chan(wdev->wiphy, chan);
+
/*
- * FIXME: check if we have a free HW resource/link for chan
+ * FIXME: check if we have a free radio/link for chan
*
* This, as well as the FIXME below, requires knowing the link
* capabilities of the hardware.
@@ -9262,20 +10237,28 @@ static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev,
/* we cannot leave radar channels */
for_each_valid_link(wdev, link_id) {
struct cfg80211_chan_def *chandef;
+ int link_radio_idx;
chandef = wdev_chandef(wdev, link_id);
if (!chandef || !chandef->chan)
continue;
+ if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
+ continue;
+
/*
- * FIXME: don't require all_ok, but rather check only the
- * correct HW resource/link onto which 'chan' falls,
- * as only that link leaves the channel for doing
- * the off-channel operation.
+ * chandef->chan is a radar channel. If the radio/link onto
+ * which this radar channel falls is the same radio/link onto
+ * which the input 'chan' falls, off-channel operation should
+ * not be allowed. Hence, set 'all_ok' to false.
*/
- if (chandef->chan->flags & IEEE80211_CHAN_RADAR)
+ link_radio_idx = cfg80211_get_radio_idx_by_chan(wdev->wiphy,
+ chandef->chan);
+ if (link_radio_idx == radio_idx) {
all_ok = false;
+ break;
+ }
}
if (all_ok)
@@ -9296,34 +10279,12 @@ static bool nl80211_check_scan_feat(struct wiphy *wiphy, u32 flags, u32 flag,
static int
nl80211_check_scan_flags(struct wiphy *wiphy, struct wireless_dev *wdev,
- void *request, struct nlattr **attrs,
- bool is_sched_scan)
+ struct nlattr **attrs, u8 *mac_addr, u8 *mac_addr_mask,
+ u32 *flags, enum nl80211_feature_flags randomness_flag)
{
- u8 *mac_addr, *mac_addr_mask;
- u32 *flags;
- enum nl80211_feature_flags randomness_flag;
-
if (!attrs[NL80211_ATTR_SCAN_FLAGS])
return 0;
- if (is_sched_scan) {
- struct cfg80211_sched_scan_request *req = request;
-
- randomness_flag = wdev ?
- NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR :
- NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
- flags = &req->flags;
- mac_addr = req->mac_addr;
- mac_addr_mask = req->mac_addr_mask;
- } else {
- struct cfg80211_scan_request *req = request;
-
- randomness_flag = NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
- flags = &req->flags;
- mac_addr = req->mac_addr;
- mac_addr_mask = req->mac_addr_mask;
- }
-
*flags = nla_get_u32(attrs[NL80211_ATTR_SCAN_FLAGS]);
if (((*flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
@@ -9372,11 +10333,35 @@ nl80211_check_scan_flags(struct wiphy *wiphy, struct wireless_dev *wdev,
return 0;
}
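+/* Wrappers passing the MAC-randomization feature flag appropriate for
+ * scheduled vs. regular scans.
+ */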
+static int
+nl80211_check_scan_flags_sched(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct nlattr **attrs,
+ struct cfg80211_sched_scan_request *req)
+{
+ return nl80211_check_scan_flags(wiphy, wdev, attrs,
+ req->mac_addr, req->mac_addr_mask,
+ &req->flags,
+ wdev ? NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR :
+ NL80211_FEATURE_ND_RANDOM_MAC_ADDR);
+}
+
+static int
+nl80211_check_scan_flags_reg(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct nlattr **attrs,
+ struct cfg80211_scan_request_int *req)
+{
+ return nl80211_check_scan_flags(wiphy, wdev, attrs,
+ req->req.mac_addr,
+ req->req.mac_addr_mask,
+ &req->req.flags,
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR);
+}
+
static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct wireless_dev *wdev = info->user_ptr[1];
- struct cfg80211_scan_request *request;
+ struct cfg80211_scan_request_int *request;
struct nlattr *scan_freqs = NULL;
bool scan_freqs_khz = false;
struct nlattr *attr;
@@ -9428,21 +10413,20 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
if (ie_len > wiphy->max_scan_ie_len)
return -EINVAL;
- size = struct_size(request, channels, n_channels);
+ size = struct_size(request, req.channels, n_channels);
ssids_offset = size;
- size = size_add(size, array_size(sizeof(*request->ssids), n_ssids));
+ size = size_add(size, array_size(sizeof(*request->req.ssids), n_ssids));
ie_offset = size;
size = size_add(size, ie_len);
request = kzalloc(size, GFP_KERNEL);
if (!request)
return -ENOMEM;
- request->n_channels = n_channels;
if (n_ssids)
- request->ssids = (void *)request + ssids_offset;
- request->n_ssids = n_ssids;
+ request->req.ssids = (void *)request + ssids_offset;
+ request->req.n_ssids = n_ssids;
if (ie_len)
- request->ie = (void *)request + ie_offset;
+ request->req.ie = (void *)request + ie_offset;
i = 0;
if (scan_freqs) {
@@ -9460,12 +10444,13 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
goto out_free;
}
- /* ignore disabled channels */
+ /* Ignore disabled / no primary channels */
if (chan->flags & IEEE80211_CHAN_DISABLED ||
+ chan->flags & IEEE80211_CHAN_S1G_NO_PRIMARY ||
!cfg80211_wdev_channel_allowed(wdev, chan))
continue;
- request->channels[i] = chan;
+ request->req.channels[i] = chan;
i++;
}
} else {
@@ -9483,10 +10468,12 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
chan = &wiphy->bands[band]->channels[j];
if (chan->flags & IEEE80211_CHAN_DISABLED ||
+ chan->flags &
+ IEEE80211_CHAN_S1G_NO_PRIMARY ||
!cfg80211_wdev_channel_allowed(wdev, chan))
continue;
- request->channels[i] = chan;
+ request->req.channels[i] = chan;
i++;
}
}
@@ -9497,10 +10484,10 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
goto out_free;
}
- request->n_channels = i;
+ request->req.n_channels = i;
- for (i = 0; i < request->n_channels; i++) {
- struct ieee80211_channel *chan = request->channels[i];
+ for (i = 0; i < request->req.n_channels; i++) {
+ struct ieee80211_channel *chan = request->req.channels[i];
/* if we can go off-channel to the target channel we're good */
if (cfg80211_off_channel_oper_allowed(wdev, chan))
@@ -9519,22 +10506,23 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
err = -EINVAL;
goto out_free;
}
- request->ssids[i].ssid_len = nla_len(attr);
- memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr));
+ request->req.ssids[i].ssid_len = nla_len(attr);
+ memcpy(request->req.ssids[i].ssid,
+ nla_data(attr), nla_len(attr));
i++;
}
}
if (info->attrs[NL80211_ATTR_IE]) {
- request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
- memcpy((void *)request->ie,
+ request->req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+ memcpy((void *)request->req.ie,
nla_data(info->attrs[NL80211_ATTR_IE]),
- request->ie_len);
+ request->req.ie_len);
}
for (i = 0; i < NUM_NL80211_BANDS; i++)
if (wiphy->bands[i])
- request->rates[i] =
+ request->req.rates[i] =
(1 << wiphy->bands[i]->n_bitrates) - 1;
if (info->attrs[NL80211_ATTR_SCAN_SUPP_RATES]) {
@@ -9554,25 +10542,24 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
err = ieee80211_get_ratemask(wiphy->bands[band],
nla_data(attr),
nla_len(attr),
- &request->rates[band]);
+ &request->req.rates[band]);
if (err)
goto out_free;
}
}
if (info->attrs[NL80211_ATTR_MEASUREMENT_DURATION]) {
- request->duration =
+ request->req.duration =
nla_get_u16(info->attrs[NL80211_ATTR_MEASUREMENT_DURATION]);
- request->duration_mandatory =
+ request->req.duration_mandatory =
nla_get_flag(info->attrs[NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY]);
}
- err = nl80211_check_scan_flags(wiphy, wdev, request, info->attrs,
- false);
+ err = nl80211_check_scan_flags_reg(wiphy, wdev, info->attrs, request);
if (err)
goto out_free;
- request->no_cck =
+ request->req.no_cck =
nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
/* Initial implementation used NL80211_ATTR_MAC to set the specific
@@ -9585,19 +10572,21 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
* (NL80211_ATTR_SCAN_FLAGS is used to enable random MAC address use).
*/
if (info->attrs[NL80211_ATTR_BSSID])
- memcpy(request->bssid,
+ memcpy(request->req.bssid,
nla_data(info->attrs[NL80211_ATTR_BSSID]), ETH_ALEN);
- else if (!(request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) &&
+ else if (!(request->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR) &&
info->attrs[NL80211_ATTR_MAC])
- memcpy(request->bssid, nla_data(info->attrs[NL80211_ATTR_MAC]),
+ memcpy(request->req.bssid,
+ nla_data(info->attrs[NL80211_ATTR_MAC]),
ETH_ALEN);
else
- eth_broadcast_addr(request->bssid);
+ eth_broadcast_addr(request->req.bssid);
- request->tsf_report_link_id = nl80211_link_id_or_invalid(info->attrs);
- request->wdev = wdev;
- request->wiphy = &rdev->wiphy;
- request->scan_start = jiffies;
+ request->req.tsf_report_link_id =
+ nl80211_link_id_or_invalid(info->attrs);
+ request->req.wdev = wdev;
+ request->req.wiphy = &rdev->wiphy;
+ request->req.scan_start = jiffies;
rdev->scan_req = request;
err = cfg80211_scan(rdev);
@@ -10019,7 +11008,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
request->ie_len);
}
- err = nl80211_check_scan_flags(wiphy, wdev, request, attrs, true);
+ err = nl80211_check_scan_flags_sched(wiphy, wdev, attrs, request);
if (err)
goto out_free;
@@ -10473,6 +11462,16 @@ skip_beacons:
if (info->attrs[NL80211_ATTR_CH_SWITCH_BLOCK_TX])
params.block_tx = true;
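+ /* Allow updating the unsolicited broadcast probe response
+ * template as part of a channel switch on AP / P2P GO.
+ */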
+ if ((wdev->iftype == NL80211_IFTYPE_AP ||
+ wdev->iftype == NL80211_IFTYPE_P2P_GO) &&
+ info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP]) {
+ err = nl80211_parse_unsol_bcast_probe_resp(
+ rdev, info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP],
+ &params.unsol_bcast_probe_resp);
+ if (err)
+ goto free;
+ }
+
params.link_id = link_id;
err = rdev_channel_switch(rdev, dev, &params);
@@ -12775,7 +13774,9 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
break;
case NL80211_IFTYPE_NAN:
if (!wiphy_ext_feature_isset(wdev->wiphy,
- NL80211_EXT_FEATURE_SECURE_NAN))
+ NL80211_EXT_FEATURE_SECURE_NAN) &&
+ !(wdev->wiphy->nan_capa.flags &
+ WIPHY_NAN_FLAGS_USERSPACE_DE))
return -EOPNOTSUPP;
break;
default:
@@ -12836,7 +13837,9 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
break;
case NL80211_IFTYPE_NAN:
if (!wiphy_ext_feature_isset(wdev->wiphy,
- NL80211_EXT_FEATURE_SECURE_NAN))
+ NL80211_EXT_FEATURE_SECURE_NAN) &&
+ !(wdev->wiphy->nan_capa.flags &
+ WIPHY_NAN_FLAGS_USERSPACE_DE))
return -EOPNOTSUPP;
break;
default:
@@ -14483,6 +15486,216 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)
return 0;
}
+static struct ieee80211_channel *nl80211_get_nan_channel(struct wiphy *wiphy,
+ int freq)
+{
+ struct ieee80211_channel *chan;
+ struct cfg80211_chan_def def;
+
+ /* Only the NAN discovery channels are valid:
+ * 2437 MHz (chan 6), 5220 MHz (chan 44) and 5745 MHz (chan 149)
+ */
+ if (freq != 5220 && freq != 5745 && freq != 2437)
+ return NULL;
+
+ chan = ieee80211_get_channel(wiphy, freq);
+ if (!chan)
+ return NULL;
+
+ cfg80211_chandef_create(&def, chan, NL80211_CHAN_NO_HT);
+
+ /* Check if the channel is allowed */
+ if (cfg80211_reg_can_beacon(wiphy, &def, NL80211_IFTYPE_NAN))
+ return chan;
+
+ return NULL;
+}
+
+static int nl80211_parse_nan_band_config(struct wiphy *wiphy,
+ struct nlattr **tb,
+ struct cfg80211_nan_band_config *cfg,
+ enum nl80211_band band)
+{
+ if (BIT(band) & ~(u32)wiphy->nan_supported_bands)
+ return -EINVAL;
+
+ if (tb[NL80211_NAN_BAND_CONF_FREQ]) {
+ u16 freq = nla_get_u16(tb[NL80211_NAN_BAND_CONF_FREQ]);
+
+ if (band != NL80211_BAND_5GHZ)
+ return -EINVAL;
+
+ cfg->chan = nl80211_get_nan_channel(wiphy, freq);
+ if (!cfg->chan)
+ return -EINVAL;
+ }
+
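+ /* RSSI close/middle must be given together and middle must be below close */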
+ if (tb[NL80211_NAN_BAND_CONF_RSSI_CLOSE]) {
+ cfg->rssi_close =
+ nla_get_s8(tb[NL80211_NAN_BAND_CONF_RSSI_CLOSE]);
+ if (!tb[NL80211_NAN_BAND_CONF_RSSI_MIDDLE])
+ return -EINVAL;
+ }
+
+ if (tb[NL80211_NAN_BAND_CONF_RSSI_MIDDLE]) {
+ cfg->rssi_middle =
+ nla_get_s8(tb[NL80211_NAN_BAND_CONF_RSSI_MIDDLE]);
+ if (!cfg->rssi_close || cfg->rssi_middle >= cfg->rssi_close)
+ return -EINVAL;
+ }
+
+ if (tb[NL80211_NAN_BAND_CONF_WAKE_DW]) {
+ cfg->awake_dw_interval =
+ nla_get_u8(tb[NL80211_NAN_BAND_CONF_WAKE_DW]);
+
+ if (band == NL80211_BAND_2GHZ && cfg->awake_dw_interval == 0)
+ return -EINVAL;
+ }
+
+ cfg->disable_scan =
+ nla_get_flag(tb[NL80211_NAN_BAND_CONF_DISABLE_SCAN]);
+ return 0;
+}
+
+static int nl80211_parse_nan_conf(struct wiphy *wiphy,
+ struct genl_info *info,
+ struct cfg80211_nan_conf *conf,
+ u32 *changed_flags)
+{
+ struct nlattr *attrs[NL80211_NAN_CONF_ATTR_MAX + 1];
+ int err, rem;
+ u32 changed = 0;
+ struct nlattr *band_config;
+
+ if (info->attrs[NL80211_ATTR_NAN_MASTER_PREF]) {
+ conf->master_pref =
+ nla_get_u8(info->attrs[NL80211_ATTR_NAN_MASTER_PREF]);
+
+ changed |= CFG80211_NAN_CONF_CHANGED_PREF;
+ }
+
+ if (info->attrs[NL80211_ATTR_BANDS]) {
+ u32 bands = nla_get_u32(info->attrs[NL80211_ATTR_BANDS]);
+
+ if (bands & ~(u32)wiphy->nan_supported_bands)
+ return -EOPNOTSUPP;
+
+ if (bands && !(bands & BIT(NL80211_BAND_2GHZ)))
+ return -EINVAL;
+
+ conf->bands = bands;
+ changed |= CFG80211_NAN_CONF_CHANGED_BANDS;
+ }
+
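+ /* By default stay awake for every DW on 2.4 GHz, and on 5 GHz
+ * when that band may be used.
+ */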
+ conf->band_cfgs[NL80211_BAND_2GHZ].awake_dw_interval = 1;
+ if (conf->bands & BIT(NL80211_BAND_5GHZ) || !conf->bands)
+ conf->band_cfgs[NL80211_BAND_5GHZ].awake_dw_interval = 1;
+
+ /* The 2.4 GHz band always uses channel 6 (2437 MHz) */
+ conf->band_cfgs[NL80211_BAND_2GHZ].chan =
+ nl80211_get_nan_channel(wiphy, 2437);
+ if (!conf->band_cfgs[NL80211_BAND_2GHZ].chan)
+ return -EINVAL;
+
+ if (!info->attrs[NL80211_ATTR_NAN_CONFIG])
+ goto out;
+
+ err = nla_parse_nested(attrs, NL80211_NAN_CONF_ATTR_MAX,
+ info->attrs[NL80211_ATTR_NAN_CONFIG], NULL,
+ info->extack);
+ if (err)
+ return err;
+
+ changed |= CFG80211_NAN_CONF_CHANGED_CONFIG;
+ if (attrs[NL80211_NAN_CONF_CLUSTER_ID])
+ conf->cluster_id =
+ nla_data(attrs[NL80211_NAN_CONF_CLUSTER_ID]);
+
+ if (attrs[NL80211_NAN_CONF_EXTRA_ATTRS]) {
+ conf->extra_nan_attrs =
+ nla_data(attrs[NL80211_NAN_CONF_EXTRA_ATTRS]);
+ conf->extra_nan_attrs_len =
+ nla_len(attrs[NL80211_NAN_CONF_EXTRA_ATTRS]);
+ }
+
+ if (attrs[NL80211_NAN_CONF_VENDOR_ELEMS]) {
+ conf->vendor_elems =
+ nla_data(attrs[NL80211_NAN_CONF_VENDOR_ELEMS]);
+ conf->vendor_elems_len =
+ nla_len(attrs[NL80211_NAN_CONF_VENDOR_ELEMS]);
+ }
+
+ if (attrs[NL80211_NAN_CONF_BAND_CONFIGS]) {
+ nla_for_each_nested(band_config,
+ attrs[NL80211_NAN_CONF_BAND_CONFIGS],
+ rem) {
+ enum nl80211_band band;
+ struct cfg80211_nan_band_config *cfg;
+ struct nlattr *tb[NL80211_NAN_BAND_CONF_ATTR_MAX + 1];
+
+ err = nla_parse_nested(tb,
+ NL80211_NAN_BAND_CONF_ATTR_MAX,
+ band_config, NULL,
+ info->extack);
+ if (err)
+ return err;
+
+ if (!tb[NL80211_NAN_BAND_CONF_BAND])
+ return -EINVAL;
+
+ band = nla_get_u8(tb[NL80211_NAN_BAND_CONF_BAND]);
+ if (conf->bands && !(conf->bands & BIT(band)))
+ return -EINVAL;
+
+ cfg = &conf->band_cfgs[band];
+
+ err = nl80211_parse_nan_band_config(wiphy, tb, cfg,
+ band);
+ if (err)
+ return err;
+ }
+ }
+
+ if (attrs[NL80211_NAN_CONF_SCAN_PERIOD])
+ conf->scan_period =
+ nla_get_u16(attrs[NL80211_NAN_CONF_SCAN_PERIOD]);
+
+ if (attrs[NL80211_NAN_CONF_SCAN_DWELL_TIME])
+ conf->scan_dwell_time =
+ nla_get_u16(attrs[NL80211_NAN_CONF_SCAN_DWELL_TIME]);
+
+ if (attrs[NL80211_NAN_CONF_DISCOVERY_BEACON_INTERVAL])
+ conf->discovery_beacon_interval =
+ nla_get_u8(attrs[NL80211_NAN_CONF_DISCOVERY_BEACON_INTERVAL]);
+
+ if (attrs[NL80211_NAN_CONF_NOTIFY_DW])
+ conf->enable_dw_notification =
+ nla_get_flag(attrs[NL80211_NAN_CONF_NOTIFY_DW]);
+
+out:
+ if (!conf->band_cfgs[NL80211_BAND_5GHZ].chan &&
+ (!conf->bands || conf->bands & BIT(NL80211_BAND_5GHZ))) {
+ /* If no 5 GHz channel was specified, use a default if possible */
+ conf->band_cfgs[NL80211_BAND_5GHZ].chan =
+ nl80211_get_nan_channel(wiphy, 5745);
+ if (!conf->band_cfgs[NL80211_BAND_5GHZ].chan)
+ conf->band_cfgs[NL80211_BAND_5GHZ].chan =
+ nl80211_get_nan_channel(wiphy, 5220);
+
+ /* Return error if user space asked explicitly for 5 GHz */
+ if (!conf->band_cfgs[NL80211_BAND_5GHZ].chan &&
+ conf->bands & BIT(NL80211_BAND_5GHZ)) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ info->attrs[NL80211_ATTR_BANDS],
+ "5 GHz band operation is not allowed");
+ return -EINVAL;
+ }
+ }
+
+ if (changed_flags)
+ *changed_flags = changed;
+
+ return 0;
+}
+
static int nl80211_start_nan(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -14499,23 +15712,13 @@ static int nl80211_start_nan(struct sk_buff *skb, struct genl_info *info)
if (rfkill_blocked(rdev->wiphy.rfkill))
return -ERFKILL;
+ /* Master preference is mandatory for START_NAN */
if (!info->attrs[NL80211_ATTR_NAN_MASTER_PREF])
return -EINVAL;
- conf.master_pref =
- nla_get_u8(info->attrs[NL80211_ATTR_NAN_MASTER_PREF]);
-
- if (info->attrs[NL80211_ATTR_BANDS]) {
- u32 bands = nla_get_u32(info->attrs[NL80211_ATTR_BANDS]);
-
- if (bands & ~(u32)wdev->wiphy->nan_supported_bands)
- return -EOPNOTSUPP;
-
- if (bands && !(bands & BIT(NL80211_BAND_2GHZ)))
- return -EINVAL;
-
- conf.bands = bands;
- }
+ err = nl80211_parse_nan_conf(&rdev->wiphy, info, &conf, NULL);
+ if (err)
+ return err;
err = rdev_start_nan(rdev, wdev, &conf);
if (err)
@@ -14871,6 +16074,7 @@ static int nl80211_nan_change_config(struct sk_buff *skb,
struct wireless_dev *wdev = info->user_ptr[1];
struct cfg80211_nan_conf conf = {};
u32 changed = 0;
+ int err;
if (wdev->iftype != NL80211_IFTYPE_NAN)
return -EOPNOTSUPP;
@@ -14878,27 +16082,9 @@ static int nl80211_nan_change_config(struct sk_buff *skb,
if (!wdev_running(wdev))
return -ENOTCONN;
- if (info->attrs[NL80211_ATTR_NAN_MASTER_PREF]) {
- conf.master_pref =
- nla_get_u8(info->attrs[NL80211_ATTR_NAN_MASTER_PREF]);
- if (conf.master_pref <= 1 || conf.master_pref == 255)
- return -EINVAL;
-
- changed |= CFG80211_NAN_CONF_CHANGED_PREF;
- }
-
- if (info->attrs[NL80211_ATTR_BANDS]) {
- u32 bands = nla_get_u32(info->attrs[NL80211_ATTR_BANDS]);
-
- if (bands & ~(u32)wdev->wiphy->nan_supported_bands)
- return -EOPNOTSUPP;
-
- if (bands && !(bands & BIT(NL80211_BAND_2GHZ)))
- return -EINVAL;
-
- conf.bands = bands;
- changed |= CFG80211_NAN_CONF_CHANGED_BANDS;
- }
+ err = nl80211_parse_nan_conf(&rdev->wiphy, info, &conf, &changed);
+ if (err)
+ return err;
if (!changed)
return -EINVAL;
@@ -16275,6 +17461,14 @@ static int nl80211_color_change(struct sk_buff *skb, struct genl_info *info)
params.counter_offset_presp = offset;
}
+ if (info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP]) {
+ err = nl80211_parse_unsol_bcast_probe_resp(
+ rdev, info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP],
+ &params.unsol_bcast_probe_resp);
+ if (err)
+ goto out;
+ }
+
params.link_id = nl80211_link_id(info->attrs);
err = rdev_color_change(rdev, dev, &params);
@@ -16929,6 +18123,7 @@ static int nl80211_set_sar_specs(struct sk_buff *skb, struct genl_info *info)
if (!sar_spec)
return -ENOMEM;
+ sar_spec->num_sub_specs = specs;
sar_spec->type = type;
specs = 0;
nla_for_each_nested(spec_list, tb[NL80211_SAR_ATTR_SPECS], rem) {
@@ -17899,7 +19094,7 @@ void nl80211_notify_iface(struct cfg80211_registered_device *rdev,
static int nl80211_add_scan_req(struct sk_buff *msg,
struct cfg80211_registered_device *rdev)
{
- struct cfg80211_scan_request *req = rdev->scan_req;
+ struct cfg80211_scan_request_int *req = rdev->scan_req;
struct nlattr *nest;
int i;
struct cfg80211_scan_info *info;
@@ -17910,19 +19105,20 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
nest = nla_nest_start_noflag(msg, NL80211_ATTR_SCAN_SSIDS);
if (!nest)
goto nla_put_failure;
- for (i = 0; i < req->n_ssids; i++) {
- if (nla_put(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid))
+ for (i = 0; i < req->req.n_ssids; i++) {
+ if (nla_put(msg, i, req->req.ssids[i].ssid_len,
+ req->req.ssids[i].ssid))
goto nla_put_failure;
}
nla_nest_end(msg, nest);
- if (req->flags & NL80211_SCAN_FLAG_FREQ_KHZ) {
+ if (req->req.flags & NL80211_SCAN_FLAG_FREQ_KHZ) {
nest = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQ_KHZ);
if (!nest)
goto nla_put_failure;
- for (i = 0; i < req->n_channels; i++) {
+ for (i = 0; i < req->req.n_channels; i++) {
if (nla_put_u32(msg, i,
- ieee80211_channel_to_khz(req->channels[i])))
+ ieee80211_channel_to_khz(req->req.channels[i])))
goto nla_put_failure;
}
nla_nest_end(msg, nest);
@@ -17931,19 +19127,20 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
NL80211_ATTR_SCAN_FREQUENCIES);
if (!nest)
goto nla_put_failure;
- for (i = 0; i < req->n_channels; i++) {
- if (nla_put_u32(msg, i, req->channels[i]->center_freq))
+ for (i = 0; i < req->req.n_channels; i++) {
+ if (nla_put_u32(msg, i,
+ req->req.channels[i]->center_freq))
goto nla_put_failure;
}
nla_nest_end(msg, nest);
}
- if (req->ie &&
- nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie))
+ if (req->req.ie &&
+ nla_put(msg, NL80211_ATTR_IE, req->req.ie_len, req->req.ie))
goto nla_put_failure;
- if (req->flags &&
- nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags))
+ if (req->req.flags &&
+ nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->req.flags))
goto nla_put_failure;
info = rdev->int_scan_req ? &rdev->int_scan_req->info :
@@ -19047,7 +20244,7 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
return;
if (nl80211_send_station(msg, NL80211_CMD_NEW_STATION, 0, 0, 0,
- rdev, dev, mac_addr, sinfo) < 0) {
+ rdev, dev, mac_addr, sinfo, false) < 0) {
nlmsg_free(msg);
return;
}
@@ -19077,7 +20274,7 @@ void cfg80211_del_sta_sinfo(struct net_device *dev, const u8 *mac_addr,
}
if (nl80211_send_station(msg, NL80211_CMD_DEL_STATION, 0, 0, 0,
- rdev, dev, mac_addr, sinfo) < 0) {
+ rdev, dev, mac_addr, sinfo, false) < 0) {
nlmsg_free(msg);
return;
}
@@ -19123,7 +20320,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
EXPORT_SYMBOL(cfg80211_conn_failed);
static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
- const u8 *addr, gfp_t gfp)
+ const u8 *addr, int link_id, gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
@@ -19146,7 +20343,9 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
- nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr))
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
+ (link_id >= 0 &&
+ nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link_id)))
goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -19158,13 +20357,13 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
return true;
}
-bool cfg80211_rx_spurious_frame(struct net_device *dev,
- const u8 *addr, gfp_t gfp)
+bool cfg80211_rx_spurious_frame(struct net_device *dev, const u8 *addr,
+ int link_id, gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
bool ret;
- trace_cfg80211_rx_spurious_frame(dev, addr);
+ trace_cfg80211_rx_spurious_frame(dev, addr, link_id);
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
wdev->iftype != NL80211_IFTYPE_P2P_GO)) {
@@ -19172,19 +20371,19 @@ bool cfg80211_rx_spurious_frame(struct net_device *dev,
return false;
}
ret = __nl80211_unexpected_frame(dev, NL80211_CMD_UNEXPECTED_FRAME,
- addr, gfp);
+ addr, link_id, gfp);
trace_cfg80211_return_bool(ret);
return ret;
}
EXPORT_SYMBOL(cfg80211_rx_spurious_frame);
-bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
- const u8 *addr, gfp_t gfp)
+bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev, const u8 *addr,
+ int link_id, gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
bool ret;
- trace_cfg80211_rx_unexpected_4addr_frame(dev, addr);
+ trace_cfg80211_rx_unexpected_4addr_frame(dev, addr, link_id);
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
wdev->iftype != NL80211_IFTYPE_P2P_GO &&
@@ -19194,7 +20393,7 @@ bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
}
ret = __nl80211_unexpected_frame(dev,
NL80211_CMD_UNEXPECTED_4ADDR_FRAME,
- addr, gfp);
+ addr, link_id, gfp);
trace_cfg80211_return_bool(ret);
return ret;
}
@@ -20608,6 +21807,88 @@ void cfg80211_epcs_changed(struct net_device *netdev, bool enabled)
}
EXPORT_SYMBOL(cfg80211_epcs_changed);
+void cfg80211_next_nan_dw_notif(struct wireless_dev *wdev,
+ struct ieee80211_channel *chan, gfp_t gfp)
+{
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+ struct sk_buff *msg;
+ void *hdr;
+
+ trace_cfg80211_next_nan_dw_notif(wdev, chan);
+
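+ /* DW notifications are only unicast to the socket owning the NAN interface */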
+ if (!wdev->owner_nlportid)
+ return;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0,
+ NL80211_CMD_NAN_NEXT_DW_NOTIFICATION);
+ if (!hdr)
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+ NL80211_ATTR_PAD) ||
+ nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ genlmsg_unicast(wiphy_net(wiphy), msg, wdev->owner_nlportid);
+
+ return;
+
+ nla_put_failure:
+ nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_next_nan_dw_notif);
+
+void cfg80211_nan_cluster_joined(struct wireless_dev *wdev,
+ const u8 *cluster_id, bool new_cluster,
+ gfp_t gfp)
+{
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+ struct sk_buff *msg;
+ void *hdr;
+
+ trace_cfg80211_nan_cluster_joined(wdev, cluster_id, new_cluster);
+
+ memcpy(wdev->u.nan.cluster_id, cluster_id, ETH_ALEN);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NAN_CLUSTER_JOINED);
+ if (!hdr)
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+ NL80211_ATTR_PAD) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, cluster_id) ||
+ (new_cluster && nla_put_flag(msg, NL80211_ATTR_NAN_NEW_CLUSTER)))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
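+ /* Unicast to the owning socket if any, otherwise multicast to the NAN group */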
+ if (!wdev->owner_nlportid)
+ genlmsg_multicast_netns(&nl80211_fam, wiphy_net(wiphy),
+ msg, 0, NL80211_MCGRP_NAN, gfp);
+ else
+ genlmsg_unicast(wiphy_net(wiphy), msg,
+ wdev->owner_nlportid);
+ return;
+
+ nla_put_failure:
+ nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_nan_cluster_joined);
+
/* initialisation/exit functions */
int __init nl80211_init(void)
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 9f4783c2354c..ac6884bacf3f 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -456,15 +456,15 @@ rdev_set_monitor_channel(struct cfg80211_registered_device *rdev,
}
static inline int rdev_scan(struct cfg80211_registered_device *rdev,
- struct cfg80211_scan_request *request)
+ struct cfg80211_scan_request_int *request)
{
int ret;
- if (WARN_ON_ONCE(!request->n_ssids && request->ssids))
+ if (WARN_ON_ONCE(!request->req.n_ssids && request->req.ssids))
return -EINVAL;
trace_rdev_scan(&rdev->wiphy, request);
- ret = rdev->ops->scan(&rdev->wiphy, request);
+ ret = rdev->ops->scan(&rdev->wiphy, &request->req);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
@@ -577,35 +577,40 @@ static inline int rdev_leave_ibss(struct cfg80211_registered_device *rdev,
}
static inline int
-rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, u32 changed)
+rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, int radio_idx,
+ u32 changed)
{
int ret = -EOPNOTSUPP;
- trace_rdev_set_wiphy_params(&rdev->wiphy, changed);
+ trace_rdev_set_wiphy_params(&rdev->wiphy, radio_idx, changed);
if (rdev->ops->set_wiphy_params)
- ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
+ ret = rdev->ops->set_wiphy_params(&rdev->wiphy, radio_idx,
+ changed);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
static inline int rdev_set_tx_power(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev,
- enum nl80211_tx_power_setting type, int mbm)
+ struct wireless_dev *wdev, int radio_idx,
+ enum nl80211_tx_power_setting type,
+ int mbm)
{
int ret;
- trace_rdev_set_tx_power(&rdev->wiphy, wdev, type, mbm);
- ret = rdev->ops->set_tx_power(&rdev->wiphy, wdev, type, mbm);
+ trace_rdev_set_tx_power(&rdev->wiphy, wdev, radio_idx, type, mbm);
+ ret = rdev->ops->set_tx_power(&rdev->wiphy, wdev, radio_idx, type,
+ mbm);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
static inline int rdev_get_tx_power(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev, unsigned int link_id,
- int *dbm)
+ struct wireless_dev *wdev, int radio_idx,
+ unsigned int link_id, int *dbm)
{
int ret;
- trace_rdev_get_tx_power(&rdev->wiphy, wdev, link_id);
- ret = rdev->ops->get_tx_power(&rdev->wiphy, wdev, link_id, dbm);
+ trace_rdev_get_tx_power(&rdev->wiphy, wdev, radio_idx, link_id);
+ ret = rdev->ops->get_tx_power(&rdev->wiphy, wdev, radio_idx, link_id,
+ dbm);
trace_rdev_return_int_int(&rdev->wiphy, ret, *dbm);
return ret;
}
@@ -857,21 +862,21 @@ rdev_update_mgmt_frame_registrations(struct cfg80211_registered_device *rdev,
}
static inline int rdev_set_antenna(struct cfg80211_registered_device *rdev,
- u32 tx_ant, u32 rx_ant)
+ int radio_idx, u32 tx_ant, u32 rx_ant)
{
int ret;
- trace_rdev_set_antenna(&rdev->wiphy, tx_ant, rx_ant);
- ret = rdev->ops->set_antenna(&rdev->wiphy, tx_ant, rx_ant);
+ trace_rdev_set_antenna(&rdev->wiphy, radio_idx, tx_ant, rx_ant);
+ ret = rdev->ops->set_antenna(&rdev->wiphy, radio_idx, tx_ant, rx_ant);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
static inline int rdev_get_antenna(struct cfg80211_registered_device *rdev,
- u32 *tx_ant, u32 *rx_ant)
+ int radio_idx, u32 *tx_ant, u32 *rx_ant)
{
int ret;
- trace_rdev_get_antenna(&rdev->wiphy);
- ret = rdev->ops->get_antenna(&rdev->wiphy, tx_ant, rx_ant);
+ trace_rdev_get_antenna(&rdev->wiphy, radio_idx);
+ ret = rdev->ops->get_antenna(&rdev->wiphy, radio_idx, tx_ant, rx_ant);
if (ret)
trace_rdev_return_int(&rdev->wiphy, ret);
else
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index c1752b31734f..73cab51f6379 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -53,7 +53,7 @@
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/nl80211.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <linux/verification.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
@@ -105,7 +105,7 @@ static struct regulatory_request __rcu *last_request =
(void __force __rcu *)&core_request_world;
/* To trigger userspace events and load firmware */
-static struct platform_device *reg_pdev;
+static struct faux_device *reg_fdev;
/*
* Central wireless core regulatory domains, we only need two,
@@ -583,7 +583,7 @@ static int call_crda(const char *alpha2)
else
pr_debug("Calling CRDA to update world regulatory domain\n");
- ret = kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env);
+ ret = kobject_uevent_env(&reg_fdev->dev.kobj, KOBJ_CHANGE, env);
if (ret)
return ret;
@@ -779,7 +779,7 @@ static bool regdb_has_valid_signature(const u8 *data, unsigned int size)
const struct firmware *sig;
bool result;
- if (request_firmware(&sig, "regulatory.db.p7s", &reg_pdev->dev))
+ if (request_firmware(&sig, "regulatory.db.p7s", &reg_fdev->dev))
return false;
result = verify_pkcs7_signature(data, size, sig->data, sig->size,
@@ -1061,7 +1061,7 @@ static int query_regdb_file(const char *alpha2)
return -ENOMEM;
err = request_firmware_nowait(THIS_MODULE, true, "regulatory.db",
- &reg_pdev->dev, GFP_KERNEL,
+ &reg_fdev->dev, GFP_KERNEL,
(void *)alpha2, regdb_fw_cb);
if (err)
kfree(alpha2);
@@ -1077,7 +1077,7 @@ int reg_reload_regdb(void)
const struct ieee80211_regdomain *current_regdomain;
struct regulatory_request *request;
- err = request_firmware(&fw, "regulatory.db", &reg_pdev->dev);
+ err = request_firmware(&fw, "regulatory.db", &reg_fdev->dev);
if (err)
return err;
@@ -1707,6 +1707,16 @@ static uint32_t reg_rule_to_chan_bw_flags(const struct ieee80211_regdomain *regd
if (reg_rule->flags & NL80211_RRF_AUTO_BW)
max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
+ if (is_s1g) {
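+ /* S1G only uses the 4, 8 and 16 MHz width restriction flags */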
+ if (max_bandwidth_khz < MHZ_TO_KHZ(16))
+ bw_flags |= IEEE80211_CHAN_NO_16MHZ;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(8))
+ bw_flags |= IEEE80211_CHAN_NO_8MHZ;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(4))
+ bw_flags |= IEEE80211_CHAN_NO_4MHZ;
+ return bw_flags;
+ }
+
/* If we get a reg_rule we can assume that at least 5Mhz fit */
if (!cfg80211_does_bw_fit_range(freq_range,
center_freq_khz,
@@ -1717,59 +1727,19 @@ static uint32_t reg_rule_to_chan_bw_flags(const struct ieee80211_regdomain *regd
MHZ_TO_KHZ(20)))
bw_flags |= IEEE80211_CHAN_NO_20MHZ;
- if (is_s1g) {
- /* S1G is strict about non overlapping channels. We can
- * calculate which bandwidth is allowed per channel by finding
- * the largest bandwidth which cleanly divides the freq_range.
- */
- int edge_offset;
- int ch_bw = max_bandwidth_khz;
-
- while (ch_bw) {
- edge_offset = (center_freq_khz - ch_bw / 2) -
- freq_range->start_freq_khz;
- if (edge_offset % ch_bw == 0) {
- switch (KHZ_TO_MHZ(ch_bw)) {
- case 1:
- bw_flags |= IEEE80211_CHAN_1MHZ;
- break;
- case 2:
- bw_flags |= IEEE80211_CHAN_2MHZ;
- break;
- case 4:
- bw_flags |= IEEE80211_CHAN_4MHZ;
- break;
- case 8:
- bw_flags |= IEEE80211_CHAN_8MHZ;
- break;
- case 16:
- bw_flags |= IEEE80211_CHAN_16MHZ;
- break;
- default:
- /* If we got here, no bandwidths fit on
- * this frequency, ie. band edge.
- */
- bw_flags |= IEEE80211_CHAN_DISABLED;
- break;
- }
- break;
- }
- ch_bw /= 2;
- }
- } else {
- if (max_bandwidth_khz < MHZ_TO_KHZ(10))
- bw_flags |= IEEE80211_CHAN_NO_10MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(20))
- bw_flags |= IEEE80211_CHAN_NO_20MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(40))
- bw_flags |= IEEE80211_CHAN_NO_HT40;
- if (max_bandwidth_khz < MHZ_TO_KHZ(80))
- bw_flags |= IEEE80211_CHAN_NO_80MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(160))
- bw_flags |= IEEE80211_CHAN_NO_160MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(320))
- bw_flags |= IEEE80211_CHAN_NO_320MHZ;
- }
+ if (max_bandwidth_khz < MHZ_TO_KHZ(10))
+ bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(20))
+ bw_flags |= IEEE80211_CHAN_NO_20MHZ;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(40))
+ bw_flags |= IEEE80211_CHAN_NO_HT40;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(80))
+ bw_flags |= IEEE80211_CHAN_NO_80MHZ;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(160))
+ bw_flags |= IEEE80211_CHAN_NO_160MHZ;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(320))
+ bw_flags |= IEEE80211_CHAN_NO_320MHZ;
+
return bw_flags;
}
@@ -4229,6 +4199,8 @@ static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev)
struct wireless_dev *wdev;
unsigned int link_id;
+ guard(wiphy)(&rdev->wiphy);
+
/* If we finished CAC or received radar, we should end any
* CAC running on the same channels.
* the check !cfg80211_chandef_dfs_usable contain 2 options:
@@ -4300,12 +4272,12 @@ static int __init regulatory_init_db(void)
* in that case, don't try to do any further work here as
* it's doomed to lead to crashes.
*/
- if (IS_ERR_OR_NULL(reg_pdev))
+ if (!reg_fdev)
return -EINVAL;
err = load_builtin_regdb_keys();
if (err) {
- platform_device_unregister(reg_pdev);
+ faux_device_destroy(reg_fdev);
return err;
}
@@ -4313,7 +4285,7 @@ static int __init regulatory_init_db(void)
err = regulatory_hint_core(cfg80211_world_regdom->alpha2);
if (err) {
if (err == -ENOMEM) {
- platform_device_unregister(reg_pdev);
+ faux_device_destroy(reg_fdev);
return err;
}
/*
@@ -4342,9 +4314,9 @@ late_initcall(regulatory_init_db);
int __init regulatory_init(void)
{
- reg_pdev = platform_device_register_simple("regulatory", 0, NULL, 0);
- if (IS_ERR(reg_pdev))
- return PTR_ERR(reg_pdev);
+ reg_fdev = faux_device_create("regulatory", NULL, NULL);
+ if (!reg_fdev)
+ return -ENODEV;
rcu_assign_pointer(cfg80211_regdomain, cfg80211_world_regdom);
@@ -4372,9 +4344,9 @@ void regulatory_exit(void)
reset_regdomains(true, NULL);
rtnl_unlock();
- dev_set_uevent_suppress(&reg_pdev->dev, true);
+ dev_set_uevent_suppress(&reg_fdev->dev, true);
- platform_device_unregister(reg_pdev);
+ faux_device_destroy(reg_fdev);
list_for_each_entry_safe(reg_beacon, btmp, &reg_pending_beacons, list) {
list_del(&reg_beacon->list);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index e8a4fe44ec2d..90a9187a6b13 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -782,9 +782,9 @@ cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
}
EXPORT_SYMBOL_IF_CFG80211_KUNIT(cfg80211_parse_colocated_ap);
-static void cfg80211_scan_req_add_chan(struct cfg80211_scan_request *request,
- struct ieee80211_channel *chan,
- bool add_to_6ghz)
+static void cfg80211_scan_req_add_chan(struct cfg80211_scan_request *request,
+ struct ieee80211_channel *chan,
+ bool add_to_6ghz)
{
int i;
u32 n_channels = request->n_channels;
@@ -838,30 +838,32 @@ static bool cfg80211_find_ssid_match(struct cfg80211_colocated_ap *ap,
return false;
}
-static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
+static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev,
+ bool first_part)
{
u8 i;
struct cfg80211_colocated_ap *ap;
int n_channels, count = 0, err;
- struct cfg80211_scan_request *request, *rdev_req = rdev->scan_req;
+ struct cfg80211_scan_request_int *request, *rdev_req = rdev->scan_req;
LIST_HEAD(coloc_ap_list);
bool need_scan_psc = true;
const struct ieee80211_sband_iftype_data *iftd;
size_t size, offs_ssids, offs_6ghz_params, offs_ies;
- rdev_req->scan_6ghz = true;
+ rdev_req->req.scan_6ghz = true;
+ rdev_req->req.first_part = first_part;
if (!rdev->wiphy.bands[NL80211_BAND_6GHZ])
return -EOPNOTSUPP;
iftd = ieee80211_get_sband_iftype_data(rdev->wiphy.bands[NL80211_BAND_6GHZ],
- rdev_req->wdev->iftype);
+ rdev_req->req.wdev->iftype);
if (!iftd || !iftd->he_cap.has_he)
return -EOPNOTSUPP;
n_channels = rdev->wiphy.bands[NL80211_BAND_6GHZ]->n_channels;
- if (rdev_req->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ) {
+ if (rdev_req->req.flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ) {
struct cfg80211_internal_bss *intbss;
spin_lock_bh(&rdev->bss_lock);
@@ -883,8 +885,8 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
* This is relevant for ML probe requests when the lower
* band APs have not been discovered.
*/
- if (is_broadcast_ether_addr(rdev_req->bssid) ||
- !ether_addr_equal(rdev_req->bssid, res->bssid) ||
+ if (is_broadcast_ether_addr(rdev_req->req.bssid) ||
+ !ether_addr_equal(rdev_req->req.bssid, res->bssid) ||
res->channel->band != NL80211_BAND_6GHZ)
continue;
@@ -911,13 +913,13 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
spin_unlock_bh(&rdev->bss_lock);
}
- size = struct_size(request, channels, n_channels);
+ size = struct_size(request, req.channels, n_channels);
offs_ssids = size;
- size += sizeof(*request->ssids) * rdev_req->n_ssids;
+ size += sizeof(*request->req.ssids) * rdev_req->req.n_ssids;
offs_6ghz_params = size;
- size += sizeof(*request->scan_6ghz_params) * count;
+ size += sizeof(*request->req.scan_6ghz_params) * count;
offs_ies = size;
- size += rdev_req->ie_len;
+ size += rdev_req->req.ie_len;
request = kzalloc(size, GFP_KERNEL);
if (!request) {
@@ -926,26 +928,26 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
}
*request = *rdev_req;
- request->n_channels = 0;
- request->n_6ghz_params = 0;
- if (rdev_req->n_ssids) {
+ request->req.n_channels = 0;
+ request->req.n_6ghz_params = 0;
+ if (rdev_req->req.n_ssids) {
/*
* Add the ssids from the parent scan request to the new
* scan request, so the driver would be able to use them
* in its probe requests to discover hidden APs on PSC
* channels.
*/
- request->ssids = (void *)request + offs_ssids;
- memcpy(request->ssids, rdev_req->ssids,
- sizeof(*request->ssids) * request->n_ssids);
+ request->req.ssids = (void *)request + offs_ssids;
+ memcpy(request->req.ssids, rdev_req->req.ssids,
+ sizeof(*request->req.ssids) * request->req.n_ssids);
}
- request->scan_6ghz_params = (void *)request + offs_6ghz_params;
+ request->req.scan_6ghz_params = (void *)request + offs_6ghz_params;
- if (rdev_req->ie_len) {
+ if (rdev_req->req.ie_len) {
void *ie = (void *)request + offs_ies;
- memcpy(ie, rdev_req->ie, rdev_req->ie_len);
- request->ie = ie;
+ memcpy(ie, rdev_req->req.ie, rdev_req->req.ie_len);
+ request->req.ie = ie;
}
/*
@@ -953,10 +955,12 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
* and at least one of the reported co-located APs with same SSID
* indicating that all APs in the same ESS are co-located
*/
- if (count && request->n_ssids == 1 && request->ssids[0].ssid_len) {
+ if (count &&
+ request->req.n_ssids == 1 &&
+ request->req.ssids[0].ssid_len) {
list_for_each_entry(ap, &coloc_ap_list, list) {
if (ap->colocated_ess &&
- cfg80211_find_ssid_match(ap, request)) {
+ cfg80211_find_ssid_match(ap, &request->req)) {
need_scan_psc = false;
break;
}
@@ -968,51 +972,52 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
* regardless of the collocated APs (PSC channels or all channels
* in case that NL80211_SCAN_FLAG_COLOCATED_6GHZ is not set)
*/
- for (i = 0; i < rdev_req->n_channels; i++) {
- if (rdev_req->channels[i]->band == NL80211_BAND_6GHZ &&
+ for (i = 0; i < rdev_req->req.n_channels; i++) {
+ if (rdev_req->req.channels[i]->band == NL80211_BAND_6GHZ &&
((need_scan_psc &&
- cfg80211_channel_is_psc(rdev_req->channels[i])) ||
- !(rdev_req->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ))) {
- cfg80211_scan_req_add_chan(request,
- rdev_req->channels[i],
+ cfg80211_channel_is_psc(rdev_req->req.channels[i])) ||
+ !(rdev_req->req.flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ))) {
+ cfg80211_scan_req_add_chan(&request->req,
+ rdev_req->req.channels[i],
false);
}
}
- if (!(rdev_req->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ))
+ if (!(rdev_req->req.flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ))
goto skip;
list_for_each_entry(ap, &coloc_ap_list, list) {
bool found = false;
struct cfg80211_scan_6ghz_params *scan_6ghz_params =
- &request->scan_6ghz_params[request->n_6ghz_params];
+ &request->req.scan_6ghz_params[request->req.n_6ghz_params];
struct ieee80211_channel *chan =
ieee80211_get_channel(&rdev->wiphy, ap->center_freq);
if (!chan || chan->flags & IEEE80211_CHAN_DISABLED ||
- !cfg80211_wdev_channel_allowed(rdev_req->wdev, chan))
+ !cfg80211_wdev_channel_allowed(rdev_req->req.wdev, chan))
continue;
- for (i = 0; i < rdev_req->n_channels; i++) {
- if (rdev_req->channels[i] == chan)
+ for (i = 0; i < rdev_req->req.n_channels; i++) {
+ if (rdev_req->req.channels[i] == chan)
found = true;
}
if (!found)
continue;
- if (request->n_ssids > 0 &&
- !cfg80211_find_ssid_match(ap, request))
+ if (request->req.n_ssids > 0 &&
+ !cfg80211_find_ssid_match(ap, &request->req))
continue;
- if (!is_broadcast_ether_addr(request->bssid) &&
- !ether_addr_equal(request->bssid, ap->bssid))
+ if (!is_broadcast_ether_addr(request->req.bssid) &&
+ !ether_addr_equal(request->req.bssid, ap->bssid))
continue;
- if (!request->n_ssids && ap->multi_bss && !ap->transmitted_bssid)
+ if (!request->req.n_ssids && ap->multi_bss &&
+ !ap->transmitted_bssid)
continue;
- cfg80211_scan_req_add_chan(request, chan, true);
+ cfg80211_scan_req_add_chan(&request->req, chan, true);
memcpy(scan_6ghz_params->bssid, ap->bssid, ETH_ALEN);
scan_6ghz_params->short_ssid = ap->short_ssid;
scan_6ghz_params->short_ssid_valid = ap->short_ssid_valid;
@@ -1028,14 +1033,14 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
if (cfg80211_channel_is_psc(chan) && !need_scan_psc)
scan_6ghz_params->psc_no_listen = true;
- request->n_6ghz_params++;
+ request->req.n_6ghz_params++;
}
skip:
cfg80211_free_coloc_ap_list(&coloc_ap_list);
- if (request->n_channels) {
- struct cfg80211_scan_request *old = rdev->int_scan_req;
+ if (request->req.n_channels) {
+ struct cfg80211_scan_request_int *old = rdev->int_scan_req;
rdev->int_scan_req = request;
@@ -1043,7 +1048,7 @@ skip:
* If this scan follows a previous scan, save the scan start
* info from the first part of the scan
*/
- if (old)
+ if (!first_part && !WARN_ON(!old))
rdev->int_scan_req->info = old->info;
err = rdev_scan(rdev, request);
@@ -1063,35 +1068,39 @@ skip:
int cfg80211_scan(struct cfg80211_registered_device *rdev)
{
- struct cfg80211_scan_request *request;
- struct cfg80211_scan_request *rdev_req = rdev->scan_req;
+ struct cfg80211_scan_request_int *request;
+ struct cfg80211_scan_request_int *rdev_req = rdev->scan_req;
u32 n_channels = 0, idx, i;
- if (!(rdev->wiphy.flags & WIPHY_FLAG_SPLIT_SCAN_6GHZ))
+ if (!(rdev->wiphy.flags & WIPHY_FLAG_SPLIT_SCAN_6GHZ)) {
+ rdev_req->req.first_part = true;
return rdev_scan(rdev, rdev_req);
+ }
- for (i = 0; i < rdev_req->n_channels; i++) {
- if (rdev_req->channels[i]->band != NL80211_BAND_6GHZ)
+ for (i = 0; i < rdev_req->req.n_channels; i++) {
+ if (rdev_req->req.channels[i]->band != NL80211_BAND_6GHZ)
n_channels++;
}
if (!n_channels)
- return cfg80211_scan_6ghz(rdev);
+ return cfg80211_scan_6ghz(rdev, true);
- request = kzalloc(struct_size(request, channels, n_channels),
+ request = kzalloc(struct_size(request, req.channels, n_channels),
GFP_KERNEL);
if (!request)
return -ENOMEM;
*request = *rdev_req;
- request->n_channels = n_channels;
+ request->req.n_channels = n_channels;
- for (i = idx = 0; i < rdev_req->n_channels; i++) {
- if (rdev_req->channels[i]->band != NL80211_BAND_6GHZ)
- request->channels[idx++] = rdev_req->channels[i];
+ for (i = idx = 0; i < rdev_req->req.n_channels; i++) {
+ if (rdev_req->req.channels[i]->band != NL80211_BAND_6GHZ)
+ request->req.channels[idx++] =
+ rdev_req->req.channels[i];
}
- rdev_req->scan_6ghz = false;
+ rdev_req->req.scan_6ghz = false;
+ rdev_req->req.first_part = true;
rdev->int_scan_req = request;
return rdev_scan(rdev, request);
}
@@ -1099,7 +1108,7 @@ int cfg80211_scan(struct cfg80211_registered_device *rdev)
void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
bool send_message)
{
- struct cfg80211_scan_request *request, *rdev_req;
+ struct cfg80211_scan_request_int *request, *rdev_req;
struct wireless_dev *wdev;
struct sk_buff *msg;
#ifdef CONFIG_CFG80211_WEXT
@@ -1118,13 +1127,13 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
if (!rdev_req)
return;
- wdev = rdev_req->wdev;
+ wdev = rdev_req->req.wdev;
request = rdev->int_scan_req ? rdev->int_scan_req : rdev_req;
if (wdev_running(wdev) &&
(rdev->wiphy.flags & WIPHY_FLAG_SPLIT_SCAN_6GHZ) &&
- !rdev_req->scan_6ghz && !request->info.aborted &&
- !cfg80211_scan_6ghz(rdev))
+ !rdev_req->req.scan_6ghz && !request->info.aborted &&
+ !cfg80211_scan_6ghz(rdev, false))
return;
/*
@@ -1136,10 +1145,10 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
cfg80211_sme_scan_done(wdev->netdev);
if (!request->info.aborted &&
- request->flags & NL80211_SCAN_FLAG_FLUSH) {
+ request->req.flags & NL80211_SCAN_FLAG_FLUSH) {
/* flush entries from previous scans */
spin_lock_bh(&rdev->bss_lock);
- __cfg80211_bss_expire(rdev, request->scan_start);
+ __cfg80211_bss_expire(rdev, request->req.scan_start);
spin_unlock_bh(&rdev->bss_lock);
}
@@ -1175,13 +1184,16 @@ void __cfg80211_scan_done(struct wiphy *wiphy, struct wiphy_work *wk)
void cfg80211_scan_done(struct cfg80211_scan_request *request,
struct cfg80211_scan_info *info)
{
- struct cfg80211_scan_info old_info = request->info;
+ struct cfg80211_scan_request_int *intreq =
+ container_of(request, struct cfg80211_scan_request_int, req);
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(request->wiphy);
+ struct cfg80211_scan_info old_info = intreq->info;
- trace_cfg80211_scan_done(request, info);
- WARN_ON(request != wiphy_to_rdev(request->wiphy)->scan_req &&
- request != wiphy_to_rdev(request->wiphy)->int_scan_req);
+ trace_cfg80211_scan_done(intreq, info);
+ WARN_ON(intreq != rdev->scan_req &&
+ intreq != rdev->int_scan_req);
- request->info = *info;
+ intreq->info = *info;
/*
* In case the scan is split, the scan_start_tsf and tsf_bssid should
@@ -1189,14 +1201,13 @@ void cfg80211_scan_done(struct cfg80211_scan_request *request,
* be non zero.
*/
if (request->scan_6ghz && old_info.scan_start_tsf) {
- request->info.scan_start_tsf = old_info.scan_start_tsf;
- memcpy(request->info.tsf_bssid, old_info.tsf_bssid,
- sizeof(request->info.tsf_bssid));
+ intreq->info.scan_start_tsf = old_info.scan_start_tsf;
+ memcpy(intreq->info.tsf_bssid, old_info.tsf_bssid,
+ sizeof(intreq->info.tsf_bssid));
}
- request->notified = true;
- wiphy_work_queue(request->wiphy,
- &wiphy_to_rdev(request->wiphy)->scan_done_wk);
+ intreq->notified = true;
+ wiphy_work_queue(request->wiphy, &rdev->scan_done_wk);
}
EXPORT_SYMBOL(cfg80211_scan_done);
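The hunks above embed the driver-visible scan request inside an internal structure and recover the wrapper in cfg80211_scan_done() via container_of(). A minimal userspace sketch of that embed-and-recover pattern, using stand-in struct names rather than the real cfg80211 types:

/* Illustrative only: embed-and-container_of pattern, stand-in types. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pub_req {		/* stands in for the driver-visible request */
	int n_channels;
};

struct int_req {		/* stands in for the internal wrapper */
	int notified;
	struct pub_req req;	/* public part embedded at a known offset */
};

int main(void)
{
	struct int_req ireq = { .notified = 0, .req = { .n_channels = 4 } };
	struct pub_req *pub = &ireq.req;	/* what a driver would see */
	struct int_req *back = container_of(pub, struct int_req, req);

	printf("%d %d\n", back->notified, back->req.n_channels);	/* 0 4 */
	return 0;
}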
@@ -1805,6 +1816,9 @@ static void cfg80211_update_hidden_bsses(struct cfg80211_internal_bss *known,
WARN_ON(ies != old_ies);
rcu_assign_pointer(bss->pub.beacon_ies, new_ies);
+
+ bss->ts = known->ts;
+ bss->pub.ts_boottime = known->pub.ts_boottime;
}
}
@@ -1871,6 +1885,10 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
{
lockdep_assert_held(&rdev->bss_lock);
+ /* Update time stamps */
+ known->ts = new->ts;
+ known->pub.ts_boottime = new->pub.ts_boottime;
+
/* Update IEs */
if (rcu_access_pointer(new->pub.proberesp_ies)) {
const struct cfg80211_bss_ies *old;
@@ -1905,7 +1923,8 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
*/
f = rcu_access_pointer(new->pub.beacon_ies);
- kfree_rcu((struct cfg80211_bss_ies *)f, rcu_head);
+ if (!new->pub.hidden_beacon_bss)
+ kfree_rcu((struct cfg80211_bss_ies *)f, rcu_head);
return false;
}
@@ -1933,8 +1952,6 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
if (signal_valid)
known->pub.signal = new->pub.signal;
known->pub.capability = new->pub.capability;
- known->ts = new->ts;
- known->pub.ts_boottime = new->pub.ts_boottime;
known->parent_tsf = new->parent_tsf;
known->pub.chains = new->pub.chains;
memcpy(known->pub.chain_signal, new->pub.chain_signal,
@@ -2220,6 +2237,7 @@ cfg80211_get_6ghz_power_type(const u8 *elems, size_t elems_len)
return IEEE80211_REG_LPI_AP;
case IEEE80211_6GHZ_CTRL_REG_SP_AP:
case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP:
+ case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP_OLD:
return IEEE80211_REG_SP_AP;
case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
return IEEE80211_REG_VLP_AP;
@@ -3496,7 +3514,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
struct cfg80211_registered_device *rdev;
struct wiphy *wiphy;
struct iw_scan_req *wreq = NULL;
- struct cfg80211_scan_request *creq;
+ struct cfg80211_scan_request_int *creq;
int i, err, n_channels = 0;
enum nl80211_band band;
@@ -3526,19 +3544,20 @@ int cfg80211_wext_siwscan(struct net_device *dev,
n_channels = ieee80211_get_num_supported_channels(wiphy);
}
- creq = kzalloc(struct_size(creq, channels, n_channels) +
+ creq = kzalloc(struct_size(creq, req.channels, n_channels) +
sizeof(struct cfg80211_ssid),
GFP_ATOMIC);
if (!creq)
return -ENOMEM;
- creq->wiphy = wiphy;
- creq->wdev = dev->ieee80211_ptr;
+ creq->req.wiphy = wiphy;
+ creq->req.wdev = dev->ieee80211_ptr;
/* SSIDs come after channels */
- creq->ssids = (void *)creq + struct_size(creq, channels, n_channels);
- creq->n_channels = n_channels;
- creq->n_ssids = 1;
- creq->scan_start = jiffies;
+ creq->req.ssids = (void *)creq +
+ struct_size(creq, req.channels, n_channels);
+ creq->req.n_channels = n_channels;
+ creq->req.n_ssids = 1;
+ creq->req.scan_start = jiffies;
/* translate "Scan on frequencies" request */
i = 0;
@@ -3554,7 +3573,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
/* ignore disabled channels */
chan = &wiphy->bands[band]->channels[j];
if (chan->flags & IEEE80211_CHAN_DISABLED ||
- !cfg80211_wdev_channel_allowed(creq->wdev, chan))
+ !cfg80211_wdev_channel_allowed(creq->req.wdev, chan))
continue;
/* If we have a wireless request structure and the
@@ -3577,7 +3596,8 @@ int cfg80211_wext_siwscan(struct net_device *dev,
}
wext_freq_found:
- creq->channels[i] = &wiphy->bands[band]->channels[j];
+ creq->req.channels[i] =
+ &wiphy->bands[band]->channels[j];
i++;
wext_freq_not_found: ;
}
@@ -3588,28 +3608,30 @@ int cfg80211_wext_siwscan(struct net_device *dev,
goto out;
}
- /* Set real number of channels specified in creq->channels[] */
- creq->n_channels = i;
+ /* Set real number of channels specified in creq->req.channels[] */
+ creq->req.n_channels = i;
/* translate "Scan for SSID" request */
if (wreq) {
if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
if (wreq->essid_len > IEEE80211_MAX_SSID_LEN)
return -EINVAL;
- memcpy(creq->ssids[0].ssid, wreq->essid, wreq->essid_len);
- creq->ssids[0].ssid_len = wreq->essid_len;
+ memcpy(creq->req.ssids[0].ssid, wreq->essid,
+ wreq->essid_len);
+ creq->req.ssids[0].ssid_len = wreq->essid_len;
}
if (wreq->scan_type == IW_SCAN_TYPE_PASSIVE) {
- creq->ssids = NULL;
- creq->n_ssids = 0;
+ creq->req.ssids = NULL;
+ creq->req.n_ssids = 0;
}
}
for (i = 0; i < NUM_NL80211_BANDS; i++)
if (wiphy->bands[i])
- creq->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
+ creq->req.rates[i] =
+ (1 << wiphy->bands[i]->n_bitrates) - 1;
- eth_broadcast_addr(creq->bssid);
+ eth_broadcast_addr(creq->req.bssid);
scoped_guard(wiphy, &rdev->wiphy) {
rdev->scan_req = creq;
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index cf998500a965..3a028ff287fb 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -5,7 +5,7 @@
* (for nl80211's connect() and wext)
*
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2009, 2020, 2022-2024 Intel Corporation. All rights reserved.
+ * Copyright (C) 2009, 2020, 2022-2025 Intel Corporation. All rights reserved.
* Copyright 2017 Intel Deutschland GmbH
*/
@@ -64,7 +64,7 @@ static void cfg80211_sme_free(struct wireless_dev *wdev)
static int cfg80211_conn_scan(struct wireless_dev *wdev)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
- struct cfg80211_scan_request *request;
+ struct cfg80211_scan_request_int *request;
int n_channels, err;
lockdep_assert_wiphy(wdev->wiphy);
@@ -77,13 +77,12 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
else
n_channels = ieee80211_get_num_supported_channels(wdev->wiphy);
- request = kzalloc(sizeof(*request) + sizeof(request->ssids[0]) +
- sizeof(request->channels[0]) * n_channels,
+ request = kzalloc(sizeof(*request) + sizeof(request->req.ssids[0]) +
+ sizeof(request->req.channels[0]) * n_channels,
GFP_KERNEL);
if (!request)
return -ENOMEM;
- request->n_channels = n_channels;
if (wdev->conn->params.channel) {
enum nl80211_band band = wdev->conn->params.channel->band;
struct ieee80211_supported_band *sband =
@@ -93,8 +92,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
kfree(request);
return -EINVAL;
}
- request->channels[0] = wdev->conn->params.channel;
- request->rates[band] = (1 << sband->n_bitrates) - 1;
+ request->req.channels[0] = wdev->conn->params.channel;
+ request->req.rates[band] = (1 << sband->n_bitrates) - 1;
} else {
int i = 0, j;
enum nl80211_band band;
@@ -109,26 +108,26 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
channel = &bands->channels[j];
if (channel->flags & IEEE80211_CHAN_DISABLED)
continue;
- request->channels[i++] = channel;
+ request->req.channels[i++] = channel;
}
- request->rates[band] = (1 << bands->n_bitrates) - 1;
+ request->req.rates[band] = (1 << bands->n_bitrates) - 1;
}
n_channels = i;
}
- request->n_channels = n_channels;
- request->ssids = (void *)request +
- struct_size(request, channels, n_channels);
- request->n_ssids = 1;
+ request->req.n_channels = n_channels;
+ request->req.ssids = (void *)request +
+ struct_size(request, req.channels, n_channels);
+ request->req.n_ssids = 1;
- memcpy(request->ssids[0].ssid, wdev->conn->params.ssid,
- wdev->conn->params.ssid_len);
- request->ssids[0].ssid_len = wdev->conn->params.ssid_len;
+ memcpy(request->req.ssids[0].ssid, wdev->conn->params.ssid,
+ wdev->conn->params.ssid_len);
+ request->req.ssids[0].ssid_len = wdev->conn->params.ssid_len;
- eth_broadcast_addr(request->bssid);
+ eth_broadcast_addr(request->req.bssid);
- request->wdev = wdev;
- request->wiphy = &rdev->wiphy;
- request->scan_start = jiffies;
+ request->req.wdev = wdev;
+ request->req.wiphy = &rdev->wiphy;
+ request->req.scan_start = jiffies;
rdev->scan_req = request;
@@ -901,13 +900,16 @@ void __cfg80211_connect_result(struct net_device *dev,
if (!wdev->u.client.ssid_len) {
rcu_read_lock();
for_each_valid_link(cr, link) {
+ u32 ssid_len;
+
ssid = ieee80211_bss_get_elem(cr->links[link].bss,
WLAN_EID_SSID);
if (!ssid || !ssid->datalen)
continue;
- memcpy(wdev->u.client.ssid, ssid->data, ssid->datalen);
+ ssid_len = min(ssid->datalen, IEEE80211_MAX_SSID_LEN);
+ memcpy(wdev->u.client.ssid, ssid->data, ssid_len);
wdev->u.client.ssid_len = ssid->datalen;
break;
}
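The hunk above bounds the memcpy() into wdev->u.client.ssid by clamping the copy length to IEEE80211_MAX_SSID_LEN. A self-contained sketch of the bounded-copy idea, assuming stand-in types; the 32-byte limit mirrors IEEE80211_MAX_SSID_LEN, and clamping the stored length as well is part of the illustration, not of the patch:

/* Sketch only: clamp the element length to the destination size. */
#include <stdio.h>
#include <string.h>

#define MAX_SSID_LEN 32

struct client_state {
	unsigned char ssid[MAX_SSID_LEN];
	unsigned int ssid_len;
};

static void set_ssid(struct client_state *c,
		     const unsigned char *data, unsigned int datalen)
{
	unsigned int ssid_len = datalen < MAX_SSID_LEN ? datalen : MAX_SSID_LEN;

	memcpy(c->ssid, data, ssid_len);	/* never overruns c->ssid */
	c->ssid_len = ssid_len;
}

int main(void)
{
	struct client_state c = { 0 };
	unsigned char oversized[64] = "way-too-long-ssid-element";

	set_ssid(&c, oversized, sizeof(oversized));
	printf("%u\n", c.ssid_len);		/* 32 */
	return 0;
}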
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 4ed9fada4ec0..8a4c34112eb5 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -373,7 +373,8 @@ TRACE_EVENT(rdev_return_int,
);
TRACE_EVENT(rdev_scan,
- TP_PROTO(struct wiphy *wiphy, struct cfg80211_scan_request *request),
+ TP_PROTO(struct wiphy *wiphy,
+ struct cfg80211_scan_request_int *request),
TP_ARGS(wiphy, request),
TP_STRUCT__entry(
WIPHY_ENTRY
@@ -406,9 +407,19 @@ DEFINE_EVENT(wiphy_only_evt, rdev_return_void,
TP_ARGS(wiphy)
);
-DEFINE_EVENT(wiphy_only_evt, rdev_get_antenna,
- TP_PROTO(struct wiphy *wiphy),
- TP_ARGS(wiphy)
+TRACE_EVENT(rdev_get_antenna,
+ TP_PROTO(struct wiphy *wiphy, int radio_idx),
+ TP_ARGS(wiphy, radio_idx),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ __field(int, radio_idx)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ __entry->radio_idx = radio_idx;
+ ),
+ TP_printk(WIPHY_PR_FMT ", radio_idx: %d",
+ WIPHY_PR_ARG, __entry->radio_idx)
);
DEFINE_EVENT(wiphy_only_evt, rdev_rfkill_poll,
@@ -1678,18 +1689,20 @@ TRACE_EVENT(rdev_join_ocb,
);
TRACE_EVENT(rdev_set_wiphy_params,
- TP_PROTO(struct wiphy *wiphy, u32 changed),
- TP_ARGS(wiphy, changed),
+ TP_PROTO(struct wiphy *wiphy, int radio_idx, u32 changed),
+ TP_ARGS(wiphy, radio_idx, changed),
TP_STRUCT__entry(
WIPHY_ENTRY
+ __field(int, radio_idx)
__field(u32, changed)
),
TP_fast_assign(
WIPHY_ASSIGN;
+ __entry->radio_idx = radio_idx;
__entry->changed = changed;
),
- TP_printk(WIPHY_PR_FMT ", changed: %u",
- WIPHY_PR_ARG, __entry->changed)
+ TP_printk(WIPHY_PR_FMT ", radio_idx: %d, changed: %u",
+ WIPHY_PR_ARG, __entry->radio_idx, __entry->changed)
);
DECLARE_EVENT_CLASS(wiphy_wdev_link_evt,
@@ -1710,30 +1723,51 @@ DECLARE_EVENT_CLASS(wiphy_wdev_link_evt,
WIPHY_PR_ARG, WDEV_PR_ARG, __entry->link_id)
);
-DEFINE_EVENT(wiphy_wdev_link_evt, rdev_get_tx_power,
+TRACE_EVENT(rdev_get_tx_power,
TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
- unsigned int link_id),
- TP_ARGS(wiphy, wdev, link_id)
+ int radio_idx, unsigned int link_id),
+ TP_ARGS(wiphy, wdev, radio_idx, link_id),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ WDEV_ENTRY
+ __field(int, radio_idx)
+ __field(unsigned int, link_id)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ WDEV_ASSIGN;
+ __entry->radio_idx = radio_idx;
+ __entry->link_id = link_id;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT
+ ", radio_idx: %d, link_id: %u",
+ WIPHY_PR_ARG, WDEV_PR_ARG,
+ __entry->radio_idx, __entry->link_id)
);
TRACE_EVENT(rdev_set_tx_power,
TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
- enum nl80211_tx_power_setting type, int mbm),
- TP_ARGS(wiphy, wdev, type, mbm),
+ int radio_idx, enum nl80211_tx_power_setting type,
+ int mbm),
+ TP_ARGS(wiphy, wdev, radio_idx, type, mbm),
TP_STRUCT__entry(
WIPHY_ENTRY
WDEV_ENTRY
+ __field(int, radio_idx)
__field(enum nl80211_tx_power_setting, type)
__field(int, mbm)
),
TP_fast_assign(
WIPHY_ASSIGN;
WDEV_ASSIGN;
+ __entry->radio_idx = radio_idx;
__entry->type = type;
__entry->mbm = mbm;
),
- TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", type: %u, mbm: %d",
- WIPHY_PR_ARG, WDEV_PR_ARG,__entry->type, __entry->mbm)
+ TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT
+ ", radio_idx: %d, type: %u, mbm: %d",
+ WIPHY_PR_ARG, WDEV_PR_ARG,
+ __entry->radio_idx, __entry->type, __entry->mbm)
);
TRACE_EVENT(rdev_return_int_int,
@@ -1866,26 +1900,24 @@ TRACE_EVENT(rdev_return_void_tx_rx,
__entry->rx_max)
);
-DECLARE_EVENT_CLASS(tx_rx_evt,
- TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
- TP_ARGS(wiphy, tx, rx),
+TRACE_EVENT(rdev_set_antenna,
+ TP_PROTO(struct wiphy *wiphy, int radio_idx, u32 tx, u32 rx),
+ TP_ARGS(wiphy, radio_idx, tx, rx),
TP_STRUCT__entry(
WIPHY_ENTRY
+ __field(int, radio_idx)
__field(u32, tx)
__field(u32, rx)
),
TP_fast_assign(
WIPHY_ASSIGN;
+ __entry->radio_idx = radio_idx;
__entry->tx = tx;
__entry->rx = rx;
),
- TP_printk(WIPHY_PR_FMT ", tx: %u, rx: %u ",
- WIPHY_PR_ARG, __entry->tx, __entry->rx)
-);
-
-DEFINE_EVENT(tx_rx_evt, rdev_set_antenna,
- TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
- TP_ARGS(wiphy, tx, rx)
+ TP_printk(WIPHY_PR_FMT ", radio_idx: %d, tx: %u, rx: %u ",
+ WIPHY_PR_ARG, __entry->radio_idx,
+ __entry->tx, __entry->rx)
);
DECLARE_EVENT_CLASS(wiphy_netdev_id_evt,
@@ -3105,23 +3137,6 @@ DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_notify_new_peer_candidate,
TP_ARGS(netdev, macaddr)
);
-DECLARE_EVENT_CLASS(netdev_evt_only,
- TP_PROTO(struct net_device *netdev),
- TP_ARGS(netdev),
- TP_STRUCT__entry(
- NETDEV_ENTRY
- ),
- TP_fast_assign(
- NETDEV_ASSIGN;
- ),
- TP_printk(NETDEV_PR_FMT , NETDEV_PR_ARG)
-);
-
-DEFINE_EVENT(netdev_evt_only, cfg80211_send_rx_auth,
- TP_PROTO(struct net_device *netdev),
- TP_ARGS(netdev)
-);
-
TRACE_EVENT(cfg80211_send_rx_assoc,
TP_PROTO(struct net_device *netdev,
const struct cfg80211_rx_assoc_resp_data *data),
@@ -3448,21 +3463,6 @@ TRACE_EVENT(cfg80211_reg_can_beacon,
__entry->prohibited_flags, __entry->permitting_flags)
);
-TRACE_EVENT(cfg80211_chandef_dfs_required,
- TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef),
- TP_ARGS(wiphy, chandef),
- TP_STRUCT__entry(
- WIPHY_ENTRY
- CHAN_DEF_ENTRY
- ),
- TP_fast_assign(
- WIPHY_ASSIGN;
- CHAN_DEF_ASSIGN(chandef);
- ),
- TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
- WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
-);
-
TRACE_EVENT(cfg80211_ch_switch_notify,
TP_PROTO(struct net_device *netdev,
struct cfg80211_chan_def *chandef,
@@ -3538,27 +3538,30 @@ TRACE_EVENT(cfg80211_cac_event,
);
DECLARE_EVENT_CLASS(cfg80211_rx_evt,
- TP_PROTO(struct net_device *netdev, const u8 *addr),
- TP_ARGS(netdev, addr),
+ TP_PROTO(struct net_device *netdev, const u8 *addr, int link_id),
+ TP_ARGS(netdev, addr, link_id),
TP_STRUCT__entry(
NETDEV_ENTRY
MAC_ENTRY(addr)
+ __field(int, link_id)
),
TP_fast_assign(
NETDEV_ASSIGN;
MAC_ASSIGN(addr, addr);
+ __entry->link_id = link_id;
),
- TP_printk(NETDEV_PR_FMT ", %pM", NETDEV_PR_ARG, __entry->addr)
+ TP_printk(NETDEV_PR_FMT ", %pM, link_id:%d", NETDEV_PR_ARG,
+ __entry->addr, __entry->link_id)
);
DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_spurious_frame,
- TP_PROTO(struct net_device *netdev, const u8 *addr),
- TP_ARGS(netdev, addr)
+ TP_PROTO(struct net_device *netdev, const u8 *addr, int link_id),
+ TP_ARGS(netdev, addr, link_id)
);
DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_unexpected_4addr_frame,
- TP_PROTO(struct net_device *netdev, const u8 *addr),
- TP_ARGS(netdev, addr)
+ TP_PROTO(struct net_device *netdev, const u8 *addr, int link_id),
+ TP_ARGS(netdev, addr, link_id)
);
TRACE_EVENT(cfg80211_ibss_joined,
@@ -3685,12 +3688,12 @@ TRACE_EVENT(cfg80211_tdls_oper_request,
);
TRACE_EVENT(cfg80211_scan_done,
- TP_PROTO(struct cfg80211_scan_request *request,
+ TP_PROTO(struct cfg80211_scan_request_int *request,
struct cfg80211_scan_info *info),
TP_ARGS(request, info),
TP_STRUCT__entry(
__field(u32, n_channels)
- __dynamic_array(u8, ie, request ? request->ie_len : 0)
+ __dynamic_array(u8, ie, request ? request->req.ie_len : 0)
__array(u32, rates, NUM_NL80211_BANDS)
__field(u32, wdev_id)
MAC_ENTRY(wiphy_mac)
@@ -3701,16 +3704,16 @@ TRACE_EVENT(cfg80211_scan_done,
),
TP_fast_assign(
if (request) {
- memcpy(__get_dynamic_array(ie), request->ie,
- request->ie_len);
- memcpy(__entry->rates, request->rates,
+ memcpy(__get_dynamic_array(ie), request->req.ie,
+ request->req.ie_len);
+ memcpy(__entry->rates, request->req.rates,
NUM_NL80211_BANDS);
- __entry->wdev_id = request->wdev ?
- request->wdev->identifier : 0;
- if (request->wiphy)
+ __entry->wdev_id = request->req.wdev ?
+ request->req.wdev->identifier : 0;
+ if (request->req.wiphy)
MAC_ASSIGN(wiphy_mac,
- request->wiphy->perm_addr);
- __entry->no_cck = request->no_cck;
+ request->req.wiphy->perm_addr);
+ __entry->no_cck = request->req.no_cck;
}
if (info) {
__entry->aborted = info->aborted;
@@ -3827,30 +3830,6 @@ DEFINE_EVENT(cfg80211_bss_evt, cfg80211_return_bss,
TP_ARGS(pub)
);
-TRACE_EVENT(cfg80211_return_uint,
- TP_PROTO(unsigned int ret),
- TP_ARGS(ret),
- TP_STRUCT__entry(
- __field(unsigned int, ret)
- ),
- TP_fast_assign(
- __entry->ret = ret;
- ),
- TP_printk("ret: %d", __entry->ret)
-);
-
-TRACE_EVENT(cfg80211_return_u32,
- TP_PROTO(u32 ret),
- TP_ARGS(ret),
- TP_STRUCT__entry(
- __field(u32, ret)
- ),
- TP_fast_assign(
- __entry->ret = ret;
- ),
- TP_printk("ret: %u", __entry->ret)
-);
-
TRACE_EVENT(cfg80211_report_wowlan_wakeup,
TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_wowlan_wakeup *wakeup),
@@ -4126,20 +4105,22 @@ TRACE_EVENT(cfg80211_links_removed,
TRACE_EVENT(cfg80211_mlo_reconf_add_done,
TP_PROTO(struct net_device *netdev, u16 link_mask,
- const u8 *buf, size_t len),
- TP_ARGS(netdev, link_mask, buf, len),
+ const u8 *buf, size_t len, bool driver_initiated),
+ TP_ARGS(netdev, link_mask, buf, len, driver_initiated),
TP_STRUCT__entry(
NETDEV_ENTRY
__field(u16, link_mask)
__dynamic_array(u8, buf, len)
+ __field(bool, driver_initiated)
),
TP_fast_assign(
NETDEV_ASSIGN;
__entry->link_mask = link_mask;
memcpy(__get_dynamic_array(buf), buf, len);
+ __entry->driver_initiated = driver_initiated;
),
- TP_printk(NETDEV_PR_FMT ", link_mask:0x%x",
- NETDEV_PR_ARG, __entry->link_mask)
+ TP_printk(NETDEV_PR_FMT ", link_mask:0x%x, driver_initiated:%d",
+ NETDEV_PR_ARG, __entry->link_mask, __entry->driver_initiated)
);
TRACE_EVENT(rdev_assoc_ml_reconf,
@@ -4185,6 +4166,41 @@ TRACE_EVENT(cfg80211_epcs_changed,
WDEV_PR_ARG, __entry->enabled)
);
+TRACE_EVENT(cfg80211_next_nan_dw_notif,
+ TP_PROTO(struct wireless_dev *wdev,
+ struct ieee80211_channel *chan),
+ TP_ARGS(wdev, chan),
+ TP_STRUCT__entry(
+ WDEV_ENTRY
+ CHAN_ENTRY
+ ),
+ TP_fast_assign(
+ WDEV_ASSIGN;
+ CHAN_ASSIGN(chan);
+ ),
+ TP_printk(WDEV_PR_FMT " " CHAN_PR_FMT,
+ WDEV_PR_ARG, CHAN_PR_ARG)
+);
+
+TRACE_EVENT(cfg80211_nan_cluster_joined,
+ TP_PROTO(struct wireless_dev *wdev,
+ const u8 *cluster_id,
+ bool new_cluster),
+ TP_ARGS(wdev, cluster_id, new_cluster),
+ TP_STRUCT__entry(
+ WDEV_ENTRY
+ MAC_ENTRY(cluster_id)
+ __field(bool, new_cluster)
+ ),
+ TP_fast_assign(
+ WDEV_ASSIGN;
+ MAC_ASSIGN(cluster_id, cluster_id);
+ __entry->new_cluster = new_cluster;
+ ),
+ TP_printk(WDEV_PR_FMT " cluster_id %pMF%s",
+ WDEV_PR_ARG, __entry->cluster_id,
+ __entry->new_cluster ? " [new]" : "")
+);
#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/net/wireless/util.c b/net/wireless/util.c
index ed868c0f7ca8..56724b33af04 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -106,33 +106,6 @@ u32 ieee80211_channel_to_freq_khz(int chan, enum nl80211_band band)
}
EXPORT_SYMBOL(ieee80211_channel_to_freq_khz);
-enum nl80211_chan_width
-ieee80211_s1g_channel_width(const struct ieee80211_channel *chan)
-{
- if (WARN_ON(!chan || chan->band != NL80211_BAND_S1GHZ))
- return NL80211_CHAN_WIDTH_20_NOHT;
-
- /*S1G defines a single allowed channel width per channel.
- * Extract that width here.
- */
- if (chan->flags & IEEE80211_CHAN_1MHZ)
- return NL80211_CHAN_WIDTH_1;
- else if (chan->flags & IEEE80211_CHAN_2MHZ)
- return NL80211_CHAN_WIDTH_2;
- else if (chan->flags & IEEE80211_CHAN_4MHZ)
- return NL80211_CHAN_WIDTH_4;
- else if (chan->flags & IEEE80211_CHAN_8MHZ)
- return NL80211_CHAN_WIDTH_8;
- else if (chan->flags & IEEE80211_CHAN_16MHZ)
- return NL80211_CHAN_WIDTH_16;
-
- pr_err("unknown channel width for channel at %dKHz?\n",
- ieee80211_channel_to_khz(chan));
-
- return NL80211_CHAN_WIDTH_1;
-}
-EXPORT_SYMBOL(ieee80211_s1g_channel_width);
-
int ieee80211_freq_khz_to_channel(u32 freq)
{
/* TODO: just handle MHz for now */
@@ -820,6 +793,52 @@ bool ieee80211_is_valid_amsdu(struct sk_buff *skb, u8 mesh_hdr)
}
EXPORT_SYMBOL(ieee80211_is_valid_amsdu);
+
+/*
+ * Detects if an MSDU frame was maliciously converted into an A-MSDU
+ * frame by an adversary. This is done by parsing the received frame
+ * as if it were a regular MSDU, even though the A-MSDU flag is set.
+ *
+ * For non-mesh interfaces, detection involves checking whether the
+ * payload, when interpreted as an MSDU, begins with a valid RFC1042
+ * header. This is done by comparing the A-MSDU subheader's destination
+ * address to the start of the RFC1042 header.
+ *
+ * For mesh interfaces, the MSDU includes a 6-byte Mesh Control field
+ * and an optional variable-length Mesh Address Extension field before
+ * the RFC1042 header. The position of the RFC1042 header must therefore
+ * be calculated based on the mesh header length.
+ *
+ * Since this function intentionally parses an A-MSDU frame as an MSDU,
+ * it only assumes that the A-MSDU subframe header is present, and
+ * beyond this it performs its own bounds checks under the assumption
+ * that the frame is instead parsed as a non-aggregated MSDU.
+ */
+static bool
+is_amsdu_aggregation_attack(struct ethhdr *eth, struct sk_buff *skb,
+ enum nl80211_iftype iftype)
+{
+ int offset;
+
+ /* Non-mesh case can be directly compared */
+ if (iftype != NL80211_IFTYPE_MESH_POINT)
+ return ether_addr_equal(eth->h_dest, rfc1042_header);
+
+ offset = __ieee80211_get_mesh_hdrlen(eth->h_dest[0]);
+ if (offset == 6) {
+ /* Mesh case with empty address extension field */
+ return ether_addr_equal(eth->h_source, rfc1042_header);
+ } else if (offset + ETH_ALEN <= skb->len) {
+ /* Mesh case with non-empty address extension field */
+ u8 temp[ETH_ALEN];
+
+ skb_copy_bits(skb, offset, temp, ETH_ALEN);
+ return ether_addr_equal(temp, rfc1042_header);
+ }
+
+ return false;
+}
+
void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
const u8 *addr, enum nl80211_iftype iftype,
const unsigned int extra_headroom,
@@ -861,8 +880,10 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
/* the last MSDU has no padding */
if (subframe_len > remaining)
goto purge;
- /* mitigate A-MSDU aggregation injection attacks */
- if (ether_addr_equal(hdr.eth.h_dest, rfc1042_header))
+ /* mitigate A-MSDU aggregation injection attacks, to be
+ * checked when processing first subframe (offset == 0).
+ */
+ if (offset == 0 && is_amsdu_aggregation_attack(&hdr.eth, skb, iftype))
goto purge;
offset += sizeof(struct ethhdr);
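The new helper flags the first subframe as a possible A-MSDU injection when the would-be MSDU payload begins with the RFC1042 LLC/SNAP header. A plain-C sketch of that comparison, assuming only the header bytes from 802.11; everything else is illustrative, not the kernel helper itself:

/* Sketch: does the would-be MSDU payload start with the LLC/SNAP header? */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static const unsigned char rfc1042_header[6] = {
	0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
};

/* stand-in for ether_addr_equal(): compare two 6-byte fields */
static bool looks_like_msdu(const unsigned char dest_field[6])
{
	return memcmp(dest_field, rfc1042_header, 6) == 0;
}

int main(void)
{
	unsigned char fake_dest[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
	unsigned char real_dest[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("crafted: %d, normal: %d\n",
	       looks_like_msdu(fake_dest), looks_like_msdu(real_dest));
	return 0;
}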
@@ -2516,6 +2537,30 @@ int cfg80211_check_combinations(struct wiphy *wiphy,
}
EXPORT_SYMBOL(cfg80211_check_combinations);
+int cfg80211_get_radio_idx_by_chan(struct wiphy *wiphy,
+ const struct ieee80211_channel *chan)
+{
+ const struct wiphy_radio *radio;
+ int i, j;
+ u32 freq;
+
+ if (!chan)
+ return -EINVAL;
+
+ freq = ieee80211_channel_to_khz(chan);
+ for (i = 0; i < wiphy->n_radio; i++) {
+ radio = &wiphy->radio[i];
+ for (j = 0; j < radio->n_freq_range; j++) {
+ if (freq >= radio->freq_range[j].start_freq &&
+ freq < radio->freq_range[j].end_freq)
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(cfg80211_get_radio_idx_by_chan);
+
int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
const u8 *rates, unsigned int n_rates,
u32 *mask)
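cfg80211_get_radio_idx_by_chan() above is a linear scan of each radio's frequency ranges for the channel's centre frequency in kHz. A small stand-alone sketch of that range lookup, assuming made-up ranges rather than real wiphy data:

/* Sketch only: map a kHz frequency to the first radio whose range covers it. */
#include <stdio.h>

struct freq_range { unsigned int start_khz, end_khz; };

struct radio { const struct freq_range *ranges; int n_ranges; };

static int radio_idx_by_freq(const struct radio *radios, int n_radios,
			     unsigned int freq_khz)
{
	for (int i = 0; i < n_radios; i++)
		for (int j = 0; j < radios[i].n_ranges; j++)
			if (freq_khz >= radios[i].ranges[j].start_khz &&
			    freq_khz <  radios[i].ranges[j].end_khz)
				return i;
	return -1;	/* no radio covers this frequency */
}

int main(void)
{
	static const struct freq_range r0[] = { { 2400000, 2500000 } };
	static const struct freq_range r1[] = { { 5150000, 5895000 },
						{ 5925000, 7125000 } };
	struct radio radios[] = { { r0, 1 }, { r1, 2 } };

	printf("%d\n", radio_idx_by_freq(radios, 2, 5180000));	/* 1 */
	printf("%d\n", radio_idx_by_freq(radios, 2, 2412000));	/* 0 */
	return 0;
}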
@@ -2626,6 +2671,18 @@ bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
return false;
}
+int cfg80211_link_sinfo_alloc_tid_stats(struct link_station_info *link_sinfo,
+ gfp_t gfp)
+{
+ link_sinfo->pertid = kcalloc(IEEE80211_NUM_TIDS + 1,
+ sizeof(*link_sinfo->pertid), gfp);
+ if (!link_sinfo->pertid)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(cfg80211_link_sinfo_alloc_tid_stats);
+
int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp)
{
sinfo->pertid = kcalloc(IEEE80211_NUM_TIDS + 1,
@@ -2908,7 +2965,7 @@ bool cfg80211_radio_chandef_valid(const struct wiphy_radio *radio,
u32 freq, width;
freq = ieee80211_chandef_to_khz(chandef);
- width = cfg80211_chandef_get_width(chandef);
+ width = MHZ_TO_KHZ(cfg80211_chandef_get_width(chandef));
if (!ieee80211_radio_freq_range_valid(radio, freq, width))
return false;
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index a74b1afc594e..1241fda78a68 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -263,7 +263,7 @@ int cfg80211_wext_siwrts(struct net_device *dev,
else
wdev->wiphy->rts_threshold = rts->value;
- err = rdev_set_wiphy_params(rdev, WIPHY_PARAM_RTS_THRESHOLD);
+ err = rdev_set_wiphy_params(rdev, -1, WIPHY_PARAM_RTS_THRESHOLD);
if (err)
wdev->wiphy->rts_threshold = orts;
return err;
@@ -304,7 +304,7 @@ int cfg80211_wext_siwfrag(struct net_device *dev,
wdev->wiphy->frag_threshold = frag->value & ~0x1;
}
- err = rdev_set_wiphy_params(rdev, WIPHY_PARAM_FRAG_THRESHOLD);
+ err = rdev_set_wiphy_params(rdev, -1, WIPHY_PARAM_FRAG_THRESHOLD);
if (err)
wdev->wiphy->frag_threshold = ofrag;
return err;
@@ -355,7 +355,7 @@ static int cfg80211_wext_siwretry(struct net_device *dev,
changed |= WIPHY_PARAM_RETRY_SHORT;
}
- err = rdev_set_wiphy_params(rdev, changed);
+ err = rdev_set_wiphy_params(rdev, -1, changed);
if (err) {
wdev->wiphy->retry_short = oshort;
wdev->wiphy->retry_long = olong;
@@ -890,7 +890,7 @@ static int cfg80211_wext_siwtxpower(struct net_device *dev,
guard(wiphy)(&rdev->wiphy);
- return rdev_set_tx_power(rdev, wdev, type, DBM_TO_MBM(dbm));
+ return rdev_set_tx_power(rdev, wdev, -1, type, DBM_TO_MBM(dbm));
}
static int cfg80211_wext_giwtxpower(struct net_device *dev,
@@ -910,7 +910,7 @@ static int cfg80211_wext_giwtxpower(struct net_device *dev,
return -EOPNOTSUPP;
scoped_guard(wiphy, &rdev->wiphy) {
- err = rdev_get_tx_power(rdev, wdev, 0, &val);
+ err = rdev_get_tx_power(rdev, wdev, -1, 0, &val);
}
if (err)
return err;
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index bea70eb6f034..c32a7c6903d5 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -431,7 +431,7 @@ static struct nlmsghdr *rtnetlink_ifinfo_prep(struct net_device *dev,
r->__ifi_pad = 0;
r->ifi_type = dev->type;
r->ifi_index = dev->ifindex;
- r->ifi_flags = dev_get_flags(dev);
+ r->ifi_flags = netif_get_flags(dev);
r->ifi_change = 0; /* Wireless changes don't affect those flags */
if (nla_put_string(skb, IFLA_IFNAME, dev->name))
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 1f8ae9f4a3f1..655d1e0ae25f 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -891,7 +891,7 @@ static int x25_accept(struct socket *sock, struct socket *newsock,
if (sk->sk_state != TCP_LISTEN)
goto out2;
- rc = x25_wait_for_data(sk, sk->sk_rcvtimeo);
+ rc = x25_wait_for_data(sk, READ_ONCE(sk->sk_rcvtimeo));
if (rc)
goto out2;
skb = skb_dequeue(&sk->sk_receive_queue);
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index 748d8630ab58..fb8ac1aa5826 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -170,28 +170,6 @@ void x25_establish_link(struct x25_neigh *nb)
dev_queue_xmit(skb);
}
-void x25_terminate_link(struct x25_neigh *nb)
-{
- struct sk_buff *skb;
- unsigned char *ptr;
-
- if (nb->dev->type != ARPHRD_X25)
- return;
-
- skb = alloc_skb(1, GFP_ATOMIC);
- if (!skb) {
- pr_err("x25_dev: out of memory\n");
- return;
- }
-
- ptr = skb_put(skb, 1);
- *ptr = X25_IFACE_DISCONNECT;
-
- skb->protocol = htons(ETH_P_X25);
- skb->dev = nb->dev;
- dev_queue_xmit(skb);
-}
-
void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb)
{
unsigned char *dptr;
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 72c000c0ae5f..7b0c68a70888 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -34,7 +34,21 @@
#include "xsk.h"
#define TX_BATCH_SIZE 32
-#define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE)
+#define MAX_PER_SOCKET_BUDGET 32
+
+struct xsk_addr_node {
+ u64 addr;
+ struct list_head addr_node;
+};
+
+struct xsk_addr_head {
+ u32 num_descs;
+ struct list_head addrs_list;
+};
+
+static struct kmem_cache *xsk_tx_generic_cache;
+
+#define XSKCB(skb) ((struct xsk_addr_head *)((skb)->cb))
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
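The new XSKCB() macro keeps a per-skb descriptor list head directly in skb->cb, with a BUILD_BUG_ON() guaranteeing the header fits. A userspace sketch of that fixed-scratch-area pattern with stand-in names; only the idea mirrors XSKCB():

/* Sketch: reinterpret a small fixed control buffer as a private header. */
#include <stdio.h>

#define CB_SIZE 48				/* like sizeof(skb->cb) */

struct fake_skb { _Alignas(8) char cb[CB_SIZE]; };	/* skb->cb is 8-aligned */

struct addr_head {
	unsigned int num_descs;
	void *addrs_list;			/* stand-in for list_head */
};

#define FAKE_CB(skb) ((struct addr_head *)((skb)->cb))

/* compile-time guard, in the spirit of BUILD_BUG_ON() */
_Static_assert(sizeof(struct addr_head) <= CB_SIZE,
	       "private header must fit in the control buffer");

int main(void)
{
	struct fake_skb skb = { 0 };

	FAKE_CB(&skb)->num_descs = 3;
	printf("%u\n", FAKE_CB(&skb)->num_descs);	/* 3 */
	return 0;
}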
@@ -300,6 +314,13 @@ static bool xsk_tx_writeable(struct xdp_sock *xs)
return true;
}
+static void __xsk_tx_release(struct xdp_sock *xs)
+{
+ __xskq_cons_release(xs->tx);
+ if (xsk_tx_writeable(xs))
+ xs->sk.sk_write_space(&xs->sk);
+}
+
static bool xsk_is_bound(struct xdp_sock *xs)
{
if (READ_ONCE(xs->state) == XSK_BOUND) {
@@ -407,11 +428,8 @@ void xsk_tx_release(struct xsk_buff_pool *pool)
struct xdp_sock *xs;
rcu_read_lock();
- list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
- __xskq_cons_release(xs->tx);
- if (xsk_tx_writeable(xs))
- xs->sk.sk_write_space(&xs->sk);
- }
+ list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
+ __xsk_tx_release(xs);
rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);
@@ -528,24 +546,43 @@ static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}
-static int xsk_cq_reserve_addr_locked(struct xsk_buff_pool *pool, u64 addr)
+static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&pool->cq_lock, flags);
- ret = xskq_prod_reserve_addr(pool->cq, addr);
+ ret = xskq_prod_reserve(pool->cq);
spin_unlock_irqrestore(&pool->cq_lock, flags);
return ret;
}
-static void xsk_cq_submit_locked(struct xsk_buff_pool *pool, u32 n)
+static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
+ struct sk_buff *skb)
{
+ struct xsk_addr_node *pos, *tmp;
+ u32 descs_processed = 0;
unsigned long flags;
+ u32 idx;
spin_lock_irqsave(&pool->cq_lock, flags);
- xskq_prod_submit_n(pool->cq, n);
+ idx = xskq_get_prod(pool->cq);
+
+ xskq_prod_write_addr(pool->cq, idx,
+ (u64)(uintptr_t)skb_shinfo(skb)->destructor_arg);
+ descs_processed++;
+
+ if (unlikely(XSKCB(skb)->num_descs > 1)) {
+ list_for_each_entry_safe(pos, tmp, &XSKCB(skb)->addrs_list, addr_node) {
+ xskq_prod_write_addr(pool->cq, idx + descs_processed,
+ pos->addr);
+ descs_processed++;
+ list_del(&pos->addr_node);
+ kmem_cache_free(xsk_tx_generic_cache, pos);
+ }
+ }
+ xskq_prod_submit_n(pool->cq, descs_processed);
spin_unlock_irqrestore(&pool->cq_lock, flags);
}
@@ -558,9 +595,14 @@ static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
spin_unlock_irqrestore(&pool->cq_lock, flags);
}
+static void xsk_inc_num_desc(struct sk_buff *skb)
+{
+ XSKCB(skb)->num_descs++;
+}
+
static u32 xsk_get_num_desc(struct sk_buff *skb)
{
- return skb ? (long)skb_shinfo(skb)->destructor_arg : 0;
+ return XSKCB(skb)->num_descs;
}
static void xsk_destruct_skb(struct sk_buff *skb)
@@ -572,23 +614,38 @@ static void xsk_destruct_skb(struct sk_buff *skb)
*compl->tx_timestamp = ktime_get_tai_fast_ns();
}
- xsk_cq_submit_locked(xdp_sk(skb->sk)->pool, xsk_get_num_desc(skb));
+ xsk_cq_submit_addr_locked(xdp_sk(skb->sk)->pool, skb);
sock_wfree(skb);
}
-static void xsk_set_destructor_arg(struct sk_buff *skb)
+static void xsk_skb_init_misc(struct sk_buff *skb, struct xdp_sock *xs,
+ u64 addr)
{
- long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;
-
- skb_shinfo(skb)->destructor_arg = (void *)num;
+ BUILD_BUG_ON(sizeof(struct xsk_addr_head) > sizeof(skb->cb));
+ INIT_LIST_HEAD(&XSKCB(skb)->addrs_list);
+ skb->dev = xs->dev;
+ skb->priority = READ_ONCE(xs->sk.sk_priority);
+ skb->mark = READ_ONCE(xs->sk.sk_mark);
+ XSKCB(skb)->num_descs = 0;
+ skb->destructor = xsk_destruct_skb;
+ skb_shinfo(skb)->destructor_arg = (void *)(uintptr_t)addr;
}
static void xsk_consume_skb(struct sk_buff *skb)
{
struct xdp_sock *xs = xdp_sk(skb->sk);
+ u32 num_descs = xsk_get_num_desc(skb);
+ struct xsk_addr_node *pos, *tmp;
+
+ if (unlikely(num_descs > 1)) {
+ list_for_each_entry_safe(pos, tmp, &XSKCB(skb)->addrs_list, addr_node) {
+ list_del(&pos->addr_node);
+ kmem_cache_free(xsk_tx_generic_cache, pos);
+ }
+ }
skb->destructor = sock_wfree;
- xsk_cq_cancel_locked(xs->pool, xsk_get_num_desc(skb));
+ xsk_cq_cancel_locked(xs->pool, num_descs);
/* Free skb without triggering the perf drop trace */
consume_skb(skb);
xs->skb = NULL;
@@ -600,17 +657,60 @@ static void xsk_drop_skb(struct sk_buff *skb)
xsk_consume_skb(skb);
}
+static int xsk_skb_metadata(struct sk_buff *skb, void *buffer,
+ struct xdp_desc *desc, struct xsk_buff_pool *pool,
+ u32 hr)
+{
+ struct xsk_tx_metadata *meta = NULL;
+
+ if (unlikely(pool->tx_metadata_len == 0))
+ return -EINVAL;
+
+ meta = buffer - pool->tx_metadata_len;
+ if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
+ return -EINVAL;
+
+ if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM) {
+ if (unlikely(meta->request.csum_start +
+ meta->request.csum_offset +
+ sizeof(__sum16) > desc->len))
+ return -EINVAL;
+
+ skb->csum_start = hr + meta->request.csum_start;
+ skb->csum_offset = meta->request.csum_offset;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+
+ if (unlikely(pool->tx_sw_csum)) {
+ int err;
+
+ err = skb_checksum_help(skb);
+ if (err)
+ return err;
+ }
+ }
+
+ if (meta->flags & XDP_TXMD_FLAGS_LAUNCH_TIME)
+ skb->skb_mstamp_ns = meta->request.launch_time;
+ xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
+
+ return 0;
+}
+
static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
struct xdp_desc *desc)
{
struct xsk_buff_pool *pool = xs->pool;
u32 hr, len, ts, offset, copy, copied;
+ struct xsk_addr_node *xsk_addr;
struct sk_buff *skb = xs->skb;
struct page *page;
void *buffer;
int err, i;
u64 addr;
+ addr = desc->addr;
+ buffer = xsk_buff_raw_get_data(pool, addr);
+
if (!skb) {
hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
@@ -619,13 +719,29 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
return ERR_PTR(err);
skb_reserve(skb, hr);
+
+ xsk_skb_init_misc(skb, xs, desc->addr);
+ if (desc->options & XDP_TX_METADATA) {
+ err = xsk_skb_metadata(skb, buffer, desc, pool, hr);
+ if (unlikely(err))
+ return ERR_PTR(err);
+ }
+ } else {
+ xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache, GFP_KERNEL);
+ if (!xsk_addr)
+ return ERR_PTR(-ENOMEM);
+
+ /* in case of -EOVERFLOW that could happen below,
+ * xsk_consume_skb() will release this node as whole skb
+ * would be dropped, which implies freeing all list elements
+ */
+ xsk_addr->addr = desc->addr;
+ list_add_tail(&xsk_addr->addr_node, &XSKCB(skb)->addrs_list);
}
- addr = desc->addr;
len = desc->len;
ts = pool->unaligned ? len : pool->chunk_size;
- buffer = xsk_buff_raw_get_data(pool, addr);
offset = offset_in_page(buffer);
addr = buffer - pool->addrs;
@@ -656,16 +772,15 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
struct xdp_desc *desc)
{
- struct xsk_tx_metadata *meta = NULL;
struct net_device *dev = xs->dev;
struct sk_buff *skb = xs->skb;
- bool first_frag = false;
int err;
if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
skb = xsk_build_skb_zerocopy(xs, desc);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
+ skb = NULL;
goto free_err;
}
} else {
@@ -676,8 +791,6 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
len = desc->len;
if (!skb) {
- first_frag = true;
-
hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
tr = dev->needed_tailroom;
skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
@@ -690,8 +803,17 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
err = skb_store_bits(skb, 0, buffer, len);
if (unlikely(err))
goto free_err;
+
+ xsk_skb_init_misc(skb, xs, desc->addr);
+ if (desc->options & XDP_TX_METADATA) {
+ err = xsk_skb_metadata(skb, buffer, desc,
+ xs->pool, hr);
+ if (unlikely(err))
+ goto free_err;
+ }
} else {
int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct xsk_addr_node *xsk_addr;
struct page *page;
u8 *vaddr;
@@ -706,66 +828,36 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
goto free_err;
}
+ xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache, GFP_KERNEL);
+ if (!xsk_addr) {
+ __free_page(page);
+ err = -ENOMEM;
+ goto free_err;
+ }
+
vaddr = kmap_local_page(page);
memcpy(vaddr, buffer, len);
kunmap_local(vaddr);
skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
- }
-
- if (first_frag && desc->options & XDP_TX_METADATA) {
- if (unlikely(xs->pool->tx_metadata_len == 0)) {
- err = -EINVAL;
- goto free_err;
- }
-
- meta = buffer - xs->pool->tx_metadata_len;
- if (unlikely(!xsk_buff_valid_tx_metadata(meta))) {
- err = -EINVAL;
- goto free_err;
- }
-
- if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM) {
- if (unlikely(meta->request.csum_start +
- meta->request.csum_offset +
- sizeof(__sum16) > len)) {
- err = -EINVAL;
- goto free_err;
- }
- skb->csum_start = hr + meta->request.csum_start;
- skb->csum_offset = meta->request.csum_offset;
- skb->ip_summed = CHECKSUM_PARTIAL;
-
- if (unlikely(xs->pool->tx_sw_csum)) {
- err = skb_checksum_help(skb);
- if (err)
- goto free_err;
- }
- }
-
- if (meta->flags & XDP_TXMD_FLAGS_LAUNCH_TIME)
- skb->skb_mstamp_ns = meta->request.launch_time;
+ xsk_addr->addr = desc->addr;
+ list_add_tail(&xsk_addr->addr_node, &XSKCB(skb)->addrs_list);
}
}
- skb->dev = dev;
- skb->priority = READ_ONCE(xs->sk.sk_priority);
- skb->mark = READ_ONCE(xs->sk.sk_mark);
- skb->destructor = xsk_destruct_skb;
- xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
- xsk_set_destructor_arg(skb);
+ xsk_inc_num_desc(skb);
return skb;
free_err:
- if (first_frag && skb)
+ if (skb && !skb_shinfo(skb)->nr_frags)
kfree_skb(skb);
if (err == -EOVERFLOW) {
/* Drop the packet */
- xsk_set_destructor_arg(xs->skb);
+ xsk_inc_num_desc(xs->skb);
xsk_drop_skb(xs->skb);
xskq_cons_release(xs->tx);
} else {
@@ -779,10 +871,10 @@ free_err:
static int __xsk_generic_xmit(struct sock *sk)
{
struct xdp_sock *xs = xdp_sk(sk);
- u32 max_batch = TX_BATCH_SIZE;
bool sent_frame = false;
struct xdp_desc desc;
struct sk_buff *skb;
+ u32 max_batch;
int err = 0;
mutex_lock(&xs->mutex);
@@ -796,6 +888,7 @@ static int __xsk_generic_xmit(struct sock *sk)
if (xs->queue_id >= xs->dev->real_num_tx_queues)
goto out;
+ max_batch = READ_ONCE(xs->max_tx_budget);
while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
if (max_batch-- == 0) {
err = -EAGAIN;
@@ -807,7 +900,7 @@ static int __xsk_generic_xmit(struct sock *sk)
* if there is space in it. This avoids having to implement
* any buffering in the Tx path.
*/
- err = xsk_cq_reserve_addr_locked(xs->pool, desc.addr);
+ err = xsk_cq_reserve_locked(xs->pool);
if (err) {
err = -EAGAIN;
goto out;
@@ -858,8 +951,7 @@ static int __xsk_generic_xmit(struct sock *sk)
out:
if (sent_frame)
- if (xsk_tx_writeable(xs))
- sk->sk_write_space(sk);
+ __xsk_tx_release(xs);
mutex_unlock(&xs->mutex);
return err;
@@ -1437,6 +1529,21 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
mutex_unlock(&xs->mutex);
return err;
}
+ case XDP_MAX_TX_SKB_BUDGET:
+ {
+ unsigned int budget;
+
+ if (optlen != sizeof(budget))
+ return -EINVAL;
+ if (copy_from_sockptr(&budget, optval, sizeof(budget)))
+ return -EFAULT;
+ if (!xs->tx ||
+ budget < TX_BATCH_SIZE || budget > xs->tx->nentries)
+ return -EACCES;
+
+ WRITE_ONCE(xs->max_tx_budget, budget);
+ return 0;
+ }
default:
break;
}
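A hypothetical userspace call for the new option — a sketch assuming XDP_MAX_TX_SKB_BUDGET is exported through <linux/if_xdp.h> on kernels that carry this change, not part of the patch itself:

/* Sketch only: raise the per-send skb budget on an AF_XDP socket. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

static int set_tx_budget(int xsk_fd, unsigned int budget)
{
	/* Per the hunk above, the value must be >= TX_BATCH_SIZE (32) and
	 * <= the TX ring size, otherwise the kernel returns -EACCES. */
	if (setsockopt(xsk_fd, SOL_XDP, XDP_MAX_TX_SKB_BUDGET,
		       &budget, sizeof(budget)) < 0) {
		fprintf(stderr, "XDP_MAX_TX_SKB_BUDGET: %s\n", strerror(errno));
		return -1;
	}
	return 0;
}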
@@ -1734,6 +1841,7 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
xs = xdp_sk(sk);
xs->state = XSK_READY;
+ xs->max_tx_budget = TX_BATCH_SIZE;
mutex_init(&xs->mutex);
INIT_LIST_HEAD(&xs->map_list);
@@ -1795,8 +1903,18 @@ static int __init xsk_init(void)
if (err)
goto out_pernet;
+ xsk_tx_generic_cache = kmem_cache_create("xsk_generic_xmit_cache",
+ sizeof(struct xsk_addr_node),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!xsk_tx_generic_cache) {
+ err = -ENOMEM;
+ goto out_unreg_notif;
+ }
+
return 0;
+out_unreg_notif:
+ unregister_netdevice_notifier(&xsk_netdev_notifier);
out_pernet:
unregister_pernet_subsys(&xsk_net_ops);
out_sk:
diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c
index 09dcea0cbbed..0e0bca031c03 100644
--- a/net/xdp/xsk_diag.c
+++ b/net/xdp/xsk_diag.c
@@ -119,7 +119,7 @@ static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
if ((req->xdiag_show & XDP_SHOW_INFO) &&
nla_put_u32(nlskb, XDP_DIAG_UID,
- from_kuid_munged(user_ns, sock_i_uid(sk))))
+ from_kuid_munged(user_ns, sk_uid(sk))))
goto out_nlmsg_trim;
if ((req->xdiag_show & XDP_SHOW_RING_CFG) &&
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 46d87e961ad6..1eb8d9f8b104 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -143,14 +143,24 @@ static inline bool xp_unused_options_set(u32 options)
static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
struct xdp_desc *desc)
{
- u64 addr = desc->addr - pool->tx_metadata_len;
- u64 len = desc->len + pool->tx_metadata_len;
- u64 offset = addr & (pool->chunk_size - 1);
+ u64 len = desc->len;
+ u64 addr, offset;
- if (!desc->len)
+ if (!len)
return false;
- if (offset + len > pool->chunk_size)
+ /* Can overflow if desc->addr < pool->tx_metadata_len */
+ if (check_sub_overflow(desc->addr, pool->tx_metadata_len, &addr))
+ return false;
+
+ offset = addr & (pool->chunk_size - 1);
+
+ /*
+ * Can't overflow: @offset is guaranteed to be < ``U32_MAX``
+ * (pool->chunk_size is ``u32``), @len is guaranteed
+ * to be <= ``U32_MAX``.
+ */
+ if (offset + len + pool->tx_metadata_len > pool->chunk_size)
return false;
if (addr >= pool->addrs_cnt)
@@ -158,27 +168,42 @@ static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
if (xp_unused_options_set(desc->options))
return false;
+
return true;
}
static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
struct xdp_desc *desc)
{
- u64 addr = xp_unaligned_add_offset_to_addr(desc->addr) - pool->tx_metadata_len;
- u64 len = desc->len + pool->tx_metadata_len;
+ u64 len = desc->len;
+ u64 addr, end;
- if (!desc->len)
+ if (!len)
return false;
+ /* Can't overflow: @len is guaranteed to be <= ``U32_MAX`` */
+ len += pool->tx_metadata_len;
if (len > pool->chunk_size)
return false;
- if (addr >= pool->addrs_cnt || addr + len > pool->addrs_cnt ||
- xp_desc_crosses_non_contig_pg(pool, addr, len))
+ /* Can overflow if desc->addr is close to 0 */
+ if (check_sub_overflow(xp_unaligned_add_offset_to_addr(desc->addr),
+ pool->tx_metadata_len, &addr))
+ return false;
+
+ if (addr >= pool->addrs_cnt)
+ return false;
+
+ /* Can overflow if pool->addrs_cnt is high enough */
+ if (check_add_overflow(addr, len, &end) || end > pool->addrs_cnt)
+ return false;
+
+ if (xp_desc_crosses_non_contig_pg(pool, addr, len))
return false;
if (xp_unused_options_set(desc->options))
return false;
+
return true;
}
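The validation above now rejects descriptors whose address would wrap when the metadata length is subtracted, or when addr + len is formed. A plain-C sketch of the same checks using the compiler builtins that check_sub_overflow()/check_add_overflow() wrap, with made-up pool sizes:

/* Sketch only: overflow-checked descriptor bounds validation. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool desc_addr_ok(uint64_t desc_addr, uint64_t tx_metadata_len,
			 uint64_t len, uint64_t addrs_cnt)
{
	uint64_t addr, end;

	/* underflows if desc_addr < tx_metadata_len */
	if (__builtin_sub_overflow(desc_addr, tx_metadata_len, &addr))
		return false;
	if (addr >= addrs_cnt)
		return false;
	/* wraps if addr and len are both near U64_MAX */
	if (__builtin_add_overflow(addr, len, &end) || end > addrs_cnt)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", desc_addr_ok(4, 8, 64, 1 << 20));	/* 0: underflow */
	printf("%d\n", desc_addr_ok(4096, 8, 64, 1 << 20));	/* 1: fits */
	return 0;
}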
@@ -344,6 +369,11 @@ static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
/* Functions for producers */
+static inline u32 xskq_get_prod(struct xsk_queue *q)
+{
+ return READ_ONCE(q->ring->producer);
+}
+
static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{
u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);
@@ -390,6 +420,13 @@ static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
return 0;
}
+static inline void xskq_prod_write_addr(struct xsk_queue *q, u32 idx, u64 addr)
+{
+ struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
+
+ ring->desc[idx & q->ring_mask] = addr;
+}
+
static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
u32 nb_entries)
{
diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
index fc7a603b04f1..bf744ac9d5a7 100644
--- a/net/xfrm/espintcp.c
+++ b/net/xfrm/espintcp.c
@@ -555,14 +555,10 @@ static void espintcp_close(struct sock *sk, long timeout)
static __poll_t espintcp_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
- __poll_t mask = datagram_poll(file, sock, wait);
struct sock *sk = sock->sk;
struct espintcp_ctx *ctx = espintcp_getctx(sk);
- if (!skb_queue_empty(&ctx->ike_queue))
- mask |= EPOLLIN | EPOLLRDNORM;
-
- return mask;
+ return datagram_poll_queue(file, sock, wait, &ctx->ike_queue);
}
static void build_protos(struct proto *espintcp_prot,
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 81fd486b5e56..44b9de6e4e77 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -155,7 +155,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
return skb;
}
- if (skb_is_gso(skb) && unlikely(xmit_xfrm_check_overflow(skb))) {
+ if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) ||
+ unlikely(xmit_xfrm_check_overflow(skb)))) {
struct sk_buff *segs;
/* Packet got rerouted, fixup features and segment it. */
@@ -305,7 +306,6 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
return -EINVAL;
}
- xfrm_set_type_offload(x);
if (!x->type_offload) {
NL_SET_ERR_MSG(extack, "Type doesn't support offload");
dev_put(dev);
@@ -416,10 +416,12 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
struct net_device *dev = x->xso.dev;
bool check_tunnel_size;
- if (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED)
+ if (!x->type_offload ||
+ (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED && x->encap))
return false;
- if ((dev == xfrm_dst_path(dst)->dev) && !xdst->child->xfrm) {
+ if ((!dev || dev == xfrm_dst_path(dst)->dev) &&
+ !xdst->child->xfrm) {
mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
if (skb->len <= mtu)
goto ok;
@@ -431,9 +433,12 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
return false;
ok:
+ if (!dev)
+ return true;
+
check_tunnel_size = x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
x->props.mode == XFRM_MODE_TUNNEL;
- switch (x->props.family) {
+ switch (x->inner_mode.family) {
case AF_INET:
/* Check for IPv4 options */
if (ip_hdr(skb)->ihl != 5)
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 7e6a71b9d6a3..c9ddef869aa5 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -503,6 +503,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
/* An encap_type of -1 indicates async resumption. */
if (encap_type == -1) {
async = 1;
+ dev_put(skb->dev);
seq = XFRM_SKB_CB(skb)->seq.input.low;
goto resume;
}
@@ -649,18 +650,18 @@ lock:
XFRM_SKB_CB(skb)->seq.input.low = seq;
XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
- dev_hold(skb->dev);
-
- if (crypto_done)
+ if (crypto_done) {
nexthdr = x->type_offload->input_tail(x, skb);
- else
+ } else {
+ dev_hold(skb->dev);
+
nexthdr = x->type->input(x, skb);
+ if (nexthdr == -EINPROGRESS)
+ return 0;
- if (nexthdr == -EINPROGRESS)
- return 0;
+ dev_put(skb->dev);
+ }
resume:
- dev_put(skb->dev);
-
spin_lock(&x->lock);
if (nexthdr < 0) {
if (nexthdr == -EBADMSG) {
diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
index cb1e12740c87..330a05286a56 100644
--- a/net/xfrm/xfrm_interface_core.c
+++ b/net/xfrm/xfrm_interface_core.c
@@ -875,7 +875,7 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
return -EINVAL;
}
- if (p.collect_md) {
+ if (p.collect_md || xi->p.collect_md) {
NL_SET_ERR_MSG(extack, "collect_md can't be changed");
return -EINVAL;
}
@@ -886,11 +886,6 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
} else {
if (xi->dev != dev)
return -EEXIST;
- if (xi->p.collect_md) {
- NL_SET_ERR_MSG(extack,
- "device can't be changed to collect_md");
- return -EINVAL;
- }
}
return xfrmi_update(xi, &p);
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index 907c3ccb440d..43fdc6ed8dd1 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -97,7 +97,7 @@ static int ipcomp_input_done2(struct sk_buff *skb, int err)
struct ip_comp_hdr *ipch = ip_comp_hdr(skb);
const int plen = skb->len;
- skb_reset_transport_header(skb);
+ skb->transport_header = skb->network_header + sizeof(*ipch);
return ipcomp_post_acomp(skb, err, 0) ?:
skb->len < (plen + sizeof(ip_comp_hdr)) ? -EINVAL :
@@ -313,7 +313,6 @@ void ipcomp_destroy(struct xfrm_state *x)
struct ipcomp_data *ipcd = x->data;
if (!ipcd)
return;
- xfrm_state_delete_tunnel(x);
ipcomp_free_data(ipcd);
kfree(ipcd);
}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 094d2454602e..62486f866975 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2594,7 +2594,7 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
static dscp_t xfrm_get_dscp(const struct flowi *fl, int family)
{
if (family == AF_INET)
- return inet_dsfield_to_dscp(fl->u.ip4.flowi4_tos);
+ return fl->u.ip4.flowi4_dscp;
return 0;
}
@@ -3462,7 +3462,7 @@ decode_session4(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reve
}
fl4->flowi4_proto = flkeys->basic.ip_proto;
- fl4->flowi4_tos = flkeys->ip.tos & ~INET_ECN_MASK;
+ fl4->flowi4_dscp = inet_dsfield_to_dscp(flkeys->ip.tos);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -3594,7 +3594,7 @@ static bool xfrm_icmp_flow_decode(struct sk_buff *skb, unsigned short family,
fl1->flowi_oif = fl->flowi_oif;
fl1->flowi_mark = fl->flowi_mark;
- fl1->flowi_tos = fl->flowi_tos;
+ fl1->flowi_dscp = fl->flowi_dscp;
nf_nat_decode_session(newskb, fl1, family);
ret = false;
@@ -3881,12 +3881,18 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
}
skb_dst_force(skb);
- if (!skb_dst(skb)) {
+ dst = skb_dst(skb);
+ if (!dst) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
return 0;
}
- dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
+ /* ignore return value from skb_dstref_steal, xfrm_lookup takes
+ * care of dropping the refcnt if needed.
+ */
+ skb_dstref_steal(skb);
+
+ dst = xfrm_lookup(net, dst, &fl, NULL, XFRM_LOOKUP_QUEUE);
if (IS_ERR(dst)) {
res = 0;
dst = NULL;
@@ -3925,7 +3931,7 @@ static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
* This will force stale_bundle() to fail on any xdst bundle with
* this dst linked in it.
*/
- if (dst->obsolete < 0 && !stale_bundle(dst))
+ if (READ_ONCE(dst->obsolete) < 0 && !stale_bundle(dst))
return dst;
return NULL;
@@ -3953,7 +3959,7 @@ static void xfrm_link_failure(struct sk_buff *skb)
static void xfrm_negative_advice(struct sock *sk, struct dst_entry *dst)
{
- if (dst->obsolete)
+ if (READ_ONCE(dst->obsolete))
sk_dst_reset(sk);
}
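Both hunks above wrap the lockless read of dst->obsolete in READ_ONCE() so the compiler is forced to emit a single, non-torn load of a field another CPU may update concurrently. A rough equivalent of the same idea, expressed with C11 atomics instead of the kernel macro and an illustrative stand-in type:

#include <stdatomic.h>

struct dst_like {
	_Atomic int obsolete;	/* stand-in for dst_entry::obsolete */
};

/* One relaxed load, in the spirit of READ_ONCE(dst->obsolete). */
static int dst_obsolete_once(struct dst_like *dst)
{
	return atomic_load_explicit(&dst->obsolete, memory_order_relaxed);
}

/* caller then tests the snapshot, e.g.: if (dst_obsolete_once(dst) < 0) ... */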
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index 8e07dd614b0b..5e1fd6b1d503 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -45,21 +45,21 @@ static const struct snmp_mib xfrm_mib_list[] = {
SNMP_MIB_ITEM("XfrmInStateDirError", LINUX_MIB_XFRMINSTATEDIRERROR),
SNMP_MIB_ITEM("XfrmInIptfsError", LINUX_MIB_XFRMINIPTFSERROR),
SNMP_MIB_ITEM("XfrmOutNoQueueSpace", LINUX_MIB_XFRMOUTNOQSPACE),
- SNMP_MIB_SENTINEL
};
static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
{
- unsigned long buff[LINUX_MIB_XFRMMAX];
+ unsigned long buff[ARRAY_SIZE(xfrm_mib_list)];
+ const int cnt = ARRAY_SIZE(xfrm_mib_list);
struct net *net = seq->private;
int i;
- memset(buff, 0, sizeof(unsigned long) * LINUX_MIB_XFRMMAX);
+ memset(buff, 0, sizeof(buff));
xfrm_state_update_stats(net);
- snmp_get_cpu_field_batch(buff, xfrm_mib_list,
- net->mib.xfrm_statistics);
- for (i = 0; xfrm_mib_list[i].name; i++)
+ snmp_get_cpu_field_batch_cnt(buff, xfrm_mib_list, cnt,
+ net->mib.xfrm_statistics);
+ for (i = 0; i < cnt; i++)
seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name,
buff[i]);
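Dropping SNMP_MIB_SENTINEL means the table's length is now carried by ARRAY_SIZE() instead of a NULL-name terminator, and the iteration bounds follow the array rather than a sentinel scan. A standalone sketch of the same pattern with purely illustrative names:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct mib_item {
	const char *name;
	int idx;
};

static const struct mib_item mib_list[] = {
	{ "InError",  0 },
	{ "OutError", 1 },
	/* no { NULL, 0 } sentinel entry needed any more */
};

int main(void)
{
	unsigned long buff[ARRAY_SIZE(mib_list)] = { 0 };

	for (size_t i = 0; i < ARRAY_SIZE(mib_list); i++)
		printf("%-24s\t%lu\n", mib_list[i].name, buff[i]);
	return 0;
}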
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 77cc418ad69e..d213ca3653a8 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -424,11 +424,10 @@ void xfrm_unregister_type_offload(const struct xfrm_type_offload *type,
}
EXPORT_SYMBOL(xfrm_unregister_type_offload);
-void xfrm_set_type_offload(struct xfrm_state *x)
+void xfrm_set_type_offload(struct xfrm_state *x, bool try_load)
{
const struct xfrm_type_offload *type = NULL;
struct xfrm_state_afinfo *afinfo;
- bool try_load = true;
retry:
afinfo = xfrm_state_get_afinfo(x->props.family);
@@ -593,7 +592,7 @@ void xfrm_state_free(struct xfrm_state *x)
}
EXPORT_SYMBOL(xfrm_state_free);
-static void ___xfrm_state_destroy(struct xfrm_state *x)
+static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
if (x->mode_cbs && x->mode_cbs->destroy_state)
x->mode_cbs->destroy_state(x);
@@ -607,6 +606,7 @@ static void ___xfrm_state_destroy(struct xfrm_state *x)
kfree(x->coaddr);
kfree(x->replay_esn);
kfree(x->preplay_esn);
+ xfrm_unset_type_offload(x);
if (x->type) {
x->type->destructor(x);
xfrm_put_type(x->type);
@@ -631,7 +631,7 @@ static void xfrm_state_gc_task(struct work_struct *work)
synchronize_rcu();
hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
- ___xfrm_state_destroy(x);
+ xfrm_state_gc_destroy(x);
}
static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
@@ -780,8 +780,6 @@ void xfrm_dev_state_free(struct xfrm_state *x)
struct xfrm_dev_offload *xso = &x->xso;
struct net_device *dev = READ_ONCE(xso->dev);
- xfrm_unset_type_offload(x);
-
if (dev && dev->xfrmdev_ops) {
spin_lock_bh(&xfrm_state_dev_gc_lock);
if (!hlist_unhashed(&x->dev_gclist))
@@ -797,22 +795,18 @@ void xfrm_dev_state_free(struct xfrm_state *x)
}
#endif
-void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
+void __xfrm_state_destroy(struct xfrm_state *x)
{
WARN_ON(x->km.state != XFRM_STATE_DEAD);
- if (sync) {
- synchronize_rcu();
- ___xfrm_state_destroy(x);
- } else {
- spin_lock_bh(&xfrm_state_gc_lock);
- hlist_add_head(&x->gclist, &xfrm_state_gc_list);
- spin_unlock_bh(&xfrm_state_gc_lock);
- schedule_work(&xfrm_state_gc_work);
- }
+ spin_lock_bh(&xfrm_state_gc_lock);
+ hlist_add_head(&x->gclist, &xfrm_state_gc_list);
+ spin_unlock_bh(&xfrm_state_gc_lock);
+ schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
+static void xfrm_state_delete_tunnel(struct xfrm_state *x);
int __xfrm_state_delete(struct xfrm_state *x)
{
struct net *net = xs_net(x);
@@ -840,6 +834,8 @@ int __xfrm_state_delete(struct xfrm_state *x)
xfrm_dev_state_delete(x);
+ xfrm_state_delete_tunnel(x);
+
/* All xfrm_state objects are created by xfrm_state_alloc.
* The xfrm_state_alloc call gives a reference, and that
* is what we are dropping here.
@@ -921,7 +917,7 @@ xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool
}
#endif
-int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
+int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
{
int i, err = 0, cnt = 0;
@@ -943,10 +939,7 @@ restart:
err = xfrm_state_delete(x);
xfrm_audit_state_delete(x, err ? 0 : 1,
task_valid);
- if (sync)
- xfrm_state_put_sync(x);
- else
- xfrm_state_put(x);
+ xfrm_state_put(x);
if (!err)
cnt++;
@@ -1307,14 +1300,8 @@ static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
const struct flowi *fl, unsigned short family,
struct xfrm_state **best, int *acq_in_progress,
- int *error)
+ int *error, unsigned int pcpu_id)
{
- /* We need the cpu id just as a lookup key,
- * we don't require it to be stable.
- */
- unsigned int pcpu_id = get_cpu();
- put_cpu();
-
/* Resolution logic:
* 1. There is a valid state with matching selector. Done.
* 2. Valid state with inappropriate selector. Skip.
@@ -1381,14 +1368,15 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
/* We need the cpu id just as a lookup key,
* we don't require it to be stable.
*/
- pcpu_id = get_cpu();
- put_cpu();
+ pcpu_id = raw_smp_processor_id();
to_put = NULL;
sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
rcu_read_lock();
+ xfrm_hash_ptrs_get(net, &state_ptrs);
+
hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) {
if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
@@ -1400,7 +1388,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
xfrm_state_look_at(pol, x, fl, encap_family,
- &best, &acquire_in_progress, &error);
+ &best, &acquire_in_progress, &error, pcpu_id);
}
if (best)
@@ -1417,7 +1405,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
xfrm_state_look_at(pol, x, fl, family,
- &best, &acquire_in_progress, &error);
+ &best, &acquire_in_progress, &error, pcpu_id);
}
cached:
@@ -1429,8 +1417,6 @@ cached:
else if (acquire_in_progress) /* XXX: acquire_in_progress should not happen */
WARN_ON(1);
- xfrm_hash_ptrs_get(net, &state_ptrs);
-
h = __xfrm_dst_hash(daddr, saddr, tmpl->reqid, encap_family, state_ptrs.hmask);
hlist_for_each_entry_rcu(x, state_ptrs.bydst + h, bydst) {
#ifdef CONFIG_XFRM_OFFLOAD
@@ -1460,7 +1446,7 @@ cached:
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
xfrm_state_look_at(pol, x, fl, family,
- &best, &acquire_in_progress, &error);
+ &best, &acquire_in_progress, &error, pcpu_id);
}
if (best || acquire_in_progress)
goto found;
@@ -1495,7 +1481,7 @@ cached:
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
xfrm_state_look_at(pol, x, fl, family,
- &best, &acquire_in_progress, &error);
+ &best, &acquire_in_progress, &error, pcpu_id);
}
found:
@@ -1711,6 +1697,26 @@ struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
}
EXPORT_SYMBOL(xfrm_state_lookup_byspi);
+static struct xfrm_state *xfrm_state_lookup_spi_proto(struct net *net, __be32 spi, u8 proto)
+{
+ struct xfrm_state *x;
+ unsigned int i;
+
+ rcu_read_lock();
+ for (i = 0; i <= net->xfrm.state_hmask; i++) {
+ hlist_for_each_entry_rcu(x, &net->xfrm.state_byspi[i], byspi) {
+ if (x->id.spi == spi && x->id.proto == proto) {
+ if (!xfrm_state_hold_rcu(x))
+ continue;
+ rcu_read_unlock();
+ return x;
+ }
+ }
+ }
+ rcu_read_unlock();
+ return NULL;
+}
+
static void __xfrm_state_insert(struct xfrm_state *x)
{
struct net *net = xs_net(x);
@@ -2262,7 +2268,12 @@ EXPORT_SYMBOL(xfrm_state_update);
int xfrm_state_check_expire(struct xfrm_state *x)
{
- xfrm_dev_state_update_stats(x);
+ /* All counters which are needed to decide if state is expired
+ * are handled by SW for non-packet offload modes. Simply skip
+ * the following update and save extra boilerplate in drivers.
+ */
+ if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
+ xfrm_dev_state_update_stats(x);
if (!READ_ONCE(x->curlft.use_time))
WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());
@@ -2555,10 +2566,8 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
unsigned int h;
struct xfrm_state *x0;
int err = -ENOENT;
- __be32 minspi = htonl(low);
- __be32 maxspi = htonl(high);
+ u32 range = high - low + 1;
__be32 newspi = 0;
- u32 mark = x->mark.v & x->mark.m;
spin_lock_bh(&x->lock);
if (x->km.state == XFRM_STATE_DEAD) {
@@ -2572,38 +2581,37 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
err = -ENOENT;
- if (minspi == maxspi) {
- x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
- if (x0) {
- NL_SET_ERR_MSG(extack, "Requested SPI is already in use");
- xfrm_state_put(x0);
+ for (h = 0; h < range; h++) {
+ u32 spi = (low == high) ? low : get_random_u32_inclusive(low, high);
+ if (spi == 0)
+ goto next;
+ newspi = htonl(spi);
+
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ x0 = xfrm_state_lookup_spi_proto(net, newspi, x->id.proto);
+ if (!x0) {
+ x->id.spi = newspi;
+ h = xfrm_spi_hash(net, &x->id.daddr, newspi, x->id.proto, x->props.family);
+ XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h, x->xso.type);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ err = 0;
goto unlock;
}
- newspi = minspi;
- } else {
- u32 spi = 0;
- for (h = 0; h < high-low+1; h++) {
- spi = get_random_u32_inclusive(low, high);
- x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
- if (x0 == NULL) {
- newspi = htonl(spi);
- break;
- }
- xfrm_state_put(x0);
+ xfrm_state_put(x0);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+
+next:
+ if (signal_pending(current)) {
+ err = -ERESTARTSYS;
+ goto unlock;
}
+
+ if (low == high)
+ break;
}
- if (newspi) {
- spin_lock_bh(&net->xfrm.xfrm_state_lock);
- x->id.spi = newspi;
- h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
- XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
- x->xso.type);
- spin_unlock_bh(&net->xfrm.xfrm_state_lock);
- err = 0;
- } else {
+ if (err)
NL_SET_ERR_MSG(extack, "No SPI available in the requested range");
- }
unlock:
spin_unlock_bh(&x->lock);
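The rewritten loop above draws a candidate SPI at random from [low, high], rejects zero, and retries on collision until the range is exhausted. A simplified user-space sketch of the same selection strategy, where in_use() stands in for xfrm_state_lookup_spi_proto() and the RNG is only illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Illustrative stub: the kernel consults the byspi hash table instead. */
static bool in_use(uint32_t spi)
{
	return false;
}

static int pick_spi(uint32_t low, uint32_t high, uint32_t *out)
{
	uint64_t range = (uint64_t)high - low + 1;

	for (uint64_t tries = 0; tries < range; tries++) {
		uint32_t spi = (low == high) ? low
					     : low + (uint32_t)(rand() % range);

		if (spi == 0)
			continue;		/* SPI 0 is reserved */

		if (!in_use(spi)) {
			*out = spi;		/* host order; htonl() before use on the wire */
			return 0;
		}

		if (low == high)
			break;			/* the single requested SPI is taken */
	}

	return -1;				/* no free SPI in the requested range */
}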
@@ -3077,20 +3085,17 @@ void xfrm_flush_gc(void)
}
EXPORT_SYMBOL(xfrm_flush_gc);
-/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
-void xfrm_state_delete_tunnel(struct xfrm_state *x)
+static void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
if (x->tunnel) {
struct xfrm_state *t = x->tunnel;
- if (atomic_read(&t->tunnel_users) == 2)
+ if (atomic_dec_return(&t->tunnel_users) == 1)
xfrm_state_delete(t);
- atomic_dec(&t->tunnel_users);
- xfrm_state_put_sync(t);
+ xfrm_state_put(t);
x->tunnel = NULL;
}
}
-EXPORT_SYMBOL(xfrm_state_delete_tunnel);
u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
@@ -3295,8 +3300,8 @@ void xfrm_state_fini(struct net *net)
unsigned int sz;
flush_work(&net->xfrm.state_hash_work);
+ xfrm_state_flush(net, 0, false);
flush_work(&xfrm_state_gc_work);
- xfrm_state_flush(net, 0, false, true);
WARN_ON(!list_empty(&net->xfrm.state_all));
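The xfrm_state_delete_tunnel() change above replaces a separate read-then-decrement of tunnel_users with a single atomic_dec_return(), so the "last extra user" decision and the decrement can no longer race against a concurrent drop. The same idea expressed with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

/* Sketch: decrement and observe the new value in one atomic step. */
static bool put_tunnel_user(_Atomic int *tunnel_users)
{
	int newval = atomic_fetch_sub(tunnel_users, 1) - 1;

	/* true when only the owning state's reference remains,
	 * mirroring "atomic_dec_return(&t->tunnel_users) == 1" above
	 */
	return newval == 1;
}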
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 59f258daf830..010c9e6638c0 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -593,7 +593,7 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
if (!p)
return -ENOMEM;
- strcpy(p->alg_name, algo->name);
+ strscpy(p->alg_name, algo->name);
*algpp = p;
return 0;
}
@@ -620,7 +620,7 @@ static int attach_crypt(struct xfrm_state *x, struct nlattr *rta,
if (!p)
return -ENOMEM;
- strcpy(p->alg_name, algo->name);
+ strscpy(p->alg_name, algo->name);
x->ealg = p;
x->geniv = algo->uinfo.encr.geniv;
return 0;
@@ -649,7 +649,7 @@ static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
if (!p)
return -ENOMEM;
- strcpy(p->alg_name, algo->name);
+ strscpy(p->alg_name, algo->name);
p->alg_key_len = ualg->alg_key_len;
p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);
@@ -684,7 +684,7 @@ static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
if (!p)
return -ENOMEM;
- strcpy(p->alg_name, algo->name);
+ strscpy(p->alg_name, algo->name);
if (!p->alg_trunc_len)
p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
@@ -714,7 +714,7 @@ static int attach_aead(struct xfrm_state *x, struct nlattr *rta,
if (!p)
return -ENOMEM;
- strcpy(p->alg_name, algo->name);
+ strscpy(p->alg_name, algo->name);
x->aead = p;
x->geniv = algo->uinfo.aead.geniv;
return 0;
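The strcpy() to strscpy() conversions in this file swap an unbounded copy for one limited by the destination array; the two-argument form used above derives the bound from the destination's size and always NUL-terminates, truncating instead of overrunning. A rough user-space approximation of that behaviour (return values are illustrative, not the exact kernel semantics):

#include <stddef.h>
#include <string.h>

/* Sketch only: copies at most size-1 bytes, always NUL-terminates,
 * and returns the copied length or -1 when the source was truncated.
 */
static long bounded_copy(char *dst, const char *src, size_t size)
{
	size_t len;

	if (size == 0)
		return -1;

	len = strnlen(src, size);
	if (len == size) {
		len = size - 1;			/* truncate, keep room for NUL */
		memcpy(dst, src, len);
		dst[len] = '\0';
		return -1;			/* signal truncation, like -E2BIG */
	}

	memcpy(dst, src, len);
	dst[len] = '\0';
	return (long)len;
}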
@@ -977,6 +977,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
/* override default values from above */
xfrm_update_ae_params(x, attrs, 0);
+ xfrm_set_type_offload(x, attrs[XFRMA_OFFLOAD_DEV]);
/* configure the hardware if offload is requested */
if (attrs[XFRMA_OFFLOAD_DEV]) {
err = xfrm_dev_state_add(net, x,
@@ -2634,7 +2635,7 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
struct xfrm_usersa_flush *p = nlmsg_data(nlh);
int err;
- err = xfrm_state_flush(net, p->proto, true, false);
+ err = xfrm_state_flush(net, p->proto, true);
if (err) {
if (err == -ESRCH) /* empty table */
return 0;