--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -796,6 +796,9 @@ struct sk_buff {
 	__u8			tc_from_ingress:1;
 #endif
 	__u8			gro_skip:1;
+#ifdef CONFIG_SHORTCUT_FE
+	__u8			fast_forwarded:1;
+#endif
 #ifdef CONFIG_NET_SCHED
 	__u16			tc_index;	/* traffic control index */
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -54,6 +54,8 @@ struct br_ip_list {
 #define BR_DEFAULT_AGEING_TIME	(300 * HZ)
 
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
+extern void br_dev_update_stats(struct net_device *dev,
+				struct rtnl_link_stats64 *nlstats);
 
 typedef int br_should_route_hook_t(struct sk_buff *skb);
 extern br_should_route_hook_t __rcu *br_should_route_hook;
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -20,6 +20,9 @@ struct timer_list {
 	void			(*function)(unsigned long);
 	unsigned long		data;
 	u32			flags;
+#ifdef CONFIG_SHORTCUT_FE
+	unsigned long		cust_data;
+#endif
 
 #ifdef CONFIG_LOCKDEP
 	struct lockdep_map	lockdep_map;
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -74,6 +74,8 @@ struct nf_ct_event {
 #ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
 extern int nf_conntrack_register_notifier(struct net *net, struct notifier_block *nb);
 extern int nf_conntrack_unregister_notifier(struct net *net, struct notifier_block *nb);
+extern int nf_conntrack_register_chain_notifier(struct net *net, struct notifier_block *nb);
+extern int nf_conntrack_unregister_chain_notifier(struct net *net, struct notifier_block *nb);
 #else
 struct nf_ct_event_notifier {
 	int (*fcn)(unsigned int events, struct nf_ct_event *item);
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -463,3 +463,6 @@ config HAVE_CBPF_JIT
 # Extended BPF JIT (eBPF)
 config HAVE_EBPF_JIT
 	bool
+
+config SHORTCUT_FE
+	bool "Enables kernel network stack path for Shortcut Forwarding Engine"
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3007,7 +3007,16 @@ static int xmit_one(struct sk_buff *skb,
 #else
 	if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
 #endif
+#ifdef CONFIG_SHORTCUT_FE
+	/* If this skb has been fast forwarded then we don't want it to
+	 * go to any taps (by definition we're trying to bypass them).
+	 */
+	if (!skb->fast_forwarded) {
+#endif
 		dev_queue_xmit_nit(skb, dev);
+#ifdef CONFIG_SHORTCUT_FE
+	}
+#endif
 
 #ifdef CONFIG_ETHERNET_PACKET_MANGLE
 	if (!dev->eth_mangle_tx ||
@@ -4322,6 +4331,11 @@ void netdev_rx_handler_unregister(struct
 }
 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
 
+#ifdef CONFIG_SHORTCUT_FE
+int (*athrs_fast_nat_recv)(struct sk_buff *skb) __rcu __read_mostly;
+EXPORT_SYMBOL_GPL(athrs_fast_nat_recv);
+#endif
+
 /*
  * Limit the use of PFMEMALLOC reserves to those protocols that implement
  * the special handling of PFMEMALLOC skbs.
@@ -4369,6 +4383,9 @@ static int __netif_receive_skb_core(stru
 	bool deliver_exact = false;
 	int ret = NET_RX_DROP;
 	__be16 type;
+#ifdef CONFIG_SHORTCUT_FE
+	int (*fast_recv)(struct sk_buff *skb);
+#endif
 
 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
 
@@ -4395,6 +4412,16 @@ another_round:
 		goto out;
 	}
 
+#ifdef CONFIG_SHORTCUT_FE
+	fast_recv = rcu_dereference(athrs_fast_nat_recv);
+	if (fast_recv) {
+		if (fast_recv(skb)) {
+			ret = NET_RX_SUCCESS;
+			goto out;
+		}
+	}
+#endif
+
 	if (skb_skip_tc_classify(skb))
 		goto skip_classify;
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -35,11 +35,17 @@
 
 /* Do not check the TCP window for incoming packets */
 static int nf_ct_tcp_no_window_check __read_mostly = 1;
+#ifdef CONFIG_SHORTCUT_FE
+EXPORT_SYMBOL_GPL(nf_ct_tcp_no_window_check);
+#endif
 
 /* "Be conservative in what you do,
    be liberal in what you accept from others."
    If it's non-zero, we mark only out of window RST segments as INVALID. */
 static int nf_ct_tcp_be_liberal __read_mostly = 0;
+#ifdef CONFIG_SHORTCUT_FE
+EXPORT_SYMBOL_GPL(nf_ct_tcp_be_liberal);
+#endif
 
 /* If it is set to zero, we disable picking up already established
    connections. */
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -670,3 +670,26 @@ void br_port_flags_change(struct net_bri
 	if (mask & BR_AUTO_MASK)
 		nbp_update_port_count(br);
 }
+
+/* Update bridge statistics for bridge packets processed by offload engines */
+void br_dev_update_stats(struct net_device *dev,
+			 struct rtnl_link_stats64 *nlstats)
+{
+	struct net_bridge *br;
+	struct pcpu_sw_netstats *stats;
+
+	/* Is this a bridge? */
+	if (!(dev->priv_flags & IFF_EBRIDGE))
+		return;
+
+	br = netdev_priv(dev);
+	stats = this_cpu_ptr(br->stats);
+
+	u64_stats_update_begin(&stats->syncp);
+	stats->rx_packets += nlstats->rx_packets;
+	stats->rx_bytes += nlstats->rx_bytes;
+	stats->tx_packets += nlstats->tx_packets;
+	stats->tx_bytes += nlstats->tx_bytes;
+	u64_stats_update_end(&stats->syncp);
+}
+EXPORT_SYMBOL_GPL(br_dev_update_stats);
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -162,7 +162,11 @@ int nf_conntrack_eventmask_report(unsign
 
 	rcu_read_lock();
 	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+	if (!notify && !rcu_dereference_raw(net->ct.nf_conntrack_chain.head))
+#else
 	if (!notify)
+#endif
 		goto out_unlock;
 
 	e = nf_ct_ecache_find(ct);
@@ -181,7 +185,14 @@ int nf_conntrack_eventmask_report(unsign
 		if (!((eventmask | missed) & e->ctmask))
 			goto out_unlock;
 
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+	ret = atomic_notifier_call_chain(&net->ct.nf_conntrack_chain,
+					 eventmask | missed, &item);
+	if (notify)
+		ret = notify->fcn(eventmask | missed, &item);
+#else
 	ret = notify->fcn(eventmask | missed, &item);
+#endif
 	if (unlikely(ret < 0 || missed)) {
 		spin_lock_bh(&ct->lock);
 		if (ret < 0) {
@@ -263,7 +274,11 @@ void nf_ct_deliver_cached_events(struct
 
 	rcu_read_lock();
 	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+	if ((notify == NULL) && !rcu_dereference_raw(net->ct.nf_conntrack_chain.head))
+#else
 	if (notify == NULL)
+#endif
 		goto out_unlock;
 
 	e = nf_ct_ecache_find(ct);
@@ -287,7 +302,15 @@ void nf_ct_deliver_cached_events(struct
 	item.portid = 0;
 	item.report = 0;
 
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+	ret = atomic_notifier_call_chain(&net->ct.nf_conntrack_chain,
+					 events | missed,
+					 &item);
+	if (notify != NULL)
+		ret = notify->fcn(events | missed, &item);
+#else
 	ret = notify->fcn(events | missed, &item);
+#endif
 
 	if (likely(ret == 0 && !missed))
 		goto out_unlock;
@@ -340,6 +363,11 @@ int nf_conntrack_register_notifier(struc
 {
 	return atomic_notifier_chain_register(&net->ct.nf_conntrack_chain, nb);
 }
+int nf_conntrack_register_chain_notifier(struct net *net, struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&net->ct.nf_conntrack_chain, nb);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_register_chain_notifier);
 #else
 int nf_conntrack_register_notifier(struct net *net,
 				   struct nf_ct_event_notifier *new)
@@ -369,6 +397,11 @@ int nf_conntrack_unregister_notifier(str
 {
 	return atomic_notifier_chain_unregister(&net->ct.nf_conntrack_chain, nb);
 }
+int nf_conntrack_unregister_chain_notifier(struct net *net, struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&net->ct.nf_conntrack_chain, nb);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_unregister_chain_notifier);
 #else
 void nf_conntrack_unregister_notifier(struct net *net,
 				      struct nf_ct_event_notifier *new)
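
For context, below is a minimal sketch of how a forwarding-engine module might attach to the athrs_fast_nat_recv hook exported by the net/core/dev.c hunks above. This consumer is not part of the patch; everything prefixed my_fe_ is a hypothetical name. The semantics come from the __netif_receive_skb_core() hunk: a non-zero return from the hook means the skb was consumed by the fast path (the core reports NET_RX_SUCCESS and stops), and packets the engine transmits itself would set skb->fast_forwarded so xmit_one() skips the taps.

/* Illustrative consumer of the CONFIG_SHORTCUT_FE receive hook.
 * Only athrs_fast_nat_recv and skb->fast_forwarded come from the patch;
 * the my_fe_* names are stand-ins.
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

extern int (*athrs_fast_nat_recv)(struct sk_buff *skb) __rcu;

static int my_fe_recv(struct sk_buff *skb)
{
	/* Look up the flow here; if it is offloaded, forward the skb,
	 * set skb->fast_forwarded = 1 on the transmit side, and return 1
	 * so __netif_receive_skb_core() stops processing it.
	 */
	return 0;	/* not handled: fall through to the normal stack */
}

static int __init my_fe_init(void)
{
	rcu_assign_pointer(athrs_fast_nat_recv, my_fe_recv);
	return 0;
}

static void __exit my_fe_exit(void)
{
	rcu_assign_pointer(athrs_fast_nat_recv, NULL);
	synchronize_net();	/* wait out readers still inside the RX path */
}

module_init(my_fe_init);
module_exit(my_fe_exit);
MODULE_LICENSE("GPL");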
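
Similarly, a hedged sketch of how such a module might use the other two interfaces the patch exports, assuming CONFIG_NF_CONNTRACK_CHAIN_EVENTS is enabled: nf_conntrack_register_chain_notifier() to learn when a connection is torn down (so the offloaded flow can be flushed), and br_dev_update_stats() to fold the packets and bytes it forwarded on behalf of a bridge back into that bridge's counters. Only those exported symbols come from the patch; the my_fe_* names are illustrative.

/* Illustrative use of nf_conntrack_register_chain_notifier() and
 * br_dev_update_stats(); everything prefixed my_fe_ is hypothetical.
 */
#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_ecache.h>

static int my_fe_conntrack_event(struct notifier_block *this,
				 unsigned long events, void *ptr)
{
	struct nf_ct_event *item = ptr;
	struct nf_conn *ct = item->ct;

	/* Only connection teardown matters to the fast path. */
	if (!(events & (1 << IPCT_DESTROY)))
		return NOTIFY_DONE;

	/* Remove the offloaded flow that belongs to ct here. */
	(void)ct;
	return NOTIFY_DONE;
}

static struct notifier_block my_fe_conntrack_notifier = {
	.notifier_call = my_fe_conntrack_event,
};

static int my_fe_register_ct_events(void)
{
	return nf_conntrack_register_chain_notifier(&init_net,
						    &my_fe_conntrack_notifier);
}

/* Periodically push counters gathered for accelerated bridge flows back into
 * the bridge device so its link statistics stay accurate; br_dev_update_stats()
 * silently ignores devices that are not bridges (IFF_EBRIDGE check).
 */
static void my_fe_sync_bridge_stats(struct net_device *br_dev,
				    u64 rx_packets, u64 rx_bytes,
				    u64 tx_packets, u64 tx_bytes)
{
	struct rtnl_link_stats64 stats = {
		.rx_packets = rx_packets,
		.rx_bytes   = rx_bytes,
		.tx_packets = tx_packets,
		.tx_bytes   = tx_bytes,
	};

	br_dev_update_stats(br_dev, &stats);
}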