--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -758,6 +758,9 @@ struct sk_buff {
__u8 offload_fwd_mark:1;
#endif
__u8 gro_skip:1;
+#ifdef CONFIG_SHORTCUT_FE
+ __u8 fast_forwarded:1;
+#endif
/* 1, 3 or 4 bit hole */

#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -20,6 +20,10 @@ struct timer_list {
unsigned long data;
u32 flags;

+#ifdef CONFIG_SHORTCUT_FE
+ unsigned long cust_data;
+#endif
+
#ifdef CONFIG_TIMER_STATS
int start_pid;
void *start_site;
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -73,6 +73,8 @@ struct nf_ct_event {
#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
extern int nf_conntrack_register_notifier(struct net *net, struct notifier_block *nb);
extern int nf_conntrack_unregister_notifier(struct net *net, struct notifier_block *nb);
+extern int nf_conntrack_register_chain_notifier(struct net *net, struct notifier_block *nb);
+extern int nf_conntrack_unregister_chain_notifier(struct net *net, struct notifier_block *nb);
#else
struct nf_ct_event_notifier {
int (*fcn)(unsigned int events, struct nf_ct_event *item);
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -444,3 +444,6 @@ config HAVE_CBPF_JIT
# Extended BPF JIT (eBPF)
config HAVE_EBPF_JIT
bool
+
+config SHORTCUT_FE
+ bool "Enables kernel network stack path for Shortcut Forwarding Engine"
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2974,7 +2974,16 @@ static int xmit_one(struct sk_buff *skb,
#else
if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
#endif
+#ifdef CONFIG_SHORTCUT_FE
+ /* If this skb has been fast forwarded then we don't want it to
+ * go to any taps (by definition we're trying to bypass them).
+ */
+ if (!skb->fast_forwarded) {
+#endif
dev_queue_xmit_nit(skb, dev);
+#ifdef CONFIG_SHORTCUT_FE
+ }
+#endif

#ifdef CONFIG_ETHERNET_PACKET_MANGLE
if (!dev->eth_mangle_tx ||
@@ -4126,6 +4135,11 @@ void netdev_rx_handler_unregister(struct
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);

+#ifdef CONFIG_SHORTCUT_FE
+int (*athrs_fast_nat_recv)(struct sk_buff *skb) __rcu __read_mostly;
+EXPORT_SYMBOL_GPL(athrs_fast_nat_recv);
+#endif
+
/*
* Limit the use of PFMEMALLOC reserves to those protocols that implement
* the special handling of PFMEMALLOC skbs.
@@ -4174,6 +4188,10 @@ static int __netif_receive_skb_core(stru
int ret = NET_RX_DROP;
__be16 type;

+#ifdef CONFIG_SHORTCUT_FE
+ int (*fast_recv)(struct sk_buff *skb);
+#endif
+
net_timestamp_check(!netdev_tstamp_prequeue, skb);

trace_netif_receive_skb(skb);
@@ -4199,6 +4217,16 @@ another_round:
goto out;
}

+#ifdef CONFIG_SHORTCUT_FE
+ fast_recv = rcu_dereference(athrs_fast_nat_recv);
+ if (fast_recv) {
+ if (fast_recv(skb)) {
+ ret = NET_RX_SUCCESS;
+ goto out;
+ }
+ }
+#endif
+
#ifdef CONFIG_NET_CLS_ACT
if (skb->tc_verd & TC_NCLS) {
skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -162,7 +162,11 @@ int nf_conntrack_eventmask_report(unsign

rcu_read_lock();
notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ if (!notify && !rcu_dereference_raw(net->ct.nf_conntrack_chain.head))
+#else
if (!notify)
+#endif
goto out_unlock;

e = nf_ct_ecache_find(ct);
@@ -181,7 +185,14 @@ int nf_conntrack_eventmask_report(unsign
if (!((eventmask | missed) & e->ctmask))
goto out_unlock;

+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ ret = atomic_notifier_call_chain(&net->ct.nf_conntrack_chain,
+ eventmask | missed, &item);
+ if (notify)
+ ret = notify->fcn(eventmask | missed, &item);
+#else
ret = notify->fcn(eventmask | missed, &item);
+#endif
if (unlikely(ret < 0 || missed)) {
spin_lock_bh(&ct->lock);
if (ret < 0) {
@@ -263,7 +274,11 @@ void nf_ct_deliver_cached_events(struct

rcu_read_lock();
notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ if ((notify == NULL) && !rcu_dereference_raw(net->ct.nf_conntrack_chain.head))
+#else
if (notify == NULL)
+#endif
goto out_unlock;

e = nf_ct_ecache_find(ct);
@@ -287,7 +302,15 @@ void nf_ct_deliver_cached_events(struct
item.portid = 0;
item.report = 0;

+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ ret = atomic_notifier_call_chain(&net->ct.nf_conntrack_chain,
+ events | missed,
+ &item);
+ if (notify != NULL)
+ ret = notify->fcn(events | missed, &item);
+#else
ret = notify->fcn(events | missed, &item);
+#endif

if (likely(ret >= 0 && !missed))
goto out_unlock;
@@ -340,6 +363,11 @@ int nf_conntrack_register_notifier(struc
{
return atomic_notifier_chain_register(&net->ct.nf_conntrack_chain, nb);
}
+int nf_conntrack_register_chain_notifier(struct net *net, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&net->ct.nf_conntrack_chain, nb);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_register_chain_notifier);
#else
int nf_conntrack_register_notifier(struct net *net,
struct nf_ct_event_notifier *new)
@@ -369,6 +397,11 @@ int nf_conntrack_unregister_notifier(str
{
return atomic_notifier_chain_unregister(&net->ct.nf_conntrack_chain, nb);
}
+int nf_conntrack_unregister_chain_notifier(struct net *net, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&net->ct.nf_conntrack_chain, nb);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_unregister_chain_notifier);
#else
void nf_conntrack_unregister_notifier(struct net *net,
struct nf_ct_event_notifier *new)
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -35,11 +35,19 @@

/* Do not check the TCP window for incoming packets */
static int nf_ct_tcp_no_window_check __read_mostly = 1;
+#ifdef CONFIG_SHORTCUT_FE
+EXPORT_SYMBOL_GPL(nf_ct_tcp_no_window_check);
+#endif
+

/* "Be conservative in what you do,
be liberal in what you accept from others."
If it's non-zero, we mark only out of window RST segments as INVALID. */
static int nf_ct_tcp_be_liberal __read_mostly = 0;
+#ifdef CONFIG_SHORTCUT_FE
+EXPORT_SYMBOL_GPL(nf_ct_tcp_be_liberal);
+#endif
+

/* If it is set to zero, we disable picking up already established
connections.  */
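
The hunks above only add the hook points (the athrs_fast_nat_recv receive hook, the skb->fast_forwarded flag, and the conntrack chain notifier); the forwarding engine itself lives in an out-of-tree module. As context, here is a minimal, hypothetical sketch of how such a module might attach to these hooks, assuming CONFIG_SHORTCUT_FE and CONFIG_NF_CONNTRACK_CHAIN_EVENTS are enabled. It is not part of the patch, all sfe_example_* names are illustrative, and the extern declaration of athrs_fast_nat_recv is assumed to come from the engine's own headers.

/* Illustrative consumer of the hooks added by this patch -- not part of it. */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>
#include <linux/notifier.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_ecache.h>

/* Defined and exported in net/core/dev.c by this patch; the extern
 * declaration is assumed to be provided by the engine's own header. */
extern int (*athrs_fast_nat_recv)(struct sk_buff *skb) __rcu;

/* Called from __netif_receive_skb_core(): return 1 to consume the skb
 * (the core then reports NET_RX_SUCCESS), 0 to fall through to the normal
 * stack. A real engine would look up its flow table here and set
 * skb->fast_forwarded on packets it transmits itself so that xmit_one()
 * skips the packet taps. */
static int sfe_example_recv(struct sk_buff *skb)
{
	return 0;
}

/* Conntrack events arrive as (eventmask | missed, &item) on the atomic
 * notifier chain that this patch wires into nf_conntrack_eventmask_report()
 * and nf_ct_deliver_cached_events(). */
static int sfe_example_conntrack_event(struct notifier_block *nb,
				       unsigned long events, void *ptr)
{
	struct nf_ct_event *item = ptr;

	/* Flush or refresh flow entries for item->ct as needed. */
	(void)item;
	return NOTIFY_DONE;
}

static struct notifier_block sfe_example_nb = {
	.notifier_call = sfe_example_conntrack_event,
};

static int __init sfe_example_init(void)
{
	int ret;

	ret = nf_conntrack_register_chain_notifier(&init_net, &sfe_example_nb);
	if (ret)
		return ret;

	rcu_assign_pointer(athrs_fast_nat_recv, sfe_example_recv);
	return 0;
}

static void __exit sfe_example_exit(void)
{
	RCU_INIT_POINTER(athrs_fast_nat_recv, NULL);
	synchronize_rcu();
	nf_conntrack_unregister_chain_notifier(&init_net, &sfe_example_nb);
}

module_init(sfe_example_init);
module_exit(sfe_example_exit);
MODULE_LICENSE("GPL");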