From 531a3018ed0b5ff3d40ed3db66f132f945c21f37 Mon Sep 17 00:00:00 2001
From: dlg
Date: Tue, 2 Jan 2018 07:08:10 +0000
Subject: [PATCH] reintroduce tx mitigation

to quote the previous commit:

> this replaces ifq_start with code that waits until at least 4 packets
> have been queued on the ifq before calling the drivers start routine.
> if less than 4 packets get queued, the start routine is called from
> a task in a softnet tq.
>
> 4 packets was chosen this time based on testing sephe did in dfly
> which showed no real improvement when bundling more packets. hrvoje
> popovski tested this on several nics and found an improvement of
> 10 to 20 percent when forwarding across the board.
>
> because some of the ifq's work could be sitting on a softnet tq,
> ifq_barrier now calls taskq_barrier to guarantee any work that was
> pending there has finished.
>
> ok mpi@ visa@

this was backed out because of a race in the net80211 stack that
anton@ hit. mpi@ committed a workaround for it in revision 1.30 of
src/sys/net80211/ieee80211_pae_output.c.

im putting this in again so we can see what breaks next.
---
(two userland sketches of the bundling and barrier behaviour follow
after the diff)

 sys/net/ifq.c | 43 ++++++++++++++++++++++++++++++++++++++++++-
 sys/net/ifq.h | 11 ++++-------
 2 files changed, 46 insertions(+), 8 deletions(-)

diff --git a/sys/net/ifq.c b/sys/net/ifq.c
index add143c1acb..6b71376a1b4 100644
--- a/sys/net/ifq.c
+++ b/sys/net/ifq.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: ifq.c,v 1.19 2017/12/15 01:40:39 dlg Exp $ */
+/*	$OpenBSD: ifq.c,v 1.20 2018/01/02 07:08:10 dlg Exp $ */
 
 /*
  * Copyright (c) 2015 David Gwynne
@@ -70,9 +70,16 @@ struct priq {
 void	ifq_start_task(void *);
 void	ifq_restart_task(void *);
 void	ifq_barrier_task(void *);
+void	ifq_bundle_task(void *);
 
 #define TASK_ONQUEUE 0x1
 
+static inline void
+ifq_run_start(struct ifqueue *ifq)
+{
+	ifq_serialize(ifq, &ifq->ifq_start);
+}
+
 void
 ifq_serialize(struct ifqueue *ifq, struct task *t)
 {
@@ -113,6 +120,16 @@ ifq_is_serialized(struct ifqueue *ifq)
 	return (ifq->ifq_serializer == curcpu());
 }
 
+void
+ifq_start(struct ifqueue *ifq)
+{
+	if (ifq_len(ifq) >= min(4, ifq->ifq_maxlen)) {
+		task_del(ifq->ifq_softnet, &ifq->ifq_bundle);
+		ifq_run_start(ifq);
+	} else
+		task_add(ifq->ifq_softnet, &ifq->ifq_bundle);
+}
+
 void
 ifq_start_task(void *p)
 {
@@ -136,12 +153,32 @@ ifq_restart_task(void *p)
 	ifp->if_qstart(ifq);
 }
 
+void
+ifq_bundle_task(void *p)
+{
+	struct ifqueue *ifq = p;
+
+	ifq_run_start(ifq);
+}
+
 void
 ifq_barrier(struct ifqueue *ifq)
 {
 	struct cond c = COND_INITIALIZER();
 	struct task t = TASK_INITIALIZER(ifq_barrier_task, &c);
 
+	if (!task_del(ifq->ifq_softnet, &ifq->ifq_bundle)) {
+		int netlocked = (rw_status(&netlock) == RW_WRITE);
+
+		if (netlocked) /* XXXSMP breaks atomicity */
+			NET_UNLOCK();
+
+		taskq_barrier(ifq->ifq_softnet);
+
+		if (netlocked)
+			NET_LOCK();
+	}
+
 	if (ifq->ifq_serializer == NULL)
 		return;
 
@@ -166,6 +203,7 @@ void
 ifq_init(struct ifqueue *ifq, struct ifnet *ifp, unsigned int idx)
 {
 	ifq->ifq_if = ifp;
+	ifq->ifq_softnet = net_tq(ifp->if_index);
 	ifq->ifq_softc = NULL;
 
 	mtx_init(&ifq->ifq_mtx, IPL_NET);
@@ -187,6 +225,7 @@ ifq_init(struct ifqueue *ifq, struct ifnet *ifp, unsigned int idx)
 	mtx_init(&ifq->ifq_task_mtx, IPL_NET);
 	TAILQ_INIT(&ifq->ifq_task_list);
 	ifq->ifq_serializer = NULL;
+	task_set(&ifq->ifq_bundle, ifq_bundle_task, ifq);
 
 	task_set(&ifq->ifq_start, ifq_start_task, ifq);
 	task_set(&ifq->ifq_restart, ifq_restart_task, ifq);
@@ -238,6 +277,8 @@ ifq_destroy(struct ifqueue *ifq)
 {
 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
 
+	ifq_barrier(ifq); /* ensure nothing is running with the ifq */
+
 	/* don't need to lock because this is the last use of the ifq */
 	ifq->ifq_ops->ifqop_purge(ifq, &ml);
 
diff --git a/sys/net/ifq.h b/sys/net/ifq.h
index 8e0fcd8f01d..345d1068594 100644
--- a/sys/net/ifq.h
+++ b/sys/net/ifq.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: ifq.h,v 1.18 2017/12/21 03:59:32 dlg Exp $ */
+/*	$OpenBSD: ifq.h,v 1.19 2018/01/02 07:08:10 dlg Exp $ */
 
 /*
  * Copyright (c) 2015 David Gwynne
@@ -25,6 +25,7 @@ struct ifq_ops;
 
 struct ifqueue {
 	struct ifnet		*ifq_if;
+	struct taskq		*ifq_softnet;
 	union {
 		void			*_ifq_softc;
 		/*
@@ -57,6 +58,7 @@ struct ifqueue {
 	struct mutex		 ifq_task_mtx;
 	struct task_list	 ifq_task_list;
 	void			*ifq_serializer;
+	struct task		 ifq_bundle;
 
 	/* work to be serialised */
 	struct task		 ifq_start;
@@ -405,6 +407,7 @@ void		 ifq_attach(struct ifqueue *, const struct ifq_ops *, void *);
 void		 ifq_destroy(struct ifqueue *);
 void		 ifq_add_data(struct ifqueue *, struct if_data *);
 int		 ifq_enqueue(struct ifqueue *, struct mbuf *);
+void		 ifq_start(struct ifqueue *);
 struct mbuf	*ifq_deq_begin(struct ifqueue *);
 void		 ifq_deq_commit(struct ifqueue *, struct mbuf *);
 void		 ifq_deq_rollback(struct ifqueue *, struct mbuf *);
@@ -440,12 +443,6 @@ ifq_is_oactive(struct ifqueue *ifq)
 	return (ifq->ifq_oactive);
 }
 
-static inline void
-ifq_start(struct ifqueue *ifq)
-{
-	ifq_serialize(ifq, &ifq->ifq_start);
-}
-
 static inline void
 ifq_restart(struct ifqueue *ifq)
 {
-- 
2.20.1
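
To make the bundling heuristic concrete, here is a standalone userland
sketch of the behaviour the new ifq_start() implements. Everything in
it is a hypothetical stand-in: mock_ifq, mock_start(), the deferred
flag, and the umin() helper take the place of the real ifq,
ifq_serialize()/ifp->if_qstart(), the softnet bundle task, and the
kernel's min(), so the threshold decision can run in isolation.

/*
 * Userland sketch of the tx mitigation threshold. All names here are
 * hypothetical stand-ins; only the bundling decision mirrors the diff.
 */
#include <stdio.h>
#include <stdbool.h>

#define MIT_THRESHOLD	4	/* the 4-packet bundle from the commit */

struct mock_ifq {
	unsigned int	 len;		/* packets currently queued */
	unsigned int	 maxlen;	/* queue depth limit */
	bool		 deferred;	/* stands in for the softnet task */
};

static unsigned int
umin(unsigned int a, unsigned int b)
{
	return (a < b ? a : b);
}

/* stands in for ifq_run_start()/ifp->if_qstart(): drain the queue */
static void
mock_start(struct mock_ifq *ifq)
{
	printf("start routine called, draining %u packet(s)\n", ifq->len);
	ifq->len = 0;
}

/*
 * Mirrors the new ifq_start(): call the driver immediately once the
 * bundle threshold is reached, otherwise arm a deferred task so a
 * short burst still gets transmitted soon after.
 */
static void
mock_ifq_start(struct mock_ifq *ifq)
{
	if (ifq->len >= umin(MIT_THRESHOLD, ifq->maxlen)) {
		ifq->deferred = false;	/* task_del(): bundle superseded */
		mock_start(ifq);
	} else
		ifq->deferred = true;	/* task_add(): drain later */
}

int
main(void)
{
	struct mock_ifq ifq = { .len = 0, .maxlen = 256, .deferred = false };
	unsigned int i;

	/* queue six packets; the drain only happens on the fourth */
	for (i = 1; i <= 6; i++) {
		ifq.len++;
		mock_ifq_start(&ifq);
		printf("packet %u queued, deferred=%s\n", i,
		    ifq.deferred ? "yes" : "no");
	}
	return (0);
}

Run against six packets, the first three only arm the deferred task,
the fourth triggers an immediate drain, and the last two defer again.
That batching is where the quoted 10 to 20 percent forwarding
improvement comes from: the driver's start routine is entered once per
small batch rather than once per packet.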
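
The other half of the change is the barrier. The diff's ifq_barrier()
uses a cancel-or-drain pattern: if task_del() removes the still-pending
bundle, nothing can run it anymore; if task_del() finds nothing to
remove, the bundle task may already be executing on the softnet thread,
so taskq_barrier() has to wait for it to finish. Below is a minimal
pthread sketch of that pattern; every name is a hypothetical stand-in
(mock_task_del(), mock_taskq_barrier(), one worker thread standing in
for the softnet taskq), and the diff's netlock release/reacquire around
taskq_barrier() is omitted.

/*
 * Userland sketch of the cancel-or-drain barrier. All names are
 * hypothetical stand-ins for task_del(), taskq_barrier() and the
 * softnet taskq thread.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static bool queued = false;	/* bundle sits on the "taskq" */
static bool running = false;	/* worker is executing the bundle */
static bool worker_exit = false;

/* the stand-in for the softnet taskq thread */
static void *
worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&mtx);
	while (!worker_exit) {
		if (!queued) {
			pthread_cond_wait(&cv, &mtx);
			continue;
		}
		/* dequeue and mark running atomically, like a taskq */
		queued = false;
		running = true;
		pthread_mutex_unlock(&mtx);

		printf("bundle task: running deferred start\n");

		pthread_mutex_lock(&mtx);
		running = false;
		pthread_cond_broadcast(&cv);
	}
	pthread_mutex_unlock(&mtx);
	return (NULL);
}

/* stand-in for task_del(): true iff the bundle was still queued */
static bool
mock_task_del(void)
{
	bool was_queued;

	pthread_mutex_lock(&mtx);
	was_queued = queued;
	queued = false;
	pthread_mutex_unlock(&mtx);
	return (was_queued);
}

/* stand-in for taskq_barrier(): wait until no task is running */
static void
mock_taskq_barrier(void)
{
	pthread_mutex_lock(&mtx);
	while (running)
		pthread_cond_wait(&cv, &mtx);
	pthread_mutex_unlock(&mtx);
}

/* mirrors the new logic at the top of ifq_barrier() */
static void
mock_ifq_barrier(void)
{
	if (!mock_task_del())
		mock_taskq_barrier();
	printf("barrier: no deferred start can be running now\n");
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);

	/* queue a bundle, as ifq_start() does for a short burst */
	pthread_mutex_lock(&mtx);
	queued = true;
	pthread_cond_broadcast(&cv);
	pthread_mutex_unlock(&mtx);

	mock_ifq_barrier();

	pthread_mutex_lock(&mtx);
	worker_exit = true;
	pthread_cond_broadcast(&cv);
	pthread_mutex_unlock(&mtx);
	pthread_join(t, NULL);
	return (0);
}

The same ordering argument explains the new ifq_barrier() call at the
top of ifq_destroy(): a deferred bundle may still reference the ifq,
so it must be cancelled or drained before the queue is torn down.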