From 4e5507512067ff31d4f2118a4202dec750bff383 Mon Sep 17 00:00:00 2001 From: bluhm Date: Mon, 26 Apr 2021 07:55:16 +0000 Subject: [PATCH] Convert the ARP packet hold queue from mbuf list to mbuf queue which contains a mutex. Update la_hold_total with atomic operations. OK sashan@ --- sys/netinet/if_ether.c | 48 ++++++++++++++++++------------------------ 1 file changed, 20 insertions(+), 28 deletions(-) diff --git a/sys/netinet/if_ether.c b/sys/netinet/if_ether.c index 79ea9e139df..bdc5d5fd360 100644 --- a/sys/netinet/if_ether.c +++ b/sys/netinet/if_ether.c @@ -1,4 +1,4 @@ -/* $OpenBSD: if_ether.c,v 1.245 2021/04/23 21:55:36 bluhm Exp $ */ +/* $OpenBSD: if_ether.c,v 1.246 2021/04/26 07:55:16 bluhm Exp $ */ /* $NetBSD: if_ether.c,v 1.31 1996/05/11 12:59:58 mycroft Exp $ */ /* @@ -68,7 +68,7 @@ struct llinfo_arp { LIST_ENTRY(llinfo_arp) la_list; struct rtentry *la_rt; /* backpointer to rtentry */ - struct mbuf_list la_ml; /* packet hold queue */ + struct mbuf_queue la_mq; /* packet hold queue */ time_t la_refreshed; /* when was refresh sent */ int la_asked; /* number of queries sent */ }; @@ -189,7 +189,7 @@ arp_rtrequest(struct ifnet *ifp, int req, struct rtentry *rt) break; } - ml_init(&la->la_ml); + mq_init(&la->la_mq, LA_HOLD_QUEUE, IPL_SOFTNET); la->la_rt = rt; rt->rt_flags |= RTF_LLINFO; if ((rt->rt_flags & RTF_LOCAL) == 0) @@ -203,7 +203,7 @@ arp_rtrequest(struct ifnet *ifp, int req, struct rtentry *rt) LIST_REMOVE(la, la_list); rt->rt_llinfo = NULL; rt->rt_flags &= ~RTF_LLINFO; - la_hold_total -= ml_purge(&la->la_ml); + atomic_sub_int(&la_hold_total, mq_purge(&la->la_mq)); pool_put(&arp_pool, la); break; @@ -374,18 +374,11 @@ arpresolve(struct ifnet *ifp, struct rtentry *rt0, struct mbuf *m, * response yet. Insert mbuf in hold queue if below limit * if above the limit free the queue without queuing the new packet. 
*/ - if (la_hold_total < LA_HOLD_TOTAL) { - struct mbuf *mh; - - if (ml_len(&la->la_ml) >= LA_HOLD_QUEUE) { - mh = ml_dequeue(&la->la_ml); - la_hold_total--; - m_freem(mh); - } - ml_enqueue(&la->la_ml, m); - la_hold_total++; + if (atomic_inc_int_nv(&la_hold_total) <= LA_HOLD_TOTAL) { + if (mq_push(&la->la_mq, m) != 0) + atomic_dec_int(&la_hold_total); } else { - la_hold_total -= ml_purge(&la->la_ml); + atomic_sub_int(&la_hold_total, mq_purge(&la->la_mq) + 1); m_freem(m); } @@ -414,7 +407,8 @@ arpresolve(struct ifnet *ifp, struct rtentry *rt0, struct mbuf *m, rt->rt_expire += arpt_down; la->la_asked = 0; la->la_refreshed = 0; - la_hold_total -= ml_purge(&la->la_ml); + atomic_sub_int(&la_hold_total, + mq_purge(&la->la_mq)); } } } @@ -600,7 +594,7 @@ arpcache(struct ifnet *ifp, struct ether_arp *ea, struct rtentry *rt) struct in_addr *spa = (struct in_addr *)ea->arp_spa; char addr[INET_ADDRSTRLEN]; struct ifnet *rifp; - unsigned int len; + struct mbuf *m; int changed = 0; KERNEL_ASSERT_LOCKED(); @@ -672,20 +666,18 @@ arpcache(struct ifnet *ifp, struct ether_arp *ea, struct rtentry *rt) la->la_asked = 0; la->la_refreshed = 0; - while ((len = ml_len(&la->la_ml)) != 0) { - struct mbuf *mh; + while ((m = mq_dequeue(&la->la_mq)) != NULL) { + unsigned int len; - mh = ml_dequeue(&la->la_ml); - la_hold_total--; + atomic_dec_int(&la_hold_total); + len = mq_len(&la->la_mq); - ifp->if_output(ifp, mh, rt_key(rt), rt); + ifp->if_output(ifp, m, rt_key(rt), rt); - if (ml_len(&la->la_ml) == len) { + /* XXXSMP we discard if other CPU enqueues */ + if (mq_len(&la->la_mq) > len) { /* mbuf is back in queue. Discard. 
*/ - while ((mh = ml_dequeue(&la->la_ml)) != NULL) { - la_hold_total--; - m_freem(mh); - } + atomic_sub_int(&la_hold_total, mq_purge(&la->la_mq)); break; } } @@ -699,7 +691,7 @@ arpinvalidate(struct rtentry *rt) struct llinfo_arp *la = (struct llinfo_arp *)rt->rt_llinfo; struct sockaddr_dl *sdl = satosdl(rt->rt_gateway); - la_hold_total -= ml_purge(&la->la_ml); + atomic_sub_int(&la_hold_total, mq_purge(&la->la_mq)); sdl->sdl_alen = 0; la->la_asked = 0; } -- 2.20.1