From 0b9ea2785a44171b5f54de1d7806f4a554e50e47 Mon Sep 17 00:00:00 2001 From: mvs Date: Sat, 16 Dec 2023 22:16:02 +0000 Subject: [PATCH] Rework pflowioctl() lock dances. Release netlock and take `sc_lock' rwlock(9) just in the beginning of pflowioctl() and do corresponding operations in the end. Use `sc_lock' to protect `sc_dying'. We need to release netlock not only to keep locks order with `sc_lock' rwlock(9), but also because pflowioctl() calls some operations like socreate() or soclose() on udp(4) socket. Current implementation has many relocking places which breaks atomicity, so merge them into one. The `sc_lock' rwlock(9) is taken during all pflowioctl() call, so `sc_dying' atomicity is not broken. Not the ideal solution, but better than we have now. Tested by Hrvoje Popovski. Discussed with and ok from sashan --- sys/net/if_pflow.c | 86 ++++++++++++++++++++++++++-------------------- sys/net/if_pflow.h | 5 ++- 2 files changed, 51 insertions(+), 40 deletions(-) diff --git a/sys/net/if_pflow.c b/sys/net/if_pflow.c index f8b19735ab1..94608c56dfd 100644 --- a/sys/net/if_pflow.c +++ b/sys/net/if_pflow.c @@ -1,4 +1,4 @@ -/* $OpenBSD: if_pflow.c,v 1.105 2023/12/12 12:38:52 mvs Exp $ */ +/* $OpenBSD: if_pflow.c,v 1.106 2023/12/16 22:16:02 mvs Exp $ */ /* * Copyright (c) 2011 Florian Obser @@ -297,9 +297,9 @@ pflow_clone_destroy(struct ifnet *ifp) error = 0; - NET_LOCK(); + rw_enter_write(&sc->sc_lock); sc->sc_dying = 1; - NET_UNLOCK(); + rw_exit_write(&sc->sc_lock); KERNEL_ASSERT_LOCKED(); SMR_SLIST_REMOVE_LOCKED(&pflowif_list, sc, pflow_softc, sc_next); @@ -483,6 +483,7 @@ pflow_set(struct pflow_softc *sc, struct pflowreq *pflowr) sc->so = NULL; } + NET_LOCK(); mtx_enter(&sc->sc_mtx); /* error check is above */ @@ -504,6 +505,7 @@ pflow_set(struct pflow_softc *sc, struct pflowreq *pflowr) } mtx_leave(&sc->sc_mtx); + NET_UNLOCK(); return (0); } @@ -515,18 +517,33 @@ pflowioctl(struct ifnet *ifp, u_long cmd, caddr_t data) struct pflow_softc *sc = ifp->if_softc; struct 
ifreq *ifr = (struct ifreq *)data; struct pflowreq pflowr; - int error; + int error = 0; - if (sc->sc_dying) - return ENXIO; + switch (cmd) { + case SIOCSIFADDR: + case SIOCSIFDSTADDR: + case SIOCSIFFLAGS: + case SIOCSIFMTU: + case SIOCGETPFLOW: + case SIOCSETPFLOW: + break; + default: + return (ENOTTY); + } + + /* XXXSMP: enforce lock order */ + NET_UNLOCK(); + rw_enter_write(&sc->sc_lock); + + if (sc->sc_dying) { + error = ENXIO; + goto out; + } switch (cmd) { case SIOCSIFADDR: case SIOCSIFDSTADDR: case SIOCSIFFLAGS: - /* XXXSMP: enforce lock order */ - NET_UNLOCK(); - rw_enter_read(&sc->sc_lock); NET_LOCK(); if ((ifp->if_flags & IFF_UP) && sc->so != NULL) { ifp->if_flags |= IFF_RUNNING; @@ -537,60 +554,53 @@ pflowioctl(struct ifnet *ifp, u_long cmd, caddr_t data) mtx_leave(&sc->sc_mtx); } else ifp->if_flags &= ~IFF_RUNNING; - rw_exit_read(&sc->sc_lock); + NET_UNLOCK(); break; + case SIOCSIFMTU: - if (ifr->ifr_mtu < PFLOW_MINMTU) - return (EINVAL); + if (ifr->ifr_mtu < PFLOW_MINMTU) { + error = EINVAL; + goto out; + } if (ifr->ifr_mtu > MCLBYTES) ifr->ifr_mtu = MCLBYTES; + NET_LOCK(); if (ifr->ifr_mtu < ifp->if_mtu) pflow_flush(sc); mtx_enter(&sc->sc_mtx); pflow_setmtu(sc, ifr->ifr_mtu); mtx_leave(&sc->sc_mtx); + NET_UNLOCK(); break; case SIOCGETPFLOW: bzero(&pflowr, sizeof(pflowr)); - /* XXXSMP: enforce lock order */ - NET_UNLOCK(); - rw_enter_read(&sc->sc_lock); - NET_LOCK(); if (sc->sc_flowsrc != NULL) memcpy(&pflowr.flowsrc, sc->sc_flowsrc, sc->sc_flowsrc->sa_len); if (sc->sc_flowdst != NULL) memcpy(&pflowr.flowdst, sc->sc_flowdst, sc->sc_flowdst->sa_len); - rw_exit_read(&sc->sc_lock); mtx_enter(&sc->sc_mtx); pflowr.version = sc->sc_version; mtx_leave(&sc->sc_mtx); - if ((error = copyout(&pflowr, ifr->ifr_data, - sizeof(pflowr)))) - return (error); + if ((error = copyout(&pflowr, ifr->ifr_data, sizeof(pflowr)))) + goto out; break; case SIOCSETPFLOW: if ((error = suser(p)) != 0) - return (error); - if ((error = copyin(ifr->ifr_data, &pflowr, - 
sizeof(pflowr)))) - return (error); + goto out; + if ((error = copyin(ifr->ifr_data, &pflowr, sizeof(pflowr)))) + goto out; - /* XXXSMP breaks atomicity */ - NET_UNLOCK(); - rw_enter_write(&sc->sc_lock); error = pflow_set(sc, &pflowr); - NET_LOCK(); - if (error != 0) { - rw_exit_write(&sc->sc_lock); - return (error); - } + if (error != 0) + goto out; + NET_LOCK(); if ((ifp->if_flags & IFF_UP) && sc->so != NULL) { ifp->if_flags |= IFF_RUNNING; mtx_enter(&sc->sc_mtx); @@ -599,14 +609,16 @@ pflowioctl(struct ifnet *ifp, u_long cmd, caddr_t data) mtx_leave(&sc->sc_mtx); } else ifp->if_flags &= ~IFF_RUNNING; - rw_exit_write(&sc->sc_lock); + NET_UNLOCK(); break; - - default: - return (ENOTTY); } - return (0); + +out: + rw_exit_write(&sc->sc_lock); + NET_LOCK(); + + return (error); } int diff --git a/sys/net/if_pflow.h b/sys/net/if_pflow.h index a4cc724d06f..13dc420b07d 100644 --- a/sys/net/if_pflow.h +++ b/sys/net/if_pflow.h @@ -1,4 +1,4 @@ -/* $OpenBSD: if_pflow.h,v 1.22 2023/12/12 00:03:31 mvs Exp $ */ +/* $OpenBSD: if_pflow.h,v 1.23 2023/12/16 22:16:02 mvs Exp $ */ /* * Copyright (c) 2008 Henning Brauer @@ -174,7 +174,6 @@ struct pflow_ipfix_flow6 { /* * Locks used to protect struct members and global data * I immutable after creation - * N net lock * m this pflow_softc' `sc_mtx' * p this pflow_softc' `sc_lock' */ @@ -183,7 +182,7 @@ struct pflow_softc { struct mutex sc_mtx; struct rwlock sc_lock; - int sc_dying; /* [N] */ + int sc_dying; /* [p] */ struct ifnet sc_if; unsigned int sc_count; /* [m] */ -- 2.20.1