From: dlg
Date: Wed, 9 Nov 2022 22:15:50 +0000 (+0000)
Subject: revert r1.673: replace SRP with SMR in the if_idxmap.
X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=74e5fbe8e166d85c35f4e134064a22f19cac6d6b;p=openbsd

revert r1.673: replace SRP with SMR in the if_idxmap.

if the map has to be reallocated during boot, there's an smr_barrier
waiting for the old map to become unused. that barrier ends up waiting
for cpus that aren't running yet because we haven't finished booting
yet, so boot gets stuck.

found by hrvoje popovski
---

diff --git a/sys/net/if.c b/sys/net/if.c
index 96d8b94cae1..dcdc14c0eac 100644
--- a/sys/net/if.c
+++ b/sys/net/if.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if.c,v 1.675 2022/11/09 13:09:30 kn Exp $ */
+/* $OpenBSD: if.c,v 1.676 2022/11/09 22:15:50 dlg Exp $ */
 /* $NetBSD: if.c,v 1.35 1996/05/07 05:26:04 thorpej Exp $ */
 
 /*
@@ -87,7 +87,6 @@
 #include <sys/proc.h>
 #include <sys/stdint.h>	/* uintptr_t */
 #include <sys/rwlock.h>
-#include <sys/smr.h>
 
 #include <net/if.h>
 #include <net/if_dl.h>
@@ -196,21 +195,30 @@ void if_ifp_dtor(void *, void *);
 void if_map_dtor(void *, void *);
 struct ifnet *if_ref(struct ifnet *);
 
+/*
+ * struct if_map
+ *
+ * bounded array of ifnet srp pointers used to fetch references of live
+ * interfaces with if_get().
+ */
+
+struct if_map {
+	unsigned long limit;
+	/* followed by limit ifnet srp pointers */
+};
+
 /*
  * struct if_idxmap
  *
  * infrastructure to manage updates and accesses to the current if_map.
- *
- * interface index 0 is special and represents "no interface", so we
- * use the 0th slot in map to store the length of the array.
  */
 
 struct if_idxmap {
-	unsigned int serial;
-	unsigned int count;
-	struct ifnet **map; /* SMR protected */
-	struct rwlock lock;
-	unsigned char *usedidx; /* bitmap of indices in use */
+	unsigned int serial;
+	unsigned int count;
+	struct srp map;
+	struct rwlock lock;
+	unsigned char *usedidx; /* bitmap of indices in use */
 };
 
 void if_idxmap_init(unsigned int);
@@ -257,7 +265,7 @@ ifinit(void)
 	 * most machines boot with 4 or 5 interfaces, so size the initial map
 	 * to accommodate this
 	 */
-	if_idxmap_init(8); /* 8 is a nice power of 2 for malloc */
+	if_idxmap_init(8);
 
 	for (i = 0; i < NET_TASKQ; i++) {
 		nettqmp[i] = taskq_create("softnet", 1, IPL_NET, TASKQ_MPSAFE);
@@ -266,41 +274,48 @@ ifinit(void)
 	}
 }
 
-static struct if_idxmap if_idxmap;
+static struct if_idxmap if_idxmap = {
+	0,
+	0,
+	SRP_INITIALIZER(),
+	RWLOCK_INITIALIZER("idxmaplk"),
+	NULL,
+};
 
-struct ifnet_head ifnetlist = TAILQ_HEAD_INITIALIZER(ifnetlist);
+struct srp_gc if_ifp_gc = SRP_GC_INITIALIZER(if_ifp_dtor, NULL);
+struct srp_gc if_map_gc = SRP_GC_INITIALIZER(if_map_dtor, NULL);
 
-static inline unsigned int
-if_idxmap_limit(struct ifnet **if_map)
-{
-	return ((uintptr_t)if_map[0]);
-}
+struct ifnet_head ifnetlist = TAILQ_HEAD_INITIALIZER(ifnetlist);
 
 void
 if_idxmap_init(unsigned int limit)
 {
-	struct ifnet **if_map;
+	struct if_map *if_map;
+	struct srp *map;
+	unsigned int i;
 
-	rw_init(&if_idxmap.lock, "idxmaplk");
-	if_idxmap.serial = 1; /* skip ifidx 0 */
+	if_idxmap.serial = 1; /* skip ifidx 0 so it can return NULL */
 
-	if_map = mallocarray(limit, sizeof(*if_map), M_IFADDR,
-	    M_WAITOK | M_ZERO);
+	if_map = malloc(sizeof(*if_map) + limit * sizeof(*map),
+	    M_IFADDR, M_WAITOK);
 
-	if_map[0] = (struct ifnet *)(uintptr_t)limit;
+	if_map->limit = limit;
+	map = (struct srp *)(if_map + 1);
+	for (i = 0; i < limit; i++)
+		srp_init(&map[i]);
 
 	if_idxmap.usedidx = malloc(howmany(limit, NBBY),
	    M_IFADDR, M_WAITOK | M_ZERO);
 
 	/* this is called early so there's nothing to race with */
-	SMR_PTR_SET_LOCKED(&if_idxmap.map, if_map);
+	srp_update_locked(&if_map_gc, &if_idxmap.map, if_map);
 }
 
 void
 if_idxmap_alloc(struct ifnet *ifp)
 {
-	struct ifnet **if_map, **oif_map = NULL;
-	unsigned int limit, olimit;
+	struct if_map *if_map;
+	struct srp *map;
 	unsigned int index, i;
 
 	refcnt_init(&ifp->if_refcnt);
@@ -310,41 +325,49 @@ if_idxmap_alloc(struct ifnet *ifp)
 	if (++if_idxmap.count >= USHRT_MAX)
 		panic("too many interfaces");
 
-	if_map = SMR_PTR_GET_LOCKED(&if_idxmap.map);
-	limit = if_idxmap_limit(if_map);
+	if_map = srp_get_locked(&if_idxmap.map);
+	map = (struct srp *)(if_map + 1);
 
 	index = if_idxmap.serial++ & USHRT_MAX;
 
-	if (index >= limit) {
+	if (index >= if_map->limit) {
+		struct if_map *nif_map;
+		struct srp *nmap;
+		unsigned int nlimit;
+		struct ifnet *nifp;
 		unsigned char *nusedidx;
 
-		oif_map = if_map;
-		olimit = limit;
-
-		limit = olimit * 2;
-		if_map = mallocarray(limit, sizeof(*if_map), M_IFADDR,
-		    M_WAITOK | M_ZERO);
-		if_map[0] = (struct ifnet *)(uintptr_t)limit;
-
-		for (i = 1; i < olimit; i++) {
-			struct ifnet *oifp = SMR_PTR_GET_LOCKED(&oif_map[i]);
-			if (oifp == NULL)
-				continue;
+		nlimit = if_map->limit * 2;
+		nif_map = malloc(sizeof(*nif_map) + nlimit * sizeof(*nmap),
+		    M_IFADDR, M_WAITOK);
+		nmap = (struct srp *)(nif_map + 1);
+
+		nif_map->limit = nlimit;
+		for (i = 0; i < if_map->limit; i++) {
+			srp_init(&nmap[i]);
+			nifp = srp_get_locked(&map[i]);
+			if (nifp != NULL) {
+				srp_update_locked(&if_ifp_gc, &nmap[i],
+				    if_ref(nifp));
+			}
+		}
 
-			/*
-			 * nif_map isn't visible yet, so don't need
-			 * SMR_PTR_SET_LOCKED and its membar.
-			 */
-			if_map[i] = if_ref(oifp);
+		while (i < nlimit) {
+			srp_init(&nmap[i]);
+			i++;
 		}
 
-		nusedidx = malloc(howmany(limit, NBBY),
+		nusedidx = malloc(howmany(nlimit, NBBY),
 		    M_IFADDR, M_WAITOK | M_ZERO);
-		memcpy(nusedidx, if_idxmap.usedidx, howmany(olimit, NBBY));
-		free(if_idxmap.usedidx, M_IFADDR, howmany(olimit, NBBY));
+		memcpy(nusedidx, if_idxmap.usedidx,
+		    howmany(if_map->limit, NBBY));
+		free(if_idxmap.usedidx, M_IFADDR,
+		    howmany(if_map->limit, NBBY));
 		if_idxmap.usedidx = nusedidx;
 
-		SMR_PTR_SET_LOCKED(&if_idxmap.map, if_map);
+		srp_update_locked(&if_map_gc, &if_idxmap.map, nif_map);
+		if_map = nif_map;
+		map = nmap;
 	}
 
 	/* pick the next free index */
@@ -354,40 +377,32 @@ if_idxmap_alloc(struct ifnet *ifp)
 		index = if_idxmap.serial++ & USHRT_MAX;
 	}
 
-	KASSERT(index != 0 && index < limit);
+	KASSERT(index != 0 && index < if_map->limit);
 	KASSERT(isclr(if_idxmap.usedidx, index));
 	setbit(if_idxmap.usedidx, index);
 
 	ifp->if_index = index;
 
 	rw_exit_write(&if_idxmap.lock);
-
-	if (oif_map != NULL) {
-		smr_barrier();
-		for (i = 1; i < olimit; i++)
-			if_put(oif_map[i]);
-		free(oif_map, M_IFADDR, olimit * sizeof(*oif_map));
-	}
 }
 
 void
 if_idxmap_insert(struct ifnet *ifp)
 {
-	struct ifnet **if_map;
+	struct if_map *if_map;
+	struct srp *map;
 	unsigned int index = ifp->if_index;
 
 	rw_enter_write(&if_idxmap.lock);
 
-	if_map = SMR_PTR_GET_LOCKED(&if_idxmap.map);
+	if_map = srp_get_locked(&if_idxmap.map);
+	map = (struct srp *)(if_map + 1);
 
-	KASSERTMSG(index != 0 && index < if_idxmap_limit(if_map),
-	    "%s(%p) index %u vs limit %u", ifp->if_xname, ifp, index,
-	    if_idxmap_limit(if_map));
-	KASSERT(SMR_PTR_GET_LOCKED(&if_map[index]) == NULL);
+	KASSERT(index != 0 && index < if_map->limit);
 	KASSERT(isset(if_idxmap.usedidx, index));
 
 	/* commit */
-	SMR_PTR_SET_LOCKED(&if_map[index], if_ref(ifp));
+	srp_update_locked(&if_ifp_gc, &map[index], if_ref(ifp));
 
 	rw_exit_write(&if_idxmap.lock);
 }
@@ -395,29 +410,53 @@ if_idxmap_insert(struct ifnet *ifp)
 void
 if_idxmap_remove(struct ifnet *ifp)
 {
-	struct ifnet **if_map;
-	unsigned int index = ifp->if_index;
+	struct if_map *if_map;
+	struct srp *map;
+	unsigned int index;
 
-	rw_enter_write(&if_idxmap.lock);
+	index = ifp->if_index;
 
-	if_map = SMR_PTR_GET_LOCKED(&if_idxmap.map);
+	rw_enter_write(&if_idxmap.lock);
 
-	KASSERT(index != 0 && index < if_idxmap_limit(if_map));
-	KASSERT(SMR_PTR_GET_LOCKED(&if_map[index]) == ifp);
-	KASSERT(isset(if_idxmap.usedidx, index));
+	if_map = srp_get_locked(&if_idxmap.map);
+	KASSERT(index < if_map->limit);
 
-	SMR_PTR_SET_LOCKED(&if_map[index], NULL);
+	map = (struct srp *)(if_map + 1);
+	KASSERT(ifp == (struct ifnet *)srp_get_locked(&map[index]));
+	srp_update_locked(&if_ifp_gc, &map[index], NULL);
 
 	if_idxmap.count--;
+
+	KASSERT(isset(if_idxmap.usedidx, index));
 	clrbit(if_idxmap.usedidx, index);
 	/* end of if_idxmap modifications */
 
 	rw_exit_write(&if_idxmap.lock);
+}
 
-	smr_barrier();
+void
+if_ifp_dtor(void *null, void *ifp)
+{
 	if_put(ifp);
 }
 
+void
+if_map_dtor(void *null, void *m)
+{
+	struct if_map *if_map = m;
+	struct srp *map = (struct srp *)(if_map + 1);
+	unsigned int i;
+
+	/*
+	 * dont need to serialize the use of update_locked since this is
+	 * the last reference to this map. there's nothing to race against.
	 */
+	for (i = 0; i < if_map->limit; i++)
+		srp_update_locked(&if_ifp_gc, &map[i], NULL);
+
+	free(if_map, M_IFADDR, sizeof(*if_map) + if_map->limit * sizeof(*map));
+}
+
 /*
  * Attach an interface to the
  * list of "active" interfaces.
@@ -931,6 +970,14 @@ if_netisr(void *unused)
 		t |= n;
 	}
 
+#if NPFSYNC > 0
+	if (t & (1 << NETISR_PFSYNC)) {
+		KERNEL_LOCK();
+		pfsyncintr();
+		KERNEL_UNLOCK();
+	}
+#endif
+
 	NET_UNLOCK();
 }
 
@@ -1733,22 +1780,22 @@ if_unit(const char *name)
 struct ifnet *
 if_get(unsigned int index)
 {
-	struct ifnet **if_map;
+	struct srp_ref sr;
+	struct if_map *if_map;
+	struct srp *map;
 	struct ifnet *ifp = NULL;
 
-	if (index == 0)
-		return (NULL);
+	if_map = srp_enter(&sr, &if_idxmap.map);
+	if (index < if_map->limit) {
+		map = (struct srp *)(if_map + 1);
 
-	smr_read_enter();
-	if_map = SMR_PTR_GET(&if_idxmap.map);
-	if (index < if_idxmap_limit(if_map)) {
-		ifp = SMR_PTR_GET(&if_map[index]);
+		ifp = srp_follow(&sr, &map[index]);
 		if (ifp != NULL) {
 			KASSERT(ifp->if_index == index);
 			if_ref(ifp);
 		}
 	}
-	smr_read_leave();
+	srp_leave(&sr);
 
 	return (ifp);
 }
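
For readers following the structure the diff restores: the sketch below is a standalone, single-threaded model of the if_map layout -- a header carrying the limit followed, in the same allocation, by that many slots, plus a bounded if_get()-style lookup. It is an illustration only, not kernel code: the names (demo_map, demo_get, and so on) are invented here, and plain pointers stand in for the srp(9) references, refcnt(9) accounting, and locking that the real if_idxmap relies on.

/*
 * demo.c -- toy model of the restored if_map layout.
 * build: cc -o demo demo.c
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_ifnet {
	unsigned int if_index;
	char if_xname[16];
};

struct demo_map {
	unsigned long limit;
	/* followed by limit demo_ifnet pointers, like struct if_map */
};

static struct demo_ifnet **
demo_map_slots(struct demo_map *m)
{
	/* mirrors map = (struct srp *)(if_map + 1) in the diff */
	return ((struct demo_ifnet **)(m + 1));
}

static struct demo_map *
demo_map_alloc(unsigned long limit)
{
	struct demo_map *m;

	/* one allocation: the header plus the slot array behind it */
	m = calloc(1, sizeof(*m) + limit * sizeof(struct demo_ifnet *));
	if (m == NULL)
		abort();
	m->limit = limit;
	return (m);
}

/*
 * bounded lookup shaped like if_get(): slot 0 is deliberately never
 * populated (matching the kernel's "skip ifidx 0" rule), so index 0
 * and any out-of-range index both come back as NULL.
 */
static struct demo_ifnet *
demo_get(struct demo_map *m, unsigned int index)
{
	if (index >= m->limit)
		return (NULL);
	return (demo_map_slots(m)[index]);
}

int
main(void)
{
	struct demo_map *m = demo_map_alloc(8);
	struct demo_ifnet em0 = { .if_index = 1, .if_xname = "em0" };

	/* publish the interface at its index, like if_idxmap_insert() */
	demo_map_slots(m)[em0.if_index] = &em0;

	printf("index 1 -> %s\n", demo_get(m, 1) ? demo_get(m, 1)->if_xname : "(none)");
	printf("index 0 -> %s\n", demo_get(m, 0) ? demo_get(m, 0)->if_xname : "(none)");
	printf("index 9 -> %s\n", demo_get(m, 9) ? demo_get(m, 9)->if_xname : "(none)");

	free(m);
	return (0);
}

Keeping the limit and the slot array behind a single pointer is what lets a reader take one srp reference and see a consistent pair of bounds and slots; growing the map means publishing a whole new allocation, which is why if_idxmap_alloc() copies the old slots across before swapping the map in.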