-/* $OpenBSD: route.c,v 1.414 2022/08/29 07:51:45 bluhm Exp $ */
+/* $OpenBSD: route.c,v 1.415 2023/01/21 17:35:01 mvs Exp $ */
/* $NetBSD: route.c,v 1.14 1996/02/13 22:00:46 christos Exp $ */
/*
#include <sys/queue.h>
#include <sys/pool.h>
#include <sys/atomic.h>
+#include <sys/rwlock.h>
#include <net/if.h>
#include <net/if_var.h>
#define ROUNDUP(a) (a>0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
+struct rwlock rt_lock = RWLOCK_INITIALIZER("rtlck");
+
/* Give some jitter to hash, to avoid synchronization between routers. */
static uint32_t rt_hashjitter;
* It should also be higher to let the ARP layer find
* cloned routes instead of the cloning one.
*/
- KERNEL_LOCK();
+ RT_LOCK();
error = rtrequest(RTM_RESOLVE, &info, rt->rt_priority - 1, &rt,
rtableid);
- KERNEL_UNLOCK();
+ RT_UNLOCK();
if (error) {
rtm_miss(RTM_MISS, &info, 0, RTP_NONE, 0, error, rtableid);
} else {
-/* $OpenBSD: route.h,v 1.196 2022/06/28 10:01:13 bluhm Exp $ */
+/* $OpenBSD: route.h,v 1.197 2023/01/21 17:35:01 mvs Exp $ */
/* $NetBSD: route.h,v 1.9 1996/02/13 22:00:49 christos Exp $ */
/*
#ifdef _KERNEL
#include <sys/percpu.h>
+#include <sys/rwlock.h>
+
+extern struct rwlock rt_lock;
+
+/*
+ * The route lock serializes updates of RTF_LLINFO routes and their
+ * rt_llinfo pointer; it replaces the kernel lock previously taken
+ * around rtrequest(RTM_RESOLVE) and arpcache().  It is always taken
+ * exclusively (write mode), hence the write-lock assertion.
+ */
+#define RT_LOCK() rw_enter_write(&rt_lock)
+#define RT_UNLOCK() rw_exit_write(&rt_lock)
+#define RT_ASSERT_LOCKED() rw_assert_wrlock(&rt_lock)
enum rtstat_counters {
rts_badredirect, /* bogus redirect calls */
-/* $OpenBSD: if_ether.c,v 1.252 2022/12/07 14:38:29 claudio Exp $ */
+/* $OpenBSD: if_ether.c,v 1.253 2023/01/21 17:35:01 mvs Exp $ */
/* $NetBSD: if_ether.c,v 1.31 1996/05/11 12:59:58 mycroft Exp $ */
/*
if (ifp->if_flags & (IFF_NOARP|IFF_STATICARP))
goto bad;
- KERNEL_LOCK();
+ RT_LOCK();
/*
- * Re-check since we grab the kernel lock after the first check.
+ * Re-check since we grab the route lock after the first check.
* rtrequest_delete() can be called with shared netlock. From
* there arp_rtrequest() is reached which touches RTF_LLINFO
- * and rt_llinfo. As this is called with kernel lock we grab the
- * kernel lock here and are safe. XXXSMP
+ * and rt_llinfo. As this is called with the route lock we grab
+ * the route lock here and are safe. XXXSMP
*/
if (!ISSET(rt->rt_flags, RTF_LLINFO)) {
- KERNEL_UNLOCK();
+ RT_UNLOCK();
goto bad;
}
la = (struct llinfo_arp *)rt->rt_llinfo;
}
}
- KERNEL_UNLOCK();
+ RT_UNLOCK();
return (EAGAIN);
bad:
} else if (rt != NULL) {
int error;
- KERNEL_LOCK();
+ RT_LOCK();
error = arpcache(ifp, ea, rt);
- KERNEL_UNLOCK();
+ RT_UNLOCK();
if (error)
goto out;
}
unsigned int len;
int changed = 0;
- KERNEL_ASSERT_LOCKED();
+ RT_ASSERT_LOCKED();
KASSERT(sdl != NULL);
/*