From 08261913ef8fa7cde1c769ff7a0ac7684cdc3d73 Mon Sep 17 00:00:00 2001
From: cheloha
Date: Thu, 6 Jul 2023 23:24:37 +0000
Subject: [PATCH] timeout_hardclock_update: enter timeout_mutex before reading
 uptime clock

We have no way of knowing how long we will spin before entering
timeout_mutex.  Enter timeout_mutex first, then defer reading the
uptime clock until just before we start dumping kclock buckets.
---
 sys/kern/kern_timeout.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index 9eccb02f7a4..b20616e7e3d 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_timeout.c,v 1.92 2023/06/28 08:23:25 claudio Exp $ */
+/* $OpenBSD: kern_timeout.c,v 1.93 2023/07/06 23:24:37 cheloha Exp $ */
 /*
  * Copyright (c) 2001 Thomas Nordin
  * Copyright (c) 2000-2001 Artur Grabowski
@@ -545,9 +545,6 @@ timeout_hardclock_update(void)
 	struct timespec *lastscan = &timeout_kclock[KCLOCK_UPTIME].kc_lastscan;
 	int b, done, first, i, last, level, need_softclock = 1, off;
 
-	nanouptime(&now);
-	timespecsub(&now, lastscan, &elapsed);
-
 	mtx_enter(&timeout_mutex);
 
 	MOVEBUCKET(0, ticks);
@@ -573,6 +570,8 @@ timeout_hardclock_update(void)
 	 * completed a lap of the level and need to process buckets in the
 	 * next level.
 	 */
+	nanouptime(&now);
+	timespecsub(&now, lastscan, &elapsed);
 	for (level = 0; level < nitems(timeout_level_width); level++) {
 		first = timeout_maskwheel(level, lastscan);
 		if (elapsed.tv_sec >= timeout_level_width[level]) {
-- 
2.20.1
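
For illustration only, a minimal userspace sketch of the pattern the patch
adopts.  This is not kernel code: pthread_mutex_lock() and
clock_gettime(CLOCK_MONOTONIC) stand in for mtx_enter() and nanouptime(9),
and the names scan_mutex, ts_sub(), scan(), and the file-scope lastscan are
hypothetical stand-ins for the structures in kern_timeout.c.

/*
 * Sketch (assumptions noted above): sample the clock only after the
 * lock is held, so time spent spinning on the lock is folded into the
 * measured interval instead of being silently lost.
 */
#include <pthread.h>
#include <time.h>

static pthread_mutex_t scan_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct timespec lastscan;	/* timestamp of the previous scan */

static void
ts_sub(const struct timespec *a, const struct timespec *b,
    struct timespec *r)
{
	r->tv_sec = a->tv_sec - b->tv_sec;
	r->tv_nsec = a->tv_nsec - b->tv_nsec;
	if (r->tv_nsec < 0) {
		r->tv_sec--;
		r->tv_nsec += 1000000000L;
	}
}

static void
scan(void)
{
	struct timespec now, elapsed;

	pthread_mutex_lock(&scan_mutex);	/* may block arbitrarily long */

	/*
	 * Read the clock only once the lock is held.  Any time spent
	 * waiting for the lock above is now part of "elapsed" and is
	 * accounted for on this scan.
	 */
	clock_gettime(CLOCK_MONOTONIC, &now);
	ts_sub(&now, &lastscan, &elapsed);

	/* ... decide how many wheel levels to process using "elapsed" ... */

	lastscan = now;
	pthread_mutex_unlock(&scan_mutex);
}

Had the clock been read before taking the lock, as in the removed lines, a
long spin would leave "elapsed" stale by the time the buckets are examined,
and levels whose deadlines passed during the spin might not be processed
until the next scan.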