From 16355e610f87c90560e01de785463f35d761244a Mon Sep 17 00:00:00 2001
From: cheloha
Date: Sat, 9 Sep 2023 16:59:01 +0000
Subject: [PATCH] clockintr_schedule: tweak logic to eliminate early return

With the introduction of clockintr_schedule_locked(),
clockintr_schedule() is short enough that the early return from the
non-mutex path doesn't make the function easier to read.

Move the mutex path into the else branch.
---
 sys/kern/kern_clockintr.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/sys/kern/kern_clockintr.c b/sys/kern/kern_clockintr.c
index d219f05c219..e6d65659c16 100644
--- a/sys/kern/kern_clockintr.c
+++ b/sys/kern/kern_clockintr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_clockintr.c,v 1.43 2023/09/09 16:34:39 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.44 2023/09/09 16:59:01 cheloha Exp $ */
 /*
  * Copyright (c) 2003 Dale Rahn
  * Copyright (c) 2020 Mark Kettenis
@@ -410,12 +410,11 @@ clockintr_schedule(struct clockintr *cl, uint64_t expiration)
 	if (cl == &cq->cq_shadow) {
 		cl->cl_expiration = expiration;
 		SET(cl->cl_flags, CLST_SHADOW_PENDING);
-		return;
+	} else {
+		mtx_enter(&cq->cq_mtx);
+		clockintr_schedule_locked(cl, expiration);
+		mtx_leave(&cq->cq_mtx);
 	}
-
-	mtx_enter(&cq->cq_mtx);
-	clockintr_schedule_locked(cl, expiration);
-	mtx_leave(&cq->cq_mtx);
 }
 
 void
-- 
2.20.1
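
For context, below is a small standalone sketch of the control-flow shape
this patch leaves behind. It is not the kernel code itself: pthread mutexes
stand in for the kernel's mtx_enter()/mtx_leave(), the struct layouts are
stubs invented for the example (in the real code cq_shadow is an embedded
struct, not a pointer), and clockintr_schedule_locked() is reduced to a
placeholder. The point is only to show the single if/else that replaces the
early return.

/*
 * Standalone sketch (userspace analogue, not the OpenBSD code):
 * pthread mutexes replace the kernel's mtx_enter()/mtx_leave(),
 * and the structs below are stubs assumed for illustration.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define CLST_SHADOW_PENDING	0x01
#define SET(var, flags)		((var) |= (flags))

struct clockintr;

struct clockintr_queue {
	struct clockintr *cq_shadow;	/* stub: the real cq_shadow is embedded */
	pthread_mutex_t cq_mtx;
};

struct clockintr {
	uint64_t cl_expiration;
	uint32_t cl_flags;
	struct clockintr_queue *cl_queue;
};

/* Placeholder for the real sorted-queue insert done under the mutex. */
static void
clockintr_schedule_locked(struct clockintr *cl, uint64_t expiration)
{
	cl->cl_expiration = expiration;
}

/*
 * Post-patch shape: one if/else instead of an early return, which keeps
 * the mutex acquisition visibly scoped to a single branch.
 */
static void
clockintr_schedule(struct clockintr *cl, uint64_t expiration)
{
	struct clockintr_queue *cq = cl->cl_queue;

	if (cl == cq->cq_shadow) {
		cl->cl_expiration = expiration;
		SET(cl->cl_flags, CLST_SHADOW_PENDING);
	} else {
		pthread_mutex_lock(&cq->cq_mtx);
		clockintr_schedule_locked(cl, expiration);
		pthread_mutex_unlock(&cq->cq_mtx);
	}
}

int
main(void)
{
	struct clockintr_queue cq = {
		.cq_shadow = NULL,
		.cq_mtx = PTHREAD_MUTEX_INITIALIZER,
	};
	struct clockintr cl = { .cl_queue = &cq };

	clockintr_schedule(&cl, 1000);	/* else branch: mutex path */
	cq.cq_shadow = &cl;
	clockintr_schedule(&cl, 2000);	/* if branch: shadow path */
	printf("expiration: %llu\n", (unsigned long long)cl.cl_expiration);
	return 0;
}

Both the pre- and post-patch versions are behaviorally identical; the
argument in the commit message is purely about readability now that the
function is short enough to take in at a glance.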