From: visa
Date: Sun, 16 Apr 2017 14:28:07 +0000 (+0000)
Subject: Replace fetch_and_add() with atomic_inc_int_nv() from <sys/atomic.h>
X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=8e6185f1908bd67b87ca9279b82b89927dff5dcb;p=openbsd

Replace fetch_and_add() with atomic_inc_int_nv() from <sys/atomic.h>
to make the code more similar to sparc64's.

OK mpi@, guenther@, kettenis@
---

diff --git a/sys/arch/amd64/amd64/lock_machdep.c b/sys/arch/amd64/amd64/lock_machdep.c
index 7588e24ef62..f02efe0bb04 100644
--- a/sys/arch/amd64/amd64/lock_machdep.c
+++ b/sys/arch/amd64/amd64/lock_machdep.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: lock_machdep.c,v 1.11 2017/03/07 14:41:57 visa Exp $	*/
+/*	$OpenBSD: lock_machdep.c,v 1.12 2017/04/16 14:28:07 visa Exp $	*/
 
 /*
  * Copyright (c) 2007 Artur Grabowski <art@openbsd.org>
@@ -19,8 +19,8 @@
 
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/atomic.h>
 
-#include <machine/atomic.h>
 #include <machine/lock.h>
 #include <machine/cpufunc.h>
 
@@ -31,7 +31,7 @@ __mp_lock_init(struct __mp_lock *mpl)
 {
 	memset(mpl->mpl_cpus, 0, sizeof(mpl->mpl_cpus));
 	mpl->mpl_users = 0;
-	mpl->mpl_ticket = 0;
+	mpl->mpl_ticket = 1;
 }
 
 #if defined(MP_LOCKDEBUG)
@@ -64,17 +64,6 @@ __mp_lock_spin(struct __mp_lock *mpl, u_int me)
 #endif
 }
 
-static inline u_int
-fetch_and_add(u_int *var, u_int value)
-{
-	asm volatile("lock; xaddl %%eax, %2;"
-	    : "=a" (value)
-	    : "a" (value), "m" (*var)
-	    : "memory");
-
-	return (value);
-}
-
 void
 __mp_lock(struct __mp_lock *mpl)
 {
@@ -83,7 +72,7 @@ __mp_lock(struct __mp_lock *mpl)
 
 	disable_intr();
 	if (cpu->mplc_depth++ == 0)
-		cpu->mplc_ticket = fetch_and_add(&mpl->mpl_users, 1);
+		cpu->mplc_ticket = atomic_inc_int_nv(&mpl->mpl_users);
 	write_rflags(rf);
 
 	__mp_lock_spin(mpl, cpu->mplc_ticket);
diff --git a/sys/arch/i386/i386/lock_machdep.c b/sys/arch/i386/i386/lock_machdep.c
index 17d402658e2..9ab26924484 100644
--- a/sys/arch/i386/i386/lock_machdep.c
+++ b/sys/arch/i386/i386/lock_machdep.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: lock_machdep.c,v 1.20 2017/03/07 14:41:57 visa Exp $	*/
+/*	$OpenBSD: lock_machdep.c,v 1.21 2017/04/16 14:28:07 visa Exp $	*/
 /*	$NetBSD: lock_machdep.c,v 1.1.2.3 2000/05/03 14:40:30 sommerfeld Exp $	*/
 
 /*-
@@ -37,8 +37,8 @@
 
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/atomic.h>
 
-#include <machine/atomic.h>
 #include <machine/lock.h>
 #include <machine/cpufunc.h>
 
@@ -50,7 +50,7 @@ __mp_lock_init(struct __mp_lock *mpl)
 {
 	memset(mpl->mpl_cpus, 0, sizeof(mpl->mpl_cpus));
 	mpl->mpl_users = 0;
-	mpl->mpl_ticket = 0;
+	mpl->mpl_ticket = 1;
 }
 
 #if defined(MP_LOCKDEBUG)
@@ -62,17 +62,6 @@ __mp_lock_init(struct __mp_lock *mpl)
 extern int __mp_lock_spinout;
 #endif
 
-static inline u_int
-fetch_and_add(u_int *var, u_int value)
-{
-	asm volatile("lock; xaddl %%eax, %2;"
-	    : "=a" (value)
-	    : "a" (value), "m" (*var)
-	    : "memory");
-
-	return (value);
-}
-
 static __inline void
 __mp_lock_spin(struct __mp_lock *mpl, u_int me)
 {
@@ -102,7 +91,7 @@ __mp_lock(struct __mp_lock *mpl)
 
 	disable_intr();
 	if (cpu->mplc_depth++ == 0)
-		cpu->mplc_ticket = fetch_and_add(&mpl->mpl_users, 1);
+		cpu->mplc_ticket = atomic_inc_int_nv(&mpl->mpl_users);
 	write_eflags(ef);
 
 	__mp_lock_spin(mpl, cpu->mplc_ticket);