From: mpi
Date: Wed, 22 Apr 2015 06:39:03 +0000 (+0000)
Subject: Implement the MI atomic API for PowerPC to avoid using gcc builtins that
X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=64a1b3614f5a5a739b38ed775790c0f6f7dbfa9b;p=openbsd

Implement the MI atomic API for PowerPC to avoid using gcc builtins that
include extra sync operations.

ok kettenis@
---

diff --git a/sys/arch/powerpc/include/atomic.h b/sys/arch/powerpc/include/atomic.h
index 497b63bfee6..daa35b4367f 100644
--- a/sys/arch/powerpc/include/atomic.h
+++ b/sys/arch/powerpc/include/atomic.h
@@ -1,6 +1,21 @@
-/* $OpenBSD: atomic.h,v 1.7 2014/06/19 11:29:21 kettenis Exp $ */
+/* $OpenBSD: atomic.h,v 1.8 2015/04/22 06:39:03 mpi Exp $ */
+
+/*
+ * Copyright (c) 2015 Martin Pieuchot
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
 
-/* Public Domain */
 
 #ifndef _POWERPC_ATOMIC_H_
 #define _POWERPC_ATOMIC_H_
@@ -33,6 +48,157 @@ atomic_clearbits_int(volatile unsigned int *uip, unsigned int v)
 	    "	sync" : "=&r" (tmp) : "r" (v), "r" (uip) : "cc", "memory");
 }
 
+static inline unsigned int
+_atomic_cas_uint(volatile unsigned int *p, unsigned int o, unsigned int n)
+{
+	unsigned int rv;
+
+	__asm volatile (
+	    "1: lwarx %0, 0, %1 \n"
+	    "   cmpw 0, %0, %3 \n"
+	    "   bne- 2f \n"
+	    "   stwcx. %2, 0, %1 \n"
+	    "   bne- 1b \n"
+	    "2: \n"
+	    : "=&r" (rv)
+	    : "r" (p), "r" (n), "r" (o)
+	    : "cc", "memory");
+
+	return (rv);
+}
+#define atomic_cas_uint(_p, _o, _n) _atomic_cas_uint((_p), (_o), (_n))
+
+static inline unsigned long
+_atomic_cas_ulong(volatile unsigned long *p, unsigned long o, unsigned long n)
+{
+	unsigned long rv;
+
+	__asm volatile (
+	    "1: lwarx %0, 0, %1 \n"
+	    "   cmpw 0, %0, %3 \n"
+	    "   bne- 2f \n"
+	    "   stwcx. %2, 0, %1 \n"
+	    "   bne- 1b \n"
+	    "2: \n"
+	    : "=&r" (rv)
+	    : "r" (p), "r" (n), "r" (o)
+	    : "cc", "memory");
+
+	return (rv);
+}
+#define atomic_cas_ulong(_p, _o, _n) _atomic_cas_ulong((_p), (_o), (_n))
+
+static inline void *
+_atomic_cas_ptr(volatile void *pp, void *o, void *n)
+{
+	void * volatile *p = pp;
+	void *rv;
+
+	__asm volatile (
+	    "1: lwarx %0, 0, %3 \n"
+	    "   cmpw 0, %0, %1 \n"
+	    "   bne- 2f \n"
+	    "   stwcx. %2, 0, %3 \n"
+	    "   bne- 1b \n"
+	    "2: \n"
+	    : "=&r" (rv)
+	    : "r" (o), "r" (n), "r" (p)
+	    : "cc", "memory");
+
+	return (rv);
+}
+#define atomic_cas_ptr(_p, _o, _n) _atomic_cas_ptr((_p), (_o), (_n))
+
+static inline unsigned int
+_atomic_swap_uint(volatile unsigned int *p, unsigned int v)
+{
+	unsigned int rv;
+
+	__asm volatile (
+	    "1: lwarx %0, 0, %1 \n"
+	    "   stwcx. %2, 0, %1 \n"
+	    "   bne- 1b \n"
+	    : "=&r" (rv)
+	    : "r" (p), "r" (v)
+	    : "cc", "memory");
+
+	return (rv);
+}
+#define atomic_swap_uint(_p, _v) _atomic_swap_uint((_p), (_v))
+
+static inline unsigned long
+_atomic_swap_ulong(volatile unsigned long *p, unsigned long v)
+{
+	unsigned long rv;
+
+	__asm volatile (
+	    "1: lwarx %0, 0, %1 \n"
+	    "   stwcx. %2, 0, %1 \n"
+	    "   bne- 1b \n"
+	    : "=&r" (rv)
+	    : "r" (p), "r" (v)
+	    : "cc", "memory");
+
+	return (rv);
+}
+#define atomic_swap_ulong(_p, _v) _atomic_swap_ulong((_p), (_v))
+
+static inline void *
+_atomic_swap_ptr(volatile void *pp, void *v)
+{
+	void * volatile *p = pp;
+	void *rv;
+
+	__asm volatile (
+	    "1: lwarx %0, 0, %1 \n"
+	    "   stwcx. %2, 0, %1 \n"
+	    "   bne- 1b \n"
+	    : "=&r" (rv)
+	    : "r" (p), "r" (v)
+	    : "cc", "memory");
+
+	return (rv);
+}
+#define atomic_swap_ptr(_p, _n) _atomic_swap_ptr((_p), (_n))
+
+static inline unsigned int
+_atomic_add_int_nv(volatile unsigned int *p, unsigned int v)
+{
+	unsigned int rv;
+
+	__asm volatile (
+	    "1: lwarx %0, 0, %1 \n"
+	    "   add %0, %2, %0 \n"
+	    "   stwcx. %0, 0, %1 \n"
+	    "   bne- 1b \n"
+	    : "=&r" (rv)
+	    : "r" (p), "r" (v)
+	    : "cc", "memory");
+
+	return (rv);
+}
+#define atomic_add_int_nv(_p, _v) _atomic_add_int_nv((_p), (_v))
+#define atomic_sub_int_nv(_p, _v) _atomic_add_int_nv((_p), 0 - (_v))
+
+static inline unsigned long
+_atomic_add_long_nv(volatile unsigned long *p, unsigned long v)
+{
+	unsigned long rv;
+
+	__asm volatile (
+	    "1: lwarx %0, 0, %1 \n"
+	    "   add %0, %2, %0 \n"
+	    "   stwcx. %0, 0, %1 \n"
+	    "   bne- 1b \n"
+	    : "=&r" (rv)
+	    : "r" (p), "r" (v)
+	    : "cc", "memory");
+
+	return (rv);
+}
+#define atomic_add_long_nv(_p, _v) _atomic_add_long_nv((_p), (_v))
+#define atomic_sub_long_nv(_p, _v) _atomic_add_long_nv((_p), 0 - (_v))
+
 #define __membar(_f) do { __asm __volatile(_f ::: "memory"); } while (0)
 
 #ifdef MULTIPROCESSOR
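
For context, here is a minimal sketch of how machine-independent kernel code consumes the new primitives. It is illustrative only and not part of the commit; the helper name is made up. atomic_cas_uint() returns the value previously stored at the pointer, so a successful swap is detected by comparing that return value against the expected old value:

#include <sys/atomic.h>

/* Hypothetical helper: take a reference only while the count is non-zero. */
static int
refcnt_take_if_held(volatile unsigned int *refs)
{
	unsigned int o;

	do {
		o = *refs;
		if (o == 0)
			return (0);	/* raced against the final release */
	} while (atomic_cas_uint(refs, o, o + 1) != o);

	return (1);
}

Since these primitives carry no implied barrier, a caller that also needs memory ordering pairs them with an explicit membar_*() call.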
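On the rationale: the gcc __sync_* builtins implement fully ordered atomics, which on PowerPC typically means bracketing the lwarx/stwcx. loop with sync/isync on every call. A before/after sketch of the cost difference, assuming a plain event counter; the function names are hypothetical and the barrier placement described in the comments is the usual gcc expansion, not something taken from this diff:

#include <sys/atomic.h>

/*
 * Builtin version: gcc typically expands this to
 *	sync; 1: lwarx; add; stwcx.; bne- 1b; isync
 * so every increment pays for two barriers.
 */
static unsigned int
counter_bump_builtin(volatile unsigned int *p)
{
	return (__sync_add_and_fetch(p, 1));
}

/*
 * MI version: just the lwarx/stwcx. loop from _atomic_add_int_nv()
 * above; callers add a membar_*() only where ordering matters.
 */
static unsigned int
counter_bump_mi(volatile unsigned int *p)
{
	return (atomic_add_int_nv(p, 1));
}

Both return the post-increment value; the two versions differ only in the implied barriers.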