-/* $OpenBSD: atomic.h,v 1.9 2015/04/24 15:26:22 mpi Exp $ */
+/* $OpenBSD: atomic.h,v 1.10 2015/05/06 03:30:03 dlg Exp $ */
 
-/* Public Domain */
+/*
+ * Copyright (c) 2015 Martin Pieuchot
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
 
#ifndef _POWERPC_ATOMIC_H_
#define _POWERPC_ATOMIC_H_
" sync" : "=&r" (tmp) : "r" (v), "r" (uip) : "cc", "memory");
}
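+
+/*
+ * The routines below all use the PowerPC load-and-reserve/
+ * store-conditional pair: lwarx loads a word and places a
+ * reservation on it; the matching stwcx. only succeeds (setting
+ * cr0[eq]) if the reservation still holds, otherwise the bne-
+ * branches back to retry.
+ *
+ * Compare-and-swap: store the new value n only if the current
+ * value equals o.  The previous value is returned either way,
+ * so callers test success with rv == o.
+ */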
+static inline unsigned int
+_atomic_cas_uint(volatile unsigned int *p, unsigned int o, unsigned int n)
+{
+ unsigned int rv;
+
+ __asm volatile (
+ "1: lwarx %0, 0, %2 \n"
+ " cmpw 0, %0, %4 \n"
+ " bne- 2f \n"
+ " stwcx. %3, 0, %2 \n"
+ " bne- 1b \n"
+ "2: \n"
+ : "=&r" (rv), "+m" (*p)
+ : "r" (p), "r" (n), "r" (o)
+ : "cc");
+
+ return (rv);
+}
+#define atomic_cas_uint(_p, _o, _n) _atomic_cas_uint((_p), (_o), (_n))
+
+static inline unsigned long
+_atomic_cas_ulong(volatile unsigned long *p, unsigned long o, unsigned long n)
+{
+ unsigned long rv;
+
+ __asm volatile (
+ "1: lwarx %0, 0, %2 \n"
+ " cmpw 0, %0, %4 \n"
+ " bne- 2f \n"
+ " stwcx. %3, 0, %2 \n"
+ " bne- 1b \n"
+ "2: \n"
+ : "=&r" (rv), "+m" (*p)
+ : "r" (p), "r" (n), "r" (o)
+ : "cc");
+
+ return (rv);
+}
+#define atomic_cas_ulong(_p, _o, _n) _atomic_cas_ulong((_p), (_o), (_n))
+
+static inline void *
+_atomic_cas_ptr(volatile void *pp, void *o, void *n)
+{
+ void * volatile *p = pp;
+ void *rv;
+
+ __asm volatile (
+ "1: lwarx %0, 0, %2 \n"
+ " cmpw 0, %0, %4 \n"
+ " bne- 2f \n"
+ " stwcx. %3, 0, %2 \n"
+ " bne- 1b \n"
+ "2: \n"
+ : "=&r" (rv), "+m" (*p)
+ : "r" (p), "r" (n), "r" (o)
+ : "cc");
+
+ return (rv);
+}
+#define atomic_cas_ptr(_p, _o, _n) _atomic_cas_ptr((_p), (_o), (_n))
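+
+/*
+ * Illustrative only (not part of this change): a typical retry
+ * loop built on atomic_cas_uint, here setting a hypothetical
+ * FLAG bit:
+ *
+ *	unsigned int o;
+ *	do {
+ *		o = *p;
+ *	} while (atomic_cas_uint(p, o, o | FLAG) != o);
+ */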
+
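+/*
+ * Unconditional swap: the same reservation loop, but with no
+ * compare, so the store of v is only retried if the reservation
+ * is lost.  Returns the previous value.
+ */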
+static inline unsigned int
+_atomic_swap_uint(volatile unsigned int *p, unsigned int v)
+{
+ unsigned int rv;
+
+ __asm volatile (
+ "1: lwarx %0, 0, %2 \n"
+ " stwcx. %3, 0, %2 \n"
+ " bne- 1b \n"
+ : "=&r" (rv), "+m" (*p)
+ : "r" (p), "r" (v)
+ : "cc");
+
+ return (rv);
+}
+#define atomic_swap_uint(_p, _v) _atomic_swap_uint((_p), (_v))
+
+static inline unsigned long
+_atomic_swap_ulong(volatile unsigned long *p, unsigned long v)
+{
+ unsigned long rv;
+
+ __asm volatile (
+ "1: lwarx %0, 0, %2 \n"
+ " stwcx. %3, 0, %2 \n"
+ " bne- 1b \n"
+ : "=&r" (rv), "+m" (*p)
+ : "r" (p), "r" (v)
+ : "cc");
+
+ return (rv);
+}
+#define atomic_swap_ulong(_p, _v) _atomic_swap_ulong((_p), (_v))
+
+static inline void *
+_atomic_swap_ptr(volatile void *pp, void *v)
+{
+ void * volatile *p = pp;
+ void *rv;
+
+ __asm volatile (
+ "1: lwarx %0, 0, %2 \n"
+ " stwcx. %3, 0, %2 \n"
+ " bne- 1b \n"
+ : "=&r" (rv), "+m" (*p)
+ : "r" (p), "r" (v)
+ : "cc");
+
+ return (rv);
+}
+#define atomic_swap_ptr(_p, _v) _atomic_swap_ptr((_p), (_v))
+
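+/*
+ * Add-and-fetch: the addition happens inside the reservation
+ * window and the new value is both stored and returned (the
+ * "_nv" suffix).  Plain add does not touch XER, so the "xer"
+ * clobber here is merely conservative.
+ */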
+static inline unsigned int
+_atomic_add_int_nv(volatile unsigned int *p, unsigned int v)
+{
+ unsigned int rv;
+
+ __asm volatile (
+ "1: lwarx %0, 0, %2 \n"
+ " add %0, %3, %0 \n"
+ " stwcx. %0, 0, %2 \n"
+ " bne- 1b \n"
+ : "=&r" (rv), "+m" (*p)
+ : "r" (p), "r" (v)
+ : "cc", "xer");
+
+ return (rv);
+}
+#define atomic_add_int_nv(_p, _v) _atomic_add_int_nv((_p), (_v))
+
+static inline unsigned long
+_atomic_add_long_nv(volatile unsigned long *p, unsigned long v)
+{
+ unsigned long rv;
+
+ __asm volatile (
+ "1: lwarx %0, 0, %2 \n"
+ " add %0, %3, %0 \n"
+ " stwcx. %0, 0, %2 \n"
+ " bne- 1b \n"
+ : "=&r" (rv), "+m" (*p)
+ : "r" (p), "r" (v)
+ : "cc", "xer");
+
+ return (rv);
+}
+#define atomic_add_long_nv(_p, _v) _atomic_add_long_nv((_p), (_v))
+
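+/*
+ * Subtract-and-fetch: note the operand order, subf rD, rA, rB
+ * computes rB - rA, so "subf %0, %3, %0" yields the old value
+ * minus v.
+ */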
+static inline unsigned int
+_atomic_sub_int_nv(volatile unsigned int *p, unsigned int v)
+{
+ unsigned int rv;
+
+ __asm volatile (
+ "1: lwarx %0, 0, %2 \n"
+ " subf %0, %3, %0 \n"
+ " stwcx. %0, 0, %2 \n"
+ " bne- 1b \n"
+ : "=&r" (rv), "+m" (*p)
+ : "r" (p), "r" (v)
+ : "cc", "xer");
+
+ return (rv);
+}
+#define atomic_sub_int_nv(_p, _v) _atomic_sub_int_nv((_p), (_v))
+
+static inline unsigned long
+_atomic_sub_long_nv(volatile unsigned long *p, unsigned long v)
+{
+ unsigned long rv;
+
+ __asm volatile (
+ "1: lwarx %0, 0, %2 \n"
+ " subf %0, %3, %0 \n"
+ " stwcx. %0, 0, %2 \n"
+ " bne- 1b \n"
+ : "=&r" (rv), "+m" (*p)
+ : "r" (p), "r" (v)
+ : "cc", "xer");
+
+ return (rv);
+}
+#define atomic_sub_long_nv(_p, _v) _atomic_sub_long_nv((_p), (_v))
+
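+/*
+ * Increment/decrement: addic takes a signed 16-bit immediate and
+ * the "n" constraint demands a compile-time constant, so these
+ * helpers only build when inlined with a literal argument, as done
+ * by the macros below.  Unlike plain add, addic updates the carry
+ * bit, so the "xer" clobber is required here.
+ */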
+static inline unsigned int
+_atomic_addic_int_nv(volatile unsigned int *p, unsigned int v)
+{
+ unsigned int rv;
+
+ __asm volatile (
+ "1: lwarx %0, 0, %2 \n"
+ " addic %0, %0, %3 \n"
+ " stwcx. %0, 0, %2 \n"
+ " bne- 1b \n"
+ : "=&r" (rv), "+m" (*p)
+ : "r" (p), "n" (v)
+ : "cc", "xer");
+
+ return (rv);
+}
+#define atomic_inc_int_nv(_p) _atomic_addic_int_nv((_p), 1)
+#define atomic_dec_int_nv(_p) _atomic_addic_int_nv((_p), -1)
+
+static inline unsigned long
+_atomic_addic_long_nv(volatile unsigned long *p, unsigned long v)
+{
+ unsigned long rv;
+
+ __asm volatile (
+ "1: lwarx %0, 0, %2 \n"
+ " addic %0, %0, %3 \n"
+ " stwcx. %0, 0, %2 \n"
+ " bne- 1b \n"
+ : "=&r" (rv), "+m" (*p)
+ : "r" (p), "n" (v)
+ : "cc", "xer");
+
+ return (rv);
+}
+#define atomic_inc_long_nv(_p) _atomic_addic_long_nv((_p), 1)
+#define atomic_dec_long_nv(_p) _atomic_addic_long_nv((_p), -1)
+
#define __membar(_f) do { __asm __volatile(_f ::: "memory"); } while (0)
#ifdef MULTIPROCESSOR