-/* $OpenBSD: atomic.h,v 1.10 2014/03/29 18:09:29 guenther Exp $ */
+/* $OpenBSD: atomic.h,v 1.11 2014/07/13 08:13:07 miod Exp $ */
/* Public Domain */
#ifdef MULTIPROCESSOR
/* actual implementation is hairy, see atomic.S */
-void atomic_setbits_int(volatile unsigned int *, unsigned int);
-void atomic_clearbits_int(volatile unsigned int *, unsigned int);
+void atomic_setbits_int(volatile unsigned int *, unsigned int);
+void atomic_clearbits_int(volatile unsigned int *, unsigned int);
+unsigned int atomic_add_int_nv_mp(volatile unsigned int *, unsigned int);
+unsigned int atomic_sub_int_nv_mp(volatile unsigned int *, unsigned int);
+unsigned int atomic_cas_uint_mp(unsigned int *, unsigned int, unsigned int);
+unsigned int atomic_swap_uint_mp(unsigned int *, unsigned int);
+
+#define atomic_add_int_nv atomic_add_int_nv_mp
+#define atomic_sub_int_nv atomic_sub_int_nv_mp
+#define atomic_cas_uint atomic_cas_uint_mp
+#define atomic_swap_uint atomic_swap_uint_mp
#else
set_psr(psr);
}
+static __inline unsigned int
+atomic_add_int_nv_sp(volatile unsigned int *uip, unsigned int v)
+{
+ u_int psr;
+ unsigned int nv;
+
+ psr = get_psr();
+ set_psr(psr | PSR_IND);
+ *uip += v;
+ nv = *uip;
+ set_psr(psr);
+
+ return nv;
+}
+
+static __inline unsigned int
+atomic_sub_int_nv_sp(volatile unsigned int *uip, unsigned int v)
+{
+ u_int psr;
+ unsigned int nv;
+
+ psr = get_psr();
+ set_psr(psr | PSR_IND);
+ *uip -= v;
+ nv = *uip;
+ set_psr(psr);
+
+ return nv;
+}
+
+static __inline unsigned int
+atomic_cas_uint_sp(unsigned int *p, unsigned int o, unsigned int n)
+{
+ u_int psr;
+ unsigned int ov;
+
+ psr = get_psr();
+ set_psr(psr | PSR_IND);
+ ov = *p;
+ if (ov == o)
+ *p = n;
+ set_psr(psr);
+
+ return ov;
+}
+
+static __inline unsigned int
+atomic_swap_uint_sp(unsigned int *p, unsigned int v)
+{
+ u_int psr;
+ unsigned int ov;
+
+ psr = get_psr();
+ set_psr(psr | PSR_IND);
+ ov = *p;
+ *p = v;
+ set_psr(psr);
+
+ return ov;
+}
+
+#define atomic_add_int_nv atomic_add_int_nv_sp
+#define atomic_sub_int_nv atomic_sub_int_nv_sp
+#define atomic_cas_uint atomic_cas_uint_sp
+#define atomic_swap_uint atomic_swap_uint_sp
+
#endif /* MULTIPROCESSOR */
static __inline__ unsigned int
return oldval;
}
+#define atomic_add_long_nv(p,v) \
+ ((unsigned long)atomic_add_int_nv((unsigned int *)p, (unsigned int)v))
+#define atomic_sub_long_nv(p,v) \
+ ((unsigned long)atomic_sub_int_nv((unsigned int *)p, (unsigned int)v))
+
+#define atomic_cas_ulong(p,o,n) \
+ ((unsigned long)atomic_cas_uint((unsigned int *)p, (unsigned int)o, \
+ (unsigned int)n))
+#define atomic_cas_ptr(p,o,n) \
+ ((void *)atomic_cas_uint((unsigned int *)p, (unsigned int)o, \
+ (unsigned int)n))
+
+#define atomic_swap_ulong(p,o) \
+ ((unsigned long)atomic_swap_uint((unsigned int *)p, (unsigned int)o))
+#define atomic_swap_ptr(p,o) \
+ ((void *)atomic_swap_uint((unsigned int *)p, (unsigned int)o))
+
+static __inline void
+__sync_synchronize(void)
+{
+ /* flush_pipeline(); */
+ __asm__ volatile ("tb1 0, %%r0, 0" ::: "memory");
+}
+
#endif /* defined(_KERNEL) */
#endif /* _M88K_ATOMIC_H_ */
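
For context, here is a minimal usage sketch of the interfaces the header now exports. It is illustrative only: the wrapper functions, the variable names, and the <sys/atomic.h> include are assumptions about a typical kernel caller, not part of the diff.

/*
 * Hypothetical usage sketch (not part of the diff): a CAS retry loop
 * built on atomic_cas_uint() and a counter built on atomic_add_int_nv().
 * The same calls resolve to the _mp assembly stubs on MULTIPROCESSOR
 * kernels and to the _sp inlines otherwise.
 */
#include <sys/atomic.h>

static unsigned int example_refs;		/* hypothetical reference count */
static volatile unsigned int example_events;	/* hypothetical event counter */

static int
example_ref_acquire(void)
{
	unsigned int old;

	do {
		old = example_refs;
		if (old == ~0U)
			return 0;	/* saturated; refuse to wrap */
		/* atomic_cas_uint() returns the value it found */
	} while (atomic_cas_uint(&example_refs, old, old + 1) != old);

	return 1;
}

static unsigned int
example_event_record(void)
{
	/* returns the counter value after the increment */
	return atomic_add_int_nv(&example_events, 1);
}
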
-/* $OpenBSD: atomic.S,v 1.4 2013/06/04 22:11:51 tedu Exp $ */
+/* $OpenBSD: atomic.S,v 1.5 2014/07/13 08:13:07 miod Exp $ */
/*
* Copyright (c) 2009 Miodrag Vallat.
*
* r2 data address
- * r3 bits to set or clear
- * r4 return address
- * r5 scratch
+ * r3 bits to set or clear / argument
+ * r4 argument / scratch
+ * r5 return address
* r6 interlock address
* r7 psr upon entry
* r8 active psr
+ * r9 scratch
*/
ENTRY(atomic_setbits_int)
- or %r4, %r1, %r0 /* save return address */
+ or %r5, %r1, %r0 /* save return address */
bsr _C_LABEL(__atomic_lock)
- ld %r5, %r2, %r0
- or %r5, %r5, %r3
- st %r5, %r2, %r0
+ ld %r4, %r2, %r0
+ or %r4, %r4, %r3
+ st %r4, %r2, %r0
br _C_LABEL(__atomic_unlock)
ENTRY(atomic_clearbits_int)
- or %r4, %r1, %r0 /* save return address */
+ or %r5, %r1, %r0 /* save return address */
bsr _C_LABEL(__atomic_lock)
- ld %r5, %r2, %r0
- or %r5, %r5, %r3
- xor %r5, %r5, %r3 /* r5 &= ~r3 */
- st %r5, %r2, %r0
+ ld %r4, %r2, %r0
+ or %r4, %r4, %r3
+ xor %r4, %r4, %r3 /* r4 &= ~r3 */
+ st %r4, %r2, %r0
+
+ br _C_LABEL(__atomic_unlock)
+
+ENTRY(atomic_add_int_nv_mp)
+ or %r5, %r1, %r0 /* save return address */
+ bsr _C_LABEL(__atomic_lock)
+
+ or %r9, %r2, %r0 /* move address out of %r2 (return value) */
+ ld %r2, %r9, %r0
+ addu %r2, %r2, %r3 /* %r3 = value to add */
+ st %r2, %r9, %r0
+
+ br _C_LABEL(__atomic_unlock)
+
+ENTRY(atomic_sub_int_nv_mp)
+ or %r5, %r1, %r0 /* save return address */
+ bsr _C_LABEL(__atomic_lock)
+
+ or %r9, %r2, %r0 /* move address out of %r2 (return value) */
+ ld %r2, %r9, %r0
+ subu %r2, %r2, %r3 /* %r3 = value to subtract */
+ st %r2, %r9, %r0
+
+ br _C_LABEL(__atomic_unlock)
+
+ENTRY(atomic_cas_uint_mp)
+ or %r5, %r1, %r0 /* save return address */
+ bsr _C_LABEL(__atomic_lock)
+
+ ld %r9, %r2, %r0 /* %r9 = current value */
+ cmp %r3, %r3, %r9 /* compare against expected value (%r3) */
+ bb1 ne, %r3, 1f /* mismatch: skip the store */
+ st %r4, %r2, %r0 /* match: store new value (%r4) */
+1:
+ or %r2, %r9, %r0 /* return the previous value */
+
+ br _C_LABEL(__atomic_unlock)
+
+ENTRY(atomic_swap_uint_mp)
+ or %r5, %r1, %r0 /* save return address */
+ bsr _C_LABEL(__atomic_lock)
+
+ ld %r4, %r2, %r0 /* %r4 = previous value */
+ st %r3, %r2, %r0 /* store new value (%r3) */
+ or %r2, %r4, %r0 /* return the previous value */
br _C_LABEL(__atomic_unlock)
or.u %r6, %r0, %hi16(_ASM_LABEL(__atomic_interlock))
or %r6, %r6, %lo16(_ASM_LABEL(__atomic_interlock))
1:
- or %r5, %r0, 1 /* __SIMPLELOCK_LOCKED */
- xmem %r5, %r6, %r0
- bcnd eq0, %r5, 3f
+ or %r9, %r0, 1 /* __SIMPLELOCK_LOCKED */
+ xmem %r9, %r6, %r0
+ bcnd eq0, %r9, 3f
2:
- ld %r5, %r6, %r0
- bcnd eq0, %r5, 1b
+ ld %r9, %r6, %r0
+ bcnd eq0, %r9, 1b
br 2b
3:
jmp %r1
or.u %r6, %r0, %hi16(_ASM_LABEL(__atomic_interlock))
or %r6, %r6, %lo16(_ASM_LABEL(__atomic_interlock))
1:
- or %r5, %r0, 1 /* __SIMPLELOCK_LOCKED */
- xmem %r5, %r6, %r0
- bcnd eq0, %r5, 3f
+ or %r9, %r0, 1 /* __SIMPLELOCK_LOCKED */
+ xmem %r9, %r6, %r0
+ bcnd eq0, %r9, 3f
2:
- ld %r5, %r6, %r0
- bcnd eq0, %r5, 1b
+ ld %r9, %r6, %r0
+ bcnd eq0, %r9, 1b
br 2b
3:
jmp %r1
st %r0, %r6, %r0 /* release interlock */
- stcr %r4, EXIP /* return address */
+ stcr %r5, EXIP /* return address */
stcr %r7, EPSR /* original PSR */
/*
stcr %r7, PSR
FLUSH_PIPELINE
- jmp %r4
+ jmp %r5
GLOBAL(__atomic_unlock_88100_end)
#endif
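
For reference, every *_mp routine above follows the same pattern: save the return address in %r5, take the global interlock via __atomic_lock, perform the read-modify-write, and leave through __atomic_unlock, which clears the interlock and restores the caller's PSR. The C sketch below illustrates only that pattern; the names are hypothetical, the xmem exchange is modeled non-atomically, and the PSR/interrupt handling of the real lock and unlock paths is omitted.

/*
 * Illustrative C rendering of the *_mp pattern (not the real code):
 * grab a single global interlock with an atomic exchange, do the
 * read-modify-write, release the interlock.
 */
static volatile unsigned int sketch_interlock;	/* stands in for __atomic_interlock */

static unsigned int
sketch_xmem(volatile unsigned int *p, unsigned int v)
{
	/* xmem swaps v with *p in one bus transaction; modeled plainly here */
	unsigned int ov = *p;
	*p = v;
	return ov;
}

static unsigned int
sketch_cas_uint_mp(unsigned int *p, unsigned int o, unsigned int n)
{
	unsigned int ov;

	for (;;) {
		if (sketch_xmem(&sketch_interlock, 1) == 0)
			break;			/* interlock acquired */
		while (sketch_interlock != 0)
			continue;		/* spin until it looks free, then retry */
	}

	ov = *p;				/* critical section */
	if (ov == o)
		*p = n;

	sketch_interlock = 0;			/* release, as __atomic_unlock does */
	return ov;
}
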