{
uint64_t fpsr;
- __asm __volatile("mov %0=ar.fpsr" : "=r" (fpsr));
+ __asm volatile("mov %0=ar.fpsr" : "=r" (fpsr));
return (~fpsr & 0x3d);
}
{
uint64_t fpsr;
- __asm __volatile("mov %0=ar.fpsr" : "=r"(fpsr));
+ __asm volatile("mov %0=ar.fpsr" : "=r"(fpsr));
return ((fp_rnd)((fpsr >> 10) & 3));
}
int64_t fpsr;
int64_t oldmask;
- __asm __volatile("mov %0=ar.fpsr" : "=r" (fpsr));
+ __asm volatile("mov %0=ar.fpsr" : "=r" (fpsr));
oldmask = ~fpsr & 0x3d;
fpsr = (fpsr & ~0x3d) | (~mask & 0x3d);
- __asm __volatile("mov ar.fpsr=%0" :: "r" (fpsr));
+ __asm volatile("mov ar.fpsr=%0" :: "r" (fpsr));
return (oldmask);
}
uint64_t fpsr;
fp_rnd prev;
- __asm __volatile("mov %0=ar.fpsr" : "=r"(fpsr));
+ __asm volatile("mov %0=ar.fpsr" : "=r"(fpsr));
prev = (fp_rnd)((fpsr >> 10) & 3);
fpsr = (fpsr & ~0xC00ULL) | ((unsigned int)rnd << 10);
- __asm __volatile("mov ar.fpsr=%0" :: "r"(fpsr));
+ __asm volatile("mov ar.fpsr=%0" :: "r"(fpsr));
return (prev);
}
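
The ia64 hunks above all go through the <ieeefp.h> accessors. As a minimal usage sketch of the interface they implement (the caller below is hypothetical and assumes only the standard fp_rnd type and the FP_RZ constant from <ieeefp.h>; it is not part of any diff):

#include <ieeefp.h>

double
scale_toward_zero(double x)
{
	fp_rnd prev;
	double r;

	prev = fpsetround(FP_RZ);	/* returns the previous rounding mode */
	r = x / 3.0;			/* this division now rounds toward zero */
	fpsetround(prev);		/* restore the caller's mode */
	return (r);
}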
-/* $OpenBSD: fabs.c,v 1.8 2013/03/28 18:09:38 martynas Exp $ */
+/* $OpenBSD: fabs.c,v 1.9 2014/04/17 09:01:25 guenther Exp $ */
/*
* Copyright (c) 2002 Theo de Raadt
double
fabs(double x)
{
- __asm__ __volatile("fabs %0,%1" : "=f"(x) : "f"(x));
+ __asm__ volatile("fabs %0,%1" : "=f"(x) : "f"(x));
return (x);
}
-/* $OpenBSD: flt_rounds.c,v 1.4 2012/06/25 17:01:11 deraadt Exp $ */
+/* $OpenBSD: flt_rounds.c,v 1.5 2014/04/17 09:01:25 guenther Exp $ */
/* $NetBSD: flt_rounds.c,v 1.5 2001/05/25 12:14:05 simonb Exp $ */
/*
double tmp;
int x;
- __asm__ __volatile("mffs %0; stfiwx %0,0,%1" : "=f"(tmp): "b"(&x));
+ __asm__ volatile("mffs %0; stfiwx %0,0,%1" : "=f"(tmp): "b"(&x));
return map[x & 0x03];
#endif
}
-/* $OpenBSD: fpgetmask.c,v 1.2 2008/06/26 05:42:05 ray Exp $ */
+/* $OpenBSD: fpgetmask.c,v 1.3 2014/04/17 09:01:25 guenther Exp $ */
/* $NetBSD: fpgetmask.c,v 1.1 1999/07/07 01:55:07 danw Exp $ */
/*
{
u_int64_t fpscr;
- __asm__ __volatile("mffs %0" : "=f"(fpscr));
+ __asm__ volatile("mffs %0" : "=f"(fpscr));
return ((fpscr >> 3) & 0x1f);
}
-/* $OpenBSD: fpgetround.c,v 1.2 2008/06/26 05:42:05 ray Exp $ */
+/* $OpenBSD: fpgetround.c,v 1.3 2014/04/17 09:01:25 guenther Exp $ */
/* $NetBSD: fpgetround.c,v 1.1 1999/07/07 01:55:08 danw Exp $ */
/*
{
u_int64_t fpscr;
- __asm__ __volatile("mffs %0" : "=f"(fpscr));
+ __asm__ volatile("mffs %0" : "=f"(fpscr));
return (fpscr & 0x3);
}
-/* $OpenBSD: fpgetsticky.c,v 1.2 2008/06/26 05:42:05 ray Exp $ */
+/* $OpenBSD: fpgetsticky.c,v 1.3 2014/04/17 09:01:25 guenther Exp $ */
/* $NetBSD: fpgetsticky.c,v 1.1 1999/07/07 01:55:08 danw Exp $ */
/*
{
u_int64_t fpscr;
- __asm__ __volatile("mffs %0" : "=f"(fpscr));
+ __asm__ volatile("mffs %0" : "=f"(fpscr));
return ((fpscr >> 25) & 0x1f);
}
-/* $OpenBSD: fpsetmask.c,v 1.3 2008/06/26 05:42:05 ray Exp $ */
+/* $OpenBSD: fpsetmask.c,v 1.4 2014/04/17 09:01:25 guenther Exp $ */
/* $NetBSD: fpsetmask.c,v 1.1 1999/07/07 01:55:08 danw Exp $ */
/*
u_int64_t fpscr;
fp_rnd old;
- __asm__ __volatile("mffs %0" : "=f"(fpscr));
+ __asm__ volatile("mffs %0" : "=f"(fpscr));
old = (fpscr >> 3) & 0x1f;
fpscr = (fpscr & 0xffffff07ULL) | (mask << 3);
- __asm__ __volatile("mtfsf 0xff,%0" :: "f"(fpscr));
+ __asm__ volatile("mtfsf 0xff,%0" :: "f"(fpscr));
return (old);
}
-/* $OpenBSD: fpsetround.c,v 1.3 2008/06/26 05:42:05 ray Exp $ */
+/* $OpenBSD: fpsetround.c,v 1.4 2014/04/17 09:01:25 guenther Exp $ */
/* $NetBSD: fpsetround.c,v 1.1 1999/07/07 01:55:08 danw Exp $ */
/*
u_int64_t fpscr;
fp_rnd old;
- __asm__ __volatile("mffs %0" : "=f"(fpscr));
+ __asm__ volatile("mffs %0" : "=f"(fpscr));
old = fpscr & 0x3;
fpscr = (fpscr & 0xfffffffcULL) | rnd_dir;
- __asm__ __volatile("mtfsf 0xff,%0" :: "f"(fpscr));
+ __asm__ volatile("mtfsf 0xff,%0" :: "f"(fpscr));
return (old);
}
-/* $OpenBSD: fpsetsticky.c,v 1.4 2011/05/25 21:26:55 martynas Exp $ */
+/* $OpenBSD: fpsetsticky.c,v 1.5 2014/04/17 09:01:25 guenther Exp $ */
/* $NetBSD: fpsetsticky.c,v 1.1 1999/07/07 01:55:08 danw Exp $ */
/*
u_int64_t fpscr;
fp_rnd old;
- __asm__ __volatile("mffs %0" : "=f"(fpscr));
+ __asm__ volatile("mffs %0" : "=f"(fpscr));
old = (fpscr >> 25) & 0x1f;
fpscr = (fpscr & 0xe1ffffffULL) | ((mask & 0xf) << 25);
if (mask & FP_X_INV)
fpscr |= 0x400;
else
fpscr &= 0xfe07f8ffULL;
- __asm__ __volatile("mtfsf 0xff,%0" :: "f"(fpscr));
+ __asm__ volatile("mtfsf 0xff,%0" :: "f"(fpscr));
return (old);
}
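
The powerpc fpsetmask()/fpgetsticky() pair changed above follows the same <ieeefp.h> contract. A minimal sketch of a caller, assuming the standard FP_X_* exception constants; the function is illustrative only and appears in none of the files touched here:

#include <ieeefp.h>

void
check_overflow(void)
{
	fp_except omask;

	/* unmask (enable traps for) invalid operation and divide by zero */
	omask = fpsetmask(FP_X_INV | FP_X_DZ);

	/* ... floating-point work ... */

	if (fpgetsticky() & FP_X_OFL) {
		/* overflow was raised; it stayed masked, so no SIGFPE */
	}
	fpsetmask(omask);		/* restore the previous trap mask */
}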
-/* $OpenBSD: fpu_arith.h,v 1.2 2011/09/17 08:38:07 miod Exp $ */
+/* $OpenBSD: fpu_arith.h,v 1.3 2014/04/17 09:01:25 guenther Exp $ */
/*
* Copyright (c) 1992, 1993
* into carry; GET_CARRY sets its argument to 0 or 1.
*/
#define FPU_ADDC(r, x, y) \
- __asm __volatile("addx %1,%2,%0" : "=r"(r) : "r"(x), "r"(y))
+ __asm volatile("addx %1,%2,%0" : "=r"(r) : "r"(x), "r"(y))
#define FPU_ADDS(r, x, y) \
- __asm __volatile("addcc %1,%2,%0" : "=r"(r) : "r"(x), "r"(y))
+ __asm volatile("addcc %1,%2,%0" : "=r"(r) : "r"(x), "r"(y))
#define FPU_ADDCS(r, x, y) \
- __asm __volatile("addxcc %1,%2,%0" : "=r"(r) : "r"(x), "r"(y))
+ __asm volatile("addxcc %1,%2,%0" : "=r"(r) : "r"(x), "r"(y))
#define FPU_SUBC(r, x, y) \
- __asm __volatile("subx %1,%2,%0" : "=r"(r) : "r"(x), "r"(y))
+ __asm volatile("subx %1,%2,%0" : "=r"(r) : "r"(x), "r"(y))
#define FPU_SUBS(r, x, y) \
- __asm __volatile("subcc %1,%2,%0" : "=r"(r) : "r"(x), "r"(y))
+ __asm volatile("subcc %1,%2,%0" : "=r"(r) : "r"(x), "r"(y))
#define FPU_SUBCS(r, x, y) \
- __asm __volatile("subxcc %1,%2,%0" : "=r"(r) : "r"(x), "r"(y))
+ __asm volatile("subxcc %1,%2,%0" : "=r"(r) : "r"(x), "r"(y))
-#define FPU_GET_CARRY(r) __asm __volatile("addx %%g0,%%g0,%0" : "=r"(r))
-#define FPU_SET_CARRY(v) __asm __volatile("addcc %0,-1,%%g0" : : "r"(v))
+#define FPU_GET_CARRY(r) __asm volatile("addx %%g0,%%g0,%0" : "=r"(r))
+#define FPU_SET_CARRY(v) __asm volatile("addcc %0,-1,%%g0" : : "r"(v))
#define FPU_SHL1_BY_ADD /* shift left 1 faster by ADDC than (a<<1)|(b>>31) */
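
The fpu_arith.h carry macros implement the protocol described in the comment above (carry passed through the condition codes, GET_CARRY yielding 0 or 1). A minimal sketch of how they compose, assuming 32-bit words stored least-significant first; the add96() helper is hypothetical and not taken from the fpu code:

/* r = x + y over three words, carry propagated through the macros */
static inline void
add96(u_int r[3], const u_int x[3], const u_int y[3])
{
	FPU_ADDS(r[0], x[0], y[0]);	/* addcc: produces the first carry */
	FPU_ADDCS(r[1], x[1], y[1]);	/* addxcc: consumes and regenerates it */
	FPU_ADDC(r[2], x[2], y[2]);	/* addx: consumes the final carry */
}

The FPU_SHL1_BY_ADD note means the same trick doubles a multiword value: adding it to itself with ADDS/ADDCS carries the top bit of each word into the next one, which is cheaper on sparc than computing (a << 1) | (b >> 31) per word.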
-/* $OpenBSD: fpu_qp.c,v 1.4 2012/12/05 23:19:59 deraadt Exp $ */
+/* $OpenBSD: fpu_qp.c,v 1.5 2014/04/17 09:01:25 guenther Exp $ */
/*-
* Copyright (c) 2002 Jake Burkholder.
{ \
struct fpemu fe; \
struct fpn *r; \
- __asm __volatile("stx %%fsr, [%0]" : : "r" (&fe.fe_fsr)); \
+ __asm volatile("stx %%fsr, [%0]" : : "r" (&fe.fe_fsr)); \
fe.fe_f1.fp_sign = a[0] >> 31; \
fe.fe_f1.fp_sticky = 0; \
fe.fe_f1.fp_class = __fpu_qtof(&fe.fe_f1, a[0], a[1], a[2], a[3]); \
{ \
struct fpemu fe; \
atype *a; \
- __asm __volatile("stx %%fsr, %0" : "=m" (fe.fe_fsr) :); \
+ __asm volatile("stx %%fsr, %0" : "=m" (fe.fe_fsr) :); \
a = (atype *)&n; \
fe.fe_f1.fp_sign = signed ? a[0] >> 31 : 0; \
fe.fe_f1.fp_sticky = 0; \
struct fpemu fe; \
u_int *a; \
type n; \
- __asm __volatile("stx %%fsr, %0" : "=m" (fe.fe_fsr) :); \
+ __asm volatile("stx %%fsr, %0" : "=m" (fe.fe_fsr) :); \
a = (u_int *)&n; \
fe.fe_f1.fp_sign = c[0] >> 31; \
fe.fe_f1.fp_sticky = 0; \
struct fpemu fe; \
u_int *a; \
type n; \
- __asm __volatile("stx %%fsr, %0" : "=m" (fe.fe_fsr) :); \
+ __asm volatile("stx %%fsr, %0" : "=m" (fe.fe_fsr) :); \
a = (u_int *)&n; \
fe.fe_f1.fp_sign = c[0] >> 31; \
fe.fe_f1.fp_sticky = 0; \
struct fpemu fe; \
u_int *a; \
type n; \
- __asm __volatile("stx %%fsr, %0" : "=m" (fe.fe_fsr) :); \
+ __asm volatile("stx %%fsr, %0" : "=m" (fe.fe_fsr) :); \
a = (u_int *)&n; \
fe.fe_f1.fp_sign = c[0] >> 31; \
fe.fe_f1.fp_sticky = 0; \
_Qp_ ## name(u_int *a, u_int *b) \
{ \
struct fpemu fe; \
- __asm __volatile("stx %%fsr, %0" : "=m" (fe.fe_fsr) :); \
+ __asm volatile("stx %%fsr, %0" : "=m" (fe.fe_fsr) :); \
fe.fe_f1.fp_sign = a[0] >> 31; \
fe.fe_f1.fp_sticky = 0; \
fe.fe_f1.fp_class = __fpu_qtof(&fe.fe_f1, a[0], a[1], a[2], a[3]); \
{
struct fpemu fe;
struct fpn *r;
- __asm __volatile("stx %%fsr, %0" : "=m" (fe.fe_fsr) :);
+ __asm volatile("stx %%fsr, %0" : "=m" (fe.fe_fsr) :);
fe.fe_f1.fp_sign = a[0] >> 31;
fe.fe_f1.fp_sticky = 0;
fe.fe_f1.fp_class = __fpu_qtof(&fe.fe_f1, a[0], a[1], a[2], a[3]);