cmpti2.c
comparedf2.c
comparesf2.c
- cpu_model.c
ctzdi2.c
ctzsi2.c
ctzti2.c
divtc3.c
divti3.c
divtf3.c
- divxc3.c
extendsfdf2.c
extendhfsf2.c
ffsdi2.c
fixunssfdi.c
fixunssfsi.c
fixunssfti.c
- fixunsxfdi.c
- fixunsxfsi.c
- fixunsxfti.c
- fixxfdi.c
- fixxfti.c
floatdidf.c
floatdisf.c
- floatdixf.c
floatsidf.c
floatsisf.c
floattidf.c
floattisf.c
- floattixf.c
floatundidf.c
floatundisf.c
- floatundixf.c
floatunsidf.c
floatunsisf.c
floatuntidf.c
floatuntisf.c
- floatuntixf.c
int_util.c
lshrdi3.c
lshrti3.c
mulvdi3.c
mulvsi3.c
mulvti3.c
- mulxc3.c
negdf2.c
negdi2.c
negsf2.c
powidf2.c
powisf2.c
powitf2.c
- powixf2.c
subdf3.c
subsf3.c
subvdi3.c
clear_cache.c)
endif()
+# These sources work on all x86 variants, but only on x86 variants.
+set(x86_ARCH_SOURCES
+ cpu_model.c
+ divxc3.c
+ fixxfdi.c
+ fixxfti.c
+ fixunsxfdi.c
+ fixunsxfsi.c
+ fixunsxfti.c
+ floatdixf.c
+ floattixf.c
+ floatundixf.c
+ floatuntixf.c
+ mulxc3.c
+ powixf2.c
+)
+
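Grouping these into x86_ARCH_SOURCES works because, apart from cpu_model.c, every routine in the list backs the 80-bit x87 "long double" format, which exists only on x86. A minimal sketch of the kind of C that can lower to one of these libcalls (whether the compiler inlines the conversion or emits the call is up to the backend, so the routine name here is an assumption):

    #include <stdio.h>

    int main(void) {
        long long n = 1LL << 62;
        /* On i386/x86_64, "long double" is the 80-bit x87 type; this
           conversion may be emitted as a call to __floatdixf from the
           list above. On non-x86 targets the xf routines are meaningless. */
        long double x = (long double)n;
        printf("%Lf\n", x);
        return 0;
    }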
if (NOT MSVC)
set(x86_64_SOURCES
- x86_64/chkstk.S
- x86_64/chkstk2.S
x86_64/floatdidf.c
x86_64/floatdisf.c
x86_64/floatdixf.c
x86_64/floatundidf.S
x86_64/floatundisf.S
- x86_64/floatundixf.S
- ${GENERIC_SOURCES})
+ x86_64/floatundixf.S)
+ filter_builtin_sources(x86_64_SOURCES EXCLUDE x86_64_SOURCES "${x86_64_SOURCES};${GENERIC_SOURCES}")
set(x86_64h_SOURCES ${x86_64_SOURCES})
if (WIN32)
set(i386_SOURCES
i386/ashldi3.S
i386/ashrdi3.S
- i386/chkstk.S
- i386/chkstk2.S
i386/divdi3.S
i386/floatdidf.S
i386/floatdisf.S
i386/moddi3.S
i386/muldi3.S
i386/udivdi3.S
- i386/umoddi3.S
- ${GENERIC_SOURCES})
+ i386/umoddi3.S)
+ filter_builtin_sources(i386_SOURCES EXCLUDE i386_SOURCES "${i386_SOURCES};${GENERIC_SOURCES}")
if (WIN32)
    set(i386_SOURCES
        ${i386_SOURCES}
        i386/chkstk.S
        i386/chkstk2.S)
endif()
-
- set(i686_SOURCES
- ${i386_SOURCES})
else () # MSVC
# Use C versions of functions when building on MSVC
# MSVC's assembler takes Intel syntax, not AT&T syntax.
${GENERIC_SOURCES})
set(x86_64h_SOURCES ${x86_64_SOURCES})
set(i386_SOURCES ${GENERIC_SOURCES})
- set(i686_SOURCES ${i386_SOURCES})
endif () # if (NOT MSVC)
+set(x86_64h_SOURCES ${x86_64h_SOURCES} ${x86_ARCH_SOURCES})
+set(x86_64_SOURCES ${x86_64_SOURCES} ${x86_ARCH_SOURCES})
+set(i386_SOURCES ${i386_SOURCES} ${x86_ARCH_SOURCES})
+set(i686_SOURCES ${i686_SOURCES} ${x86_ARCH_SOURCES})
+
set(arm_SOURCES
arm/bswapdi2.S
arm/bswapsi2.S
arm/sync_fetch_and_xor_8.S
arm/udivmodsi4.S
arm/udivsi3.S
- arm/umodsi3.S
- ${GENERIC_SOURCES})
+ arm/umodsi3.S)
+filter_builtin_sources(arm_SOURCES EXCLUDE arm_SOURCES "${arm_SOURCES};${GENERIC_SOURCES}")
set(thumb1_SOURCES
arm/divsi3.S
udivsi3.c
umoddi3.c
emutls.c)
+ filter_builtin_sources(arm_SOURCES EXCLUDE arm_SOURCES "${arm_SOURCES};${GENERIC_SOURCES}")
elseif(NOT WIN32)
# TODO the EABI sources should only be added to EABI targets
set(arm_SOURCES
${GENERIC_TF_SOURCES}
${GENERIC_SOURCES})
+if (MINGW)
+ set(aarch64_SOURCES
+ ${aarch64_SOURCES}
+ aarch64/chkstk.S)
+endif()
+
set(armhf_SOURCES ${arm_SOURCES})
set(armv7_SOURCES ${arm_SOURCES})
set(armv7s_SOURCES ${arm_SOURCES})
set(mips64el_SOURCES ${GENERIC_TF_SOURCES}
${mips_SOURCES})
-set(wasm32_SOURCES ${GENERIC_SOURCES})
-set(wasm64_SOURCES ${GENERIC_SOURCES})
+set(powerpc64_SOURCES
+ ppc/divtc3.c
+ ppc/fixtfdi.c
+ ppc/fixunstfdi.c
+ ppc/floatditf.c
+ ppc/floatunditf.c
+ ppc/gcc_qadd.c
+ ppc/gcc_qdiv.c
+ ppc/gcc_qmul.c
+ ppc/gcc_qsub.c
+ ppc/multc3.c
+ ${GENERIC_SOURCES})
+set(powerpc64le_SOURCES ${powerpc64_SOURCES})
+
+set(wasm32_SOURCES
+ ${GENERIC_TF_SOURCES}
+ ${GENERIC_SOURCES})
+set(wasm64_SOURCES
+ ${GENERIC_TF_SOURCES}
+ ${GENERIC_SOURCES})
add_custom_target(builtins)
set_target_properties(builtins PROPERTIES FOLDER "Compiler-RT Misc")
# NOTE: some architectures (e.g. i386) have multiple names. Ensure that
# we catch them all.
set(_arch ${arch})
- if("${arch}" STREQUAL "i686")
- set(_arch "i386|i686")
+ if("${arch}" STREQUAL "armv6m")
+ set(_arch "arm|armv6m")
+ elseif("${arch}" MATCHES "^(armhf|armv7|armv7s|armv7k|armv7m|armv7em)$")
+ set(_arch "arm")
endif()
# Filter out generic versions of routines that are re-implemented in
--- /dev/null
+++ b/lib/builtins/aarch64/chkstk.S
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+
+#include "../assembly.h"
+
+// __chkstk routine
+// This routine is Windows-specific.
+// http://msdn.microsoft.com/en-us/library/ms648426.aspx
+
+// This clobbers registers x16 and x17.
+// Does not modify any memory or the stack pointer.
+
+// mov x15, #256 // Number of bytes of stack, in units of 16 bytes
+// bl __chkstk
+// sub sp, sp, x15, lsl #4
+
+#ifdef __aarch64__
+
+#define PAGE_SIZE 4096
+
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__chkstk)
+ lsl x16, x15, #4
+ mov x17, sp
+1:
+ sub x17, x17, #PAGE_SIZE
+ subs x16, x16, #PAGE_SIZE
+ ldr xzr, [x17]
+ b.gt 1b
+
+ ret
+END_COMPILERRT_FUNCTION(__chkstk)
+
+#endif // __aarch64__
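For context, a hedged sketch of the kind of function that triggers the probe sequence described in the header comment (sizes and codegen are illustrative assumptions, not guaranteed output):

    /* A frame larger than one page forces a stack probe on Windows: the
       prologue loads the allocation size in 16-byte units into x15 and
       calls __chkstk before adjusting sp, so each new page is touched in
       order and the guard page can grow the stack. */
    int big_frame(void) {
        volatile char buf[8192];  /* two 4 KiB pages */
        buf[0] = 1;
        buf[8191] = 2;
        return buf[0] + buf[8191];
    }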
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI double __aeabi_dadd(double a, double b) {
return __adddf3(a, b);
}
+#else
+AEABI_RTABI double __aeabi_dadd(double a, double b) COMPILER_RT_ALIAS(__adddf3);
+#endif
#endif
-
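On soft-float targets __aeabi_dadd and __adddf3 share a calling convention, so the new COMPILER_RT_ALIAS (defined in int_lib.h later in this patch) replaces the wrapper call with a symbol alias. A self-contained sketch of the same ELF mechanism, using hypothetical names:

    #include <stdio.h>

    /* Stands in for __adddf3: the real implementation. */
    double impl_add(double a, double b) { return a + b; }

    /* What COMPILER_RT_ALIAS expands to: a second symbol-table name for
       impl_add; no wrapper function or extra call frame is emitted. */
    double aeabi_add(double a, double b) __attribute__((__alias__("impl_add")));

    int main(void) {
        printf("%f\n", aeabi_add(1.5, 2.25));  /* 3.750000 */
        return 0;
    }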
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI float __aeabi_fadd(float a, float b) {
return __addsf3(a, b);
}
+#else
+AEABI_RTABI float __aeabi_fadd(float a, float b) COMPILER_RT_ALIAS(__addsf3);
+#endif
#endif
-
push {r0-r3, lr}
bl __aeabi_cdcmpeq_check_nan
cmp r0, #1
-#if __ARM_ARCH_ISA_THUMB == 1
+#if defined(USE_THUMB_1)
beq 1f
// NaN has been ruled out, so __aeabi_cdcmple can't trap
mov r0, sp
pop {r0-r3, lr}
// NaN has been ruled out, so __aeabi_cdcmple can't trap
+ // Use "it ne" + unconditional branch to guarantee a supported relocation if
+ // __aeabi_cdcmple is in a different section for some builds.
+ IT(ne)
bne __aeabi_cdcmple
-#if defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__)
+#if defined(USE_THUMB_2)
mov ip, #APSR_C
msr APSR_nzcvq, ip
#else
bl __aeabi_dcmplt
cmp r0, #1
-#if __ARM_ARCH_ISA_THUMB == 1
+#if defined(USE_THUMB_1)
bne 1f
// Z = 0, C = 0
movs r0, #1
push {r0-r3, lr}
bl __aeabi_cfcmpeq_check_nan
cmp r0, #1
-#if __ARM_ARCH_ISA_THUMB == 1
+#if defined(USE_THUMB_1)
beq 1f
// NaN has been ruled out, so __aeabi_cfcmple can't trap
mov r0, sp
pop {r0-r3, lr}
// NaN has been ruled out, so __aeabi_cfcmple can't trap
+ // Use "it ne" + unconditional branch to guarantee a supported relocation if
+ // __aeabi_cfcmple is in a different section for some builds.
+ IT(ne)
bne __aeabi_cfcmple
-#if defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__)
+#if defined(USE_THUMB_2)
mov ip, #APSR_C
msr APSR_nzcvq, ip
#else
bl __aeabi_fcmplt
cmp r0, #1
-#if __ARM_ARCH_ISA_THUMB == 1
+#if defined(USE_THUMB_1)
bne 1f
// Z = 0, C = 0
movs r0, #1
#endif
.syntax unified
+ .text
+ DEFINE_CODE_STATE
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_idivmod)
-#if __ARM_ARCH_ISA_THUMB == 1
+#if defined(USE_THUMB_1)
push {r0, r1, lr}
bl SYMBOL_NAME(__divsi3)
pop {r1, r2, r3} // now r0 = quot, r1 = num, r2 = denom
muls r2, r0, r2 // r2 = quot * denom
subs r1, r1, r2
JMP (r3)
-#else
+#else // defined(USE_THUMB_1)
push { lr }
sub sp, sp, #4
mov r2, sp
ldr r1, [sp]
add sp, sp, #4
pop { pc }
-#endif // __ARM_ARCH_ISA_THUMB == 1
+#endif // defined(USE_THUMB_1)
END_COMPILERRT_FUNCTION(__aeabi_idivmod)
NO_EXEC_STACK_DIRECTIVE
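The Thumb-1 path above recovers the remainder from the quotient with one multiply and subtract instead of a second division. The same identity in C:

    #include <assert.h>

    /* rem = num - (num / denom) * denom, as in the muls/subs pair above. */
    static int rem_from_quot(int num, int denom) {
        int quot = num / denom;
        return num - quot * denom;
    }

    int main(void) {
        assert(rem_from_quot(7, 3) == 1);
        assert(rem_from_quot(-7, 3) == -1);
        return 0;
    }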
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_memcmp)
+#ifdef USE_THUMB_1
+ push {r7, lr}
+ bl memcmp
+ pop {r7, pc}
+#else
b memcmp
+#endif
END_COMPILERRT_FUNCTION(__aeabi_memcmp)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memcmp4, __aeabi_memcmp)
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_memcpy)
+#ifdef USE_THUMB_1
+ push {r7, lr}
+ bl memcpy
+ pop {r7, pc}
+#else
b memcpy
+#endif
END_COMPILERRT_FUNCTION(__aeabi_memcpy)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memcpy4, __aeabi_memcpy)
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_memmove)
+#ifdef USE_THUMB_1
+ push {r7, lr}
+ bl memmove
+ pop {r7, pc}
+#else
b memmove
+#endif
END_COMPILERRT_FUNCTION(__aeabi_memmove)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memmove4, __aeabi_memmove)
mov r3, r1
mov r1, r2
mov r2, r3
+#ifdef USE_THUMB_1
+ push {r7, lr}
+ bl memset
+ pop {r7, pc}
+#else
b memset
+#endif
END_COMPILERRT_FUNCTION(__aeabi_memset)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memset4, __aeabi_memset)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memset8, __aeabi_memset)
+ .p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_memclr)
mov r2, r1
movs r1, #0
+#ifdef USE_THUMB_1
+ push {r7, lr}
+ bl memset
+ pop {r7, pc}
+#else
b memset
+#endif
END_COMPILERRT_FUNCTION(__aeabi_memclr)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memclr4, __aeabi_memclr)
#endif
.syntax unified
+ .text
+ DEFINE_CODE_STATE
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_uidivmod)
-#if __ARM_ARCH_ISA_THUMB == 1
+#if defined(USE_THUMB_1)
cmp r0, r1
bcc LOCAL_LABEL(case_denom_larger)
push {r0, r1, lr}
movs r1, r0
movs r0, #0
JMP (lr)
-#else
+#else // defined(USE_THUMB_1)
push { lr }
sub sp, sp, #4
mov r2, sp
.syntax unified
.text
-#if __ARM_ARCH_ISA_THUMB == 2
- .thumb
-#endif
+ DEFINE_CODE_STATE
//
// extern uint64_t __bswapdi2(uint64_t);
// Reverse all the bytes in a 64-bit integer.
//
.p2align 2
-#if __ARM_ARCH_ISA_THUMB == 2
-DEFINE_COMPILERRT_THUMB_FUNCTION(__bswapdi2)
-#else
DEFINE_COMPILERRT_FUNCTION(__bswapdi2)
-#endif
#if __ARM_ARCH < 6
    // Before armv6 there is no "rev" instruction
// r2 = rev(r0)
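For reference, the byte reversal __bswapdi2 performs, as portable C (a sketch; on ARMv6 and later the routine uses two rev instructions instead):

    #include <assert.h>
    #include <stdint.h>

    /* Byte-reverse each 32-bit half, then swap the halves. */
    static uint32_t bswap32_ref(uint32_t x) {
        return ((x & 0x000000FFu) << 24) | ((x & 0x0000FF00u) << 8) |
               ((x & 0x00FF0000u) >> 8)  | ((x & 0xFF000000u) >> 24);
    }

    static uint64_t bswapdi2_ref(uint64_t x) {
        return ((uint64_t)bswap32_ref((uint32_t)x) << 32) |
               bswap32_ref((uint32_t)(x >> 32));
    }

    int main(void) {
        assert(bswapdi2_ref(0x0102030405060708ULL) == 0x0807060504030201ULL);
        return 0;
    }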
.syntax unified
.text
-#if __ARM_ARCH_ISA_THUMB == 2
- .thumb
-#endif
+ DEFINE_CODE_STATE
//
// extern uint32_t __bswapsi2(uint32_t);
// Reverse all the bytes in a 32-bit integer.
//
.p2align 2
-#if __ARM_ARCH_ISA_THUMB == 2
-DEFINE_COMPILERRT_THUMB_FUNCTION(__bswapsi2)
-#else
DEFINE_COMPILERRT_FUNCTION(__bswapsi2)
-#endif
#if __ARM_ARCH < 6
    // Before armv6 there is no "rev" instruction
eor r1, r0, r0, ror #16
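The pre-ARMv6 sequence that starts with this eor is the classic three-instruction byte swap. Mirrored in C (a sketch of the same arithmetic):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t ror32(uint32_t x, unsigned n) {
        return (x >> n) | (x << (32 - n));
    }

    /* eor r1, r0, r0, ror #16 ; bic r1, r1, #0xff0000 ;
       mov r0, r0, ror #8      ; eor r0, r0, r1, lsr #8 */
    static uint32_t bswapsi2_ref(uint32_t x) {
        uint32_t t = x ^ ror32(x, 16);
        t &= ~0x00FF0000u;
        return ror32(x, 8) ^ (t >> 8);
    }

    int main(void) {
        assert(bswapsi2_ref(0x11223344u) == 0x44332211u);
        return 0;
    }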
.syntax unified
.text
-#if __ARM_ARCH_ISA_THUMB == 2
- .thumb
-#endif
-
+ DEFINE_CODE_STATE
.p2align 2
-#if __ARM_ARCH_ISA_THUMB == 2
-DEFINE_COMPILERRT_THUMB_FUNCTION(__clzdi2)
-#else
DEFINE_COMPILERRT_FUNCTION(__clzdi2)
-#endif
#ifdef __ARM_FEATURE_CLZ
#ifdef __ARMEB__
cmp r0, 0
.syntax unified
.text
-#if __ARM_ARCH_ISA_THUMB == 2
- .thumb
-#endif
+ DEFINE_CODE_STATE
.p2align 2
-#if __ARM_ARCH_ISA_THUMB == 2
-DEFINE_COMPILERRT_THUMB_FUNCTION(__clzsi2)
-#else
DEFINE_COMPILERRT_FUNCTION(__clzsi2)
-#endif
#ifdef __ARM_FEATURE_CLZ
clz r0, r0
JMP(lr)
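When __ARM_FEATURE_CLZ is unavailable, the routine falls back to a software count. A C reference for that path (a sketch; the assembly expresses the same halving search with compares and shifts):

    #include <assert.h>
    #include <stdint.h>

    static int clzsi2_ref(uint32_t x) {
        int n = 0;
        if (x == 0) return 32;
        if (!(x & 0xFFFF0000u)) { n += 16; x <<= 16; }
        if (!(x & 0xFF000000u)) { n += 8;  x <<= 8;  }
        if (!(x & 0xF0000000u)) { n += 4;  x <<= 4;  }
        if (!(x & 0xC0000000u)) { n += 2;  x <<= 2;  }
        if (!(x & 0x80000000u)) { n += 1; }
        return n;
    }

    int main(void) {
        assert(clzsi2_ref(1) == 31);
        assert(clzsi2_ref(0x80000000u) == 0);
        return 0;
    }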
//===----------------------------------------------------------------------===//
#include "../assembly.h"
-.syntax unified
-#if __ARM_ARCH_ISA_THUMB == 2
-.thumb
-#endif
+ .syntax unified
+ .text
+ DEFINE_CODE_STATE
@ int __eqsf2(float a, float b)
#endif
// Make copies of a and b with the sign bit shifted off the top. These will
// be used to detect zeros and NaNs.
-#if __ARM_ARCH_ISA_THUMB == 1
+#if defined(USE_THUMB_1)
push {r6, lr}
lsls r2, r0, #1
lsls r3, r1, #1
// flag if both a and b are zero (of either sign). The shift of r3 doesn't
    // affect this at all, but it *does* make sure that the C flag is clear for
// the subsequent operations.
-#if __ARM_ARCH_ISA_THUMB == 1
+#if defined(USE_THUMB_1)
lsrs r6, r3, #1
orrs r6, r2
#else
#endif
// Next, we check if a and b have the same or different signs. If they have
// opposite signs, this eor will set the N flag.
-#if __ARM_ARCH_ISA_THUMB == 1
+#if defined(USE_THUMB_1)
beq 1f
movs r6, r0
eors r6, r1
// ignoring NaNs for now), this subtract will zero out r0. If they have the
// same sign, the flags are updated as they would be for a comparison of the
// absolute values of a and b.
-#if __ARM_ARCH_ISA_THUMB == 1
+#if defined(USE_THUMB_1)
bmi 1f
subs r0, r2, r3
1:
// still clear from the shift argument in orrs; if a is positive and b
// negative, this places 0 in r0; if a is negative and b positive, -1 is
// placed in r0.
-#if __ARM_ARCH_ISA_THUMB == 1
+#if defined(USE_THUMB_1)
bhs 1f
// Here if a and b have the same sign and absA < absB, the result is thus
    // b < 0 ? 1 : -1. Same if a and b have the opposite sign (ignoring NaN).
// the sign of b in r0. Thus, if both are negative and a < b, -1 is placed
// in r0, which is the desired result. Conversely, if both are positive
// and a > b, zero is placed in r0.
-#if __ARM_ARCH_ISA_THUMB == 1
+#if defined(USE_THUMB_1)
bls 1f
// Here both have the same sign and absA > absB.
movs r0, #1
// If a == b, then the Z flag is set, so we can get the correct final value
// into r0 by simply or'ing with 1 if Z is clear.
// For Thumb-1, r0 contains -1 if a < b, 0 if a > b and 0 if a == b.
-#if __ARM_ARCH_ISA_THUMB != 1
+#if !defined(USE_THUMB_1)
it ne
orrne r0, r0, #1
#endif
// Finally, we need to deal with NaNs. If either argument is NaN, replace
// the value in r0 with 1.
-#if __ARM_ARCH_ISA_THUMB == 1
+#if defined(USE_THUMB_1)
LOCAL_LABEL(CHECK_NAN):
movs r6, #0xff
lsls r6, #24
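The lsls-by-1 trick used throughout these comparisons, restated in C: with the sign bit shifted off the top, a float is a zero exactly when the shifted value is 0, and a NaN exactly when it exceeds 0xFF000000 (a sketch using bit copies):

    #include <assert.h>
    #include <math.h>
    #include <stdint.h>
    #include <string.h>

    static uint32_t shifted_bits(float f) {
        uint32_t bits;
        memcpy(&bits, &f, sizeof bits);
        return bits << 1;  /* lsls rN, rM, #1: drop the sign bit */
    }

    int main(void) {
        assert(shifted_bits(0.0f) == 0 && shifted_bits(-0.0f) == 0);
        assert(shifted_bits(nanf("")) > 0xFF000000u);   /* NaN test */
        assert(shifted_bits(INFINITY) == 0xFF000000u);  /* Inf is not NaN */
        return 0;
    }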
vmov r0, s0
vmov r1, s1
#endif
-#if __ARM_ARCH_ISA_THUMB == 1
+#if defined(USE_THUMB_1)
push {r6, lr}
lsls r2, r0, #1
lsls r3, r1, #1
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__unordsf2)
+
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov r0, s0
vmov r1, s1
lsls r2, r0, #1
lsls r3, r1, #1
movs r0, #0
-#if __ARM_ARCH_ISA_THUMB == 1
+#if defined(USE_THUMB_1)
movs r1, #0xff
lsls r1, #24
cmp r2, r1
.syntax unified
.text
-#if __ARM_ARCH_ISA_THUMB == 2
- .thumb
-#endif
+ DEFINE_CODE_STATE
@ int __divmodsi4(int dividend, int divisor, int *remainder)
@ Calculate the quotient and remainder of the (signed) division. The return
@ value is the quotient, the remainder is placed in the variable.
.p2align 3
-#if __ARM_ARCH_ISA_THUMB == 2
-DEFINE_COMPILERRT_THUMB_FUNCTION(__divmodsi4)
-#else
DEFINE_COMPILERRT_FUNCTION(__divmodsi4)
-#endif
#if __ARM_ARCH_EXT_IDIV__
tst r1, r1
beq LOCAL_LABEL(divzero)
#define CLEAR_FRAME_AND_RETURN \
pop {r4, r7, pc}
- .syntax unified
- .text
-#if __ARM_ARCH_ISA_THUMB == 2
- .thumb
-#endif
+ .syntax unified
+ .text
+ DEFINE_CODE_STATE
.p2align 3
// Ok, APCS and AAPCS agree on 32 bit args, so it's safe to use the same routine.
@ int __divsi3(int dividend, int divisor)
@ Calculate and return the quotient of the (signed) division.
-#if __ARM_ARCH_ISA_THUMB == 2
-DEFINE_COMPILERRT_THUMB_FUNCTION(__divsi3)
-#else
DEFINE_COMPILERRT_FUNCTION(__divsi3)
-#endif
#if __ARM_ARCH_EXT_IDIV__
tst r1,r1
beq LOCAL_LABEL(divzero)
#else
ESTABLISH_FRAME
// Set aside the sign of the quotient.
-# if __ARM_ARCH_ISA_THUMB == 1
+# if defined(USE_THUMB_1)
movs r4, r0
eors r4, r1
# else
eor r4, r0, r1
# endif
// Take absolute value of a and b via abs(x) = (x^(x >> 31)) - (x >> 31).
-# if __ARM_ARCH_ISA_THUMB == 1
+# if defined(USE_THUMB_1)
asrs r2, r0, #31
asrs r3, r1, #31
eors r0, r2
// abs(a) / abs(b)
bl SYMBOL_NAME(__udivsi3)
// Apply sign of quotient to result and return.
-# if __ARM_ARCH_ISA_THUMB == 1
+# if defined(USE_THUMB_1)
asrs r4, #31
eors r0, r4
subs r0, r0, r4
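The sign handling above is branch-free: absolute value via xor/subtract with the sign mask, then the same transform re-applies the saved sign to the quotient. In C:

    #include <assert.h>

    /* With s = x >> 31 (arithmetic shift: 0 or -1), (x ^ s) - s == abs(x).
       Applying it again with the saved sign mask restores the sign. */
    static int apply_sign(int x, int s) { return (x ^ s) - s; }

    int main(void) {
        int a = -42, b = 6;
        int sign = (a ^ b) >> 31;            /* eor r4, r0, r1 */
        int q = apply_sign(a, a >> 31) / apply_sign(b, b >> 31);
        assert(apply_sign(q, sign) == -7);   /* abs(a)/abs(b), then re-sign */
        return 0;
    }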
.syntax unified
.text
-#if __ARM_ARCH_ISA_THUMB == 2
- .thumb
-#endif
+ DEFINE_CODE_STATE
@ int __modsi3(int dividend, int divisor)
@ Calculate and return the remainder of the (signed) division.
.p2align 3
-#if __ARM_ARCH_ISA_THUMB == 2
-DEFINE_COMPILERRT_THUMB_FUNCTION(__modsi3)
-#else
DEFINE_COMPILERRT_FUNCTION(__modsi3)
-#endif
#if __ARM_ARCH_EXT_IDIV__
tst r1, r1
beq LOCAL_LABEL(divzero)
.syntax unified
.text
-
-#if __ARM_ARCH_ISA_THUMB == 2
- .thumb
-#endif
+ DEFINE_CODE_STATE
@ unsigned int __udivmodsi4(unsigned int dividend, unsigned int divisor,
@ unsigned int *remainder)
@ value is the quotient, the remainder is placed in the variable.
.p2align 2
-#if __ARM_ARCH_ISA_THUMB == 2
-DEFINE_COMPILERRT_THUMB_FUNCTION(__udivmodsi4)
-#else
DEFINE_COMPILERRT_FUNCTION(__udivmodsi4)
-#endif
#if __ARM_ARCH_EXT_IDIV__
tst r1, r1
beq LOCAL_LABEL(divby0)
clz r3, r1
/* r0 >= r1 implies clz(r0) <= clz(r1), so ip <= r3. */
sub r3, r3, ip
-# if __ARM_ARCH_ISA_THUMB == 2
+# if defined(USE_THUMB_2)
adr ip, LOCAL_LABEL(div0block) + 1
sub ip, ip, r3, lsl #1
# else
mov r3, #0
bx ip
# else
-# if __ARM_ARCH_ISA_THUMB == 2
+# if defined(USE_THUMB_2)
# error THUMB mode requires CLZ or UDIV
# endif
str r4, [sp, #-8]!
.syntax unified
.text
-#if __ARM_ARCH_ISA_THUMB == 2
- .thumb
-#endif
+DEFINE_CODE_STATE
.p2align 2
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_uidiv, __udivsi3)
@ unsigned int __udivsi3(unsigned int dividend, unsigned int divisor)
@ Calculate and return the quotient of the (unsigned) division.
-#if __ARM_ARCH_ISA_THUMB == 2
-DEFINE_COMPILERRT_THUMB_FUNCTION(__udivsi3)
-#else
DEFINE_COMPILERRT_FUNCTION(__udivsi3)
-#endif
#if __ARM_ARCH_EXT_IDIV__
tst r1, r1
beq LOCAL_LABEL(divby0)
#else /* ! __ARM_ARCH_EXT_IDIV__ */
cmp r1, #1
bcc LOCAL_LABEL(divby0)
-#if __ARM_ARCH_ISA_THUMB == 1
+#if defined(USE_THUMB_1)
bne LOCAL_LABEL(num_neq_denom)
JMP(lr)
LOCAL_LABEL(num_neq_denom):
JMPc(lr, eq)
#endif
cmp r0, r1
-#if __ARM_ARCH_ISA_THUMB == 1
+#if defined(USE_THUMB_1)
bhs LOCAL_LABEL(num_ge_denom)
movs r0, #0
JMP(lr)
clz r3, r1
/* r0 >= r1 implies clz(r0) <= clz(r1), so ip <= r3. */
sub r3, r3, ip
-# if __ARM_ARCH_ISA_THUMB == 2
+# if defined(USE_THUMB_2)
adr ip, LOCAL_LABEL(div0block) + 1
sub ip, ip, r3, lsl #1
# else
mov r3, #0
bx ip
# else /* No CLZ Feature */
-# if __ARM_ARCH_ISA_THUMB == 2
+# if defined(USE_THUMB_2)
# error THUMB mode requires CLZ or UDIV
# endif
-# if __ARM_ARCH_ISA_THUMB == 1
+# if defined(USE_THUMB_1)
# define BLOCK_SIZE 10
# else
# define BLOCK_SIZE 12
# endif
mov r2, r0
-# if __ARM_ARCH_ISA_THUMB == 1
+# if defined(USE_THUMB_1)
mov ip, r0
adr r0, LOCAL_LABEL(div0block)
adds r0, #1
# endif
lsrs r3, r2, #16
cmp r3, r1
-# if __ARM_ARCH_ISA_THUMB == 1
+# if defined(USE_THUMB_1)
blo LOCAL_LABEL(skip_16)
movs r2, r3
subs r0, r0, #(16 * BLOCK_SIZE)
lsrs r3, r2, #8
cmp r3, r1
-# if __ARM_ARCH_ISA_THUMB == 1
+# if defined(USE_THUMB_1)
blo LOCAL_LABEL(skip_8)
movs r2, r3
subs r0, r0, #(8 * BLOCK_SIZE)
lsrs r3, r2, #4
cmp r3, r1
-# if __ARM_ARCH_ISA_THUMB == 1
+# if defined(USE_THUMB_1)
blo LOCAL_LABEL(skip_4)
movs r2, r3
subs r0, r0, #(4 * BLOCK_SIZE)
lsrs r3, r2, #2
cmp r3, r1
-# if __ARM_ARCH_ISA_THUMB == 1
+# if defined(USE_THUMB_1)
blo LOCAL_LABEL(skip_2)
movs r2, r3
subs r0, r0, #(2 * BLOCK_SIZE)
# endif
/* Last block, no need to update r2 or r3. */
-# if __ARM_ARCH_ISA_THUMB == 1
+# if defined(USE_THUMB_1)
lsrs r3, r2, #1
cmp r3, r1
blo LOCAL_LABEL(skip_1)
# endif
-#if __ARM_ARCH_ISA_THUMB == 1
+#if defined(USE_THUMB_1)
#define block(shift) \
lsls r2, r1, IMM shift; \
cmp r0, r2; \
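The block(shift) macro unrolls one step of restoring division per quotient bit. A compact C reference for the whole fallback (a sketch; the assembly jumps into the unrolled sequence at the right offset instead of looping):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t udivsi3_ref(uint32_t num, uint32_t denom) {
        uint32_t quot = 0;
        if (denom == 0)
            return 0;  /* the real routine branches to a div-by-zero handler */
        for (int shift = 31; shift >= 0; --shift) {
            if (((uint64_t)denom << shift) <= num) {
                num -= (uint32_t)((uint64_t)denom << shift);
                quot |= 1u << shift;
            }
        }
        return quot;  /* num now holds the remainder, as __umodsi3 needs */
    }

    int main(void) {
        assert(udivsi3_ref(100, 7) == 14);
        assert(udivsi3_ref(7, 100) == 0);
        return 0;
    }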
.syntax unified
.text
-#if __ARM_ARCH_ISA_THUMB == 2
- .thumb
-#endif
+ DEFINE_CODE_STATE
@ unsigned int __umodsi3(unsigned int dividend, unsigned int divisor)
@ Calculate and return the remainder of the (unsigned) division.
.p2align 2
-#if __ARM_ARCH_ISA_THUMB == 2
-DEFINE_COMPILERRT_THUMB_FUNCTION(__umodsi3)
-#else
DEFINE_COMPILERRT_FUNCTION(__umodsi3)
-#endif
#if __ARM_ARCH_EXT_IDIV__
tst r1, r1
beq LOCAL_LABEL(divby0)
clz r3, r1
/* r0 >= r1 implies clz(r0) <= clz(r1), so ip <= r3. */
sub r3, r3, ip
-# if __ARM_ARCH_ISA_THUMB == 2
+# if defined(USE_THUMB_2)
adr ip, LOCAL_LABEL(div0block) + 1
sub ip, ip, r3, lsl #1
# else
sub ip, ip, r3, lsl #3
bx ip
# else
-# if __ARM_ARCH_ISA_THUMB == 2
+# if defined(USE_THUMB_2)
# error THUMB mode requires CLZ or UDIV
# endif
mov r2, r0
}
#if defined(__ARM_EABI__)
-AEABI_RTABI di_int __aeabi_llsl(di_int a, si_int b) {
- return __ashldi3(a, b);
-}
+AEABI_RTABI di_int __aeabi_llsl(di_int a, si_int b) COMPILER_RT_ALIAS(__ashldi3);
#endif
-
}
#if defined(__ARM_EABI__)
-AEABI_RTABI di_int __aeabi_lasr(di_int a, si_int b) {
- return __ashrdi3(a, b);
-}
+AEABI_RTABI di_int __aeabi_lasr(di_int a, si_int b) COMPILER_RT_ALIAS(__ashrdi3);
#endif
-
#endif
#if defined(__arm__)
+
+/*
+ * Determine actual [ARM][THUMB[1][2]] ISA using compiler predefined macros:
+ * - for '-mthumb -march=armv6' compiler defines '__thumb__'
+ * - for '-mthumb -march=armv7' compiler defines '__thumb__' and '__thumb2__'
+ */
+#if defined(__thumb2__) || defined(__thumb__)
+#define DEFINE_CODE_STATE .thumb SEPARATOR
+#define DECLARE_FUNC_ENCODING .thumb_func SEPARATOR
+#if defined(__thumb2__)
+#define USE_THUMB_2
+#define IT(cond) it cond
+#define ITT(cond) itt cond
+#define ITE(cond) ite cond
+#else
+#define USE_THUMB_1
+#define IT(cond)
+#define ITT(cond)
+#define ITE(cond)
+#endif // defined(__thumb2__)
+#else // !defined(__thumb2__) && !defined(__thumb__)
+#define DEFINE_CODE_STATE .arm SEPARATOR
+#define DECLARE_FUNC_ENCODING
+#define IT(cond)
+#define ITT(cond)
+#define ITE(cond)
+#endif
+
+#if defined(USE_THUMB_1) && defined(USE_THUMB_2)
+#error "USE_THUMB_1 and USE_THUMB_2 can't be defined together."
+#endif
+
#if defined(__ARM_ARCH_4T__) || __ARM_ARCH >= 5
#define ARM_HAS_BX
#endif
-#if !defined(__ARM_FEATURE_CLZ) && __ARM_ARCH_ISA_THUMB != 1 && \
+#if !defined(__ARM_FEATURE_CLZ) && !defined(USE_THUMB_1) && \
(__ARM_ARCH >= 6 || (__ARM_ARCH == 5 && !defined(__ARM_ARCH_5__)))
#define __ARM_FEATURE_CLZ
#endif
JMP(ip)
#endif
-#if __ARM_ARCH_ISA_THUMB == 2
-#define IT(cond) it cond
-#define ITT(cond) itt cond
-#define ITE(cond) ite cond
-#else
-#define IT(cond)
-#define ITT(cond)
-#define ITE(cond)
-#endif
-
-#if __ARM_ARCH_ISA_THUMB == 2
+#if defined(USE_THUMB_2)
#define WIDE(op) op.w
#else
#define WIDE(op) op
#endif
+#else // !defined(__arm__)
+#define DECLARE_FUNC_ENCODING
+#define DEFINE_CODE_STATE
#endif
#define GLUE2(a, b) a##b
#endif
#define DEFINE_COMPILERRT_FUNCTION(name) \
+ DEFINE_CODE_STATE \
FILE_LEVEL_DIRECTIVE SEPARATOR \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
DECLARE_SYMBOL_VISIBILITY(name) \
+ DECLARE_FUNC_ENCODING \
SYMBOL_NAME(name):
#define DEFINE_COMPILERRT_THUMB_FUNCTION(name) \
+ DEFINE_CODE_STATE \
FILE_LEVEL_DIRECTIVE SEPARATOR \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
SYMBOL_NAME(name):
#define DEFINE_COMPILERRT_PRIVATE_FUNCTION(name) \
+ DEFINE_CODE_STATE \
FILE_LEVEL_DIRECTIVE SEPARATOR \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
HIDDEN(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_FUNC_ENCODING \
SYMBOL_NAME(name):
#define DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(name) \
+ DEFINE_CODE_STATE \
.globl name SEPARATOR \
SYMBOL_IS_FUNC(name) SEPARATOR \
HIDDEN(name) SEPARATOR \
+ DECLARE_FUNC_ENCODING \
name:
#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
*/
#include "int_lib.h"
+#include <assert.h>
#include <stddef.h>
#if __APPLE__
uintptr_t GetCurrentProcess(void);
#endif
-#if (defined(__FreeBSD__) || defined(__Bitrig__)) && defined(__arm__)
+#if defined(__FreeBSD__) && defined(__arm__)
#include <sys/types.h>
#include <machine/sysarch.h>
#endif
#include <machine/sysarch.h>
#endif
-#if defined(__mips__)
+#if defined(__OpenBSD__) && defined(__mips__)
+ #include <sys/types.h>
+ #include <machine/sysarch.h>
+#endif
+
+#if defined(__linux__) && defined(__mips__)
#include <sys/cachectl.h>
#include <sys/syscall.h>
#include <unistd.h>
* clear_mips_cache - Invalidates instruction cache for Mips.
*/
static void clear_mips_cache(const void* Addr, size_t Size) {
- asm volatile (
+ __asm__ volatile (
".set push\n"
".set noreorder\n"
".set noat\n"
* so there is nothing to do
*/
#elif defined(__arm__) && !defined(__APPLE__)
- #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__Bitrig__)
+ #if defined(__FreeBSD__) || defined(__NetBSD__)
struct arm_sync_icache_args arg;
arg.addr = (uintptr_t)start;
: "=r"(start_reg)
: "r"(syscall_nr), "r"(start_reg), "r"(end_reg),
"r"(flags));
- if (start_reg != 0) {
- compilerrt_abort();
- }
+ assert(start_reg == 0 && "Cache flush syscall failed.");
#elif defined(_WIN32)
FlushInstructionCache(GetCurrentProcess(), start, end - start);
#else
compilerrt_abort();
#endif
-#elif defined(__mips__)
+#elif defined(__linux__) && defined(__mips__)
const uintptr_t start_int = (uintptr_t) start;
const uintptr_t end_int = (uintptr_t) end;
#if defined(__ANDROID__) && defined(__LP64__)
#else
syscall(__NR_cacheflush, start, (end_int - start_int), BCACHE);
#endif
+#elif defined(__mips__) && defined(__OpenBSD__)
+ cacheflush(start, (uintptr_t)end - (uintptr_t)start, BCACHE);
#elif defined(__aarch64__) && !defined(__APPLE__)
uint64_t xstart = (uint64_t)(uintptr_t) start;
uint64_t xend = (uint64_t)(uintptr_t) end;
 * uintptr_t in case this runs in an ILP32 environment.
*/
const size_t dcache_line_size = 4 << ((ctr_el0 >> 16) & 15);
- for (addr = xstart; addr < xend; addr += dcache_line_size)
+ for (addr = xstart & ~(dcache_line_size - 1); addr < xend;
+ addr += dcache_line_size)
__asm __volatile("dc cvau, %0" :: "r"(addr));
__asm __volatile("dsb ish");
const size_t icache_line_size = 4 << ((ctr_el0 >> 0) & 15);
- for (addr = xstart; addr < xend; addr += icache_line_size)
+ for (addr = xstart & ~(icache_line_size - 1); addr < xend;
+ addr += icache_line_size)
__asm __volatile("ic ivau, %0" :: "r"(addr));
__asm __volatile("isb sy");
+#elif defined(__powerpc64__)
+ const size_t line_size = 32;
+ const size_t len = (uintptr_t)end - (uintptr_t)start;
+
+ const uintptr_t mask = ~(line_size - 1);
+ const uintptr_t start_line = ((uintptr_t)start) & mask;
+ const uintptr_t end_line = ((uintptr_t)start + len + line_size - 1) & mask;
+
+ for (uintptr_t line = start_line; line < end_line; line += line_size)
+ __asm__ volatile("dcbf 0, %0" : : "r"(line));
+ __asm__ volatile("sync");
+
+ for (uintptr_t line = start_line; line < end_line; line += line_size)
+ __asm__ volatile("icbi 0, %0" : : "r"(line));
+ __asm__ volatile("isync");
#else
#if __APPLE__
/* On Darwin, sys_icache_invalidate() provides this functionality */
#endif
#endif
}
-
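Both the AArch64 change above and the new PowerPC path round addresses down to a cache-line boundary before probing, so the lines containing an unaligned start and end are fully covered. The masking idiom in isolation (line size must be a power of two, which both paths guarantee):

    #include <assert.h>
    #include <stdint.h>

    static uintptr_t line_floor(uintptr_t addr, uintptr_t line) {
        return addr & ~(line - 1);              /* start of addr's line */
    }

    static uintptr_t line_ceil(uintptr_t addr, uintptr_t line) {
        return (addr + line - 1) & ~(line - 1); /* first line boundary at or past addr */
    }

    int main(void) {
        assert(line_floor(0x1234, 64) == 0x1200);
        assert(line_ceil(0x1234, 64) == 0x1240);
        return 0;
    }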
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI int __aeabi_dcmpun(fp_t a, fp_t b) {
return __unorddf2(a, b);
}
+#else
+AEABI_RTABI int __aeabi_dcmpun(fp_t a, fp_t b) COMPILER_RT_ALIAS(__unorddf2);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI int __aeabi_fcmpun(fp_t a, fp_t b) {
return __unordsf2(a, b);
}
+#else
+AEABI_RTABI int __aeabi_fcmpun(fp_t a, fp_t b) COMPILER_RT_ALIAS(__unordsf2);
+#endif
#endif
-
AMD_BTVER1,
AMD_BTVER2,
AMDFAM17H,
+ INTEL_KNM,
CPU_TYPE_MAX
};
INTEL_COREI7_BROADWELL,
INTEL_COREI7_SKYLAKE,
INTEL_COREI7_SKYLAKE_AVX512,
+ INTEL_COREI7_CANNONLAKE,
CPU_SUBTYPE_MAX
};
*Subtype = INTEL_COREI7_SKYLAKE_AVX512; // "skylake-avx512"
break;
+ // Cannonlake:
+ case 0x66:
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_COREI7_CANNONLAKE; // "cannonlake"
+ break;
+
case 0x1c: // Most 45 nm Intel Atom processors
case 0x26: // 45 nm Atom Lincroft
case 0x27: // 32 nm Atom Medfield
*Type = INTEL_KNL; // knl
break;
+ case 0x85:
+ *Type = INTEL_KNM; // knm
+ break;
+
default: // Unknown family 6 CPU.
break;
break;
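For reference, a model number like 0x66 is assembled from the CPUID leaf-1 signature before it reaches this switch. A sketch of the standard decoding for family 6 parts:

    /* EAX from CPUID(1): stepping[3:0], model[7:4], family[11:8],
       extended model[19:16]. For families 6 and 15 the extended-model
       bits are folded in, which is how Cannonlake appears as 0x66 and
       Knights Mill as 0x85. */
    static unsigned full_model(unsigned eax) {
        unsigned model = (eax >> 4) & 0xf;
        unsigned family = (eax >> 8) & 0xf;
        if (family == 6 || family == 15)
            model += ((eax >> 16) & 0xf) << 4;
        return model;
    }

    int main(void) {
        /* family 6, model nibble 6, extended model 6 -> 0x66 */
        return full_model(0x00060660) == 0x66 ? 0 : 1;
    }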
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_ddiv(fp_t a, fp_t b) {
return __divdf3(a, b);
}
+#else
+AEABI_RTABI fp_t __aeabi_ddiv(fp_t a, fp_t b) COMPILER_RT_ALIAS(__divdf3);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_fdiv(fp_t a, fp_t b) {
return __divsf3(a, b);
}
+#else
+AEABI_RTABI fp_t __aeabi_fdiv(fp_t a, fp_t b) COMPILER_RT_ALIAS(__divsf3);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
-AEABI_RTABI si_int __aeabi_idiv(si_int a, si_int b) {
- return __divsi3(a, b);
-}
+AEABI_RTABI si_int __aeabi_idiv(si_int a, si_int b) COMPILER_RT_ALIAS(__divsi3);
#endif
-
#include <malloc.h>
#include <stdio.h>
#include <assert.h>
-#include <immintrin.h>
static LPCRITICAL_SECTION emutls_mutex;
static DWORD emutls_tls_index = TLS_OUT_OF_INDEXES;
/* Provide atomic load/store functions for emutls_get_index if built with MSVC.
*/
#if !defined(__ATOMIC_RELEASE)
+#include <intrin.h>
enum { __ATOMIC_ACQUIRE = 2, __ATOMIC_RELEASE = 3 };
static __inline uintptr_t __atomic_load_n(void *ptr, unsigned type) {
assert(type == __ATOMIC_ACQUIRE);
+ // These return the previous value - but since we do an OR with 0,
+ // it's equivalent to a plain load.
#ifdef _WIN64
- return (uintptr_t) _load_be_u64(ptr);
+ return InterlockedOr64(ptr, 0);
#else
- return (uintptr_t) _load_be_u32(ptr);
+ return InterlockedOr(ptr, 0);
#endif
}
static __inline void __atomic_store_n(void *ptr, uintptr_t val, unsigned type) {
assert(type == __ATOMIC_RELEASE);
-#ifdef _WIN64
- _store_be_u64(ptr, val);
-#else
- _store_be_u32(ptr, val);
-#endif
+ InterlockedExchangePointer((void *volatile *)ptr, (void *)val);
}
#endif
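A usage sketch with hypothetical names (not emutls's actual call sites), showing why the acquire/release pairing matters: a writer initializes data, then publishes an index; a reader that observes the index with acquire ordering is guaranteed to observe the data too. The Interlocked shims above give MSVC builds exactly these semantics.

    #include <assert.h>
    #include <stdint.h>

    static uintptr_t published_index;  /* 0 means "not yet published" */
    static int table[16];

    static void writer(void) {
        table[3] = 42;  /* initialize first */
        __atomic_store_n(&published_index, 3, __ATOMIC_RELEASE);
    }

    static int reader(void) {
        uintptr_t idx = __atomic_load_n(&published_index, __ATOMIC_ACQUIRE);
        return idx ? table[idx] : -1;
    }

    int main(void) {
        writer();
        assert(reader() == 42);
        return 0;
    }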
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
-#include <Windows.h>
+#include <windows.h>
#else
#ifndef __APPLE__
#include <unistd.h>
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI float __aeabi_h2f(uint16_t a) {
return __extendhfsf2(a);
}
+#else
+AEABI_RTABI float __aeabi_h2f(uint16_t a) COMPILER_RT_ALIAS(__extendhfsf2);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI double __aeabi_f2d(float a) {
return __extendsfdf2(a);
}
+#else
+AEABI_RTABI double __aeabi_f2d(float a) COMPILER_RT_ALIAS(__extendsfdf2);
+#endif
#endif
-
#endif
#if defined(__ARM_EABI__)
-AEABI_RTABI di_int
-#if defined(__SOFT_FP__)
-__aeabi_d2lz(fp_t a) {
-#else
-__aeabi_d2lz(double a) {
-#endif
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI di_int __aeabi_d2lz(fp_t a) {
return __fixdfdi(a);
}
+#else
+AEABI_RTABI di_int __aeabi_d2lz(fp_t a) COMPILER_RT_ALIAS(__fixdfdi);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI si_int __aeabi_d2iz(fp_t a) {
return __fixdfsi(a);
}
+#else
+AEABI_RTABI si_int __aeabi_d2iz(fp_t a) COMPILER_RT_ALIAS(__fixdfsi);
+#endif
#endif
-
#endif
#if defined(__ARM_EABI__)
-AEABI_RTABI di_int
-#if defined(__SOFT_FP__)
-__aeabi_f2lz(fp_t a) {
-#else
-__aeabi_f2lz(float a) {
-#endif
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI di_int __aeabi_f2lz(fp_t a) {
return __fixsfdi(a);
}
+#else
+AEABI_RTABI di_int __aeabi_f2lz(fp_t a) COMPILER_RT_ALIAS(__fixsfdi);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI si_int __aeabi_f2iz(fp_t a) {
return __fixsfsi(a);
}
+#else
+AEABI_RTABI si_int __aeabi_f2iz(fp_t a) COMPILER_RT_ALIAS(__fixsfsi);
+#endif
#endif
-
#endif
#if defined(__ARM_EABI__)
-AEABI_RTABI du_int
-#if defined(__SOFT_FP__)
-__aeabi_d2ulz(fp_t a) {
-#else
-__aeabi_d2ulz(double a) {
-#endif
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI du_int __aeabi_d2ulz(fp_t a) {
return __fixunsdfdi(a);
}
+#else
+AEABI_RTABI du_int __aeabi_d2ulz(fp_t a) COMPILER_RT_ALIAS(__fixunsdfdi);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI su_int __aeabi_d2uiz(fp_t a) {
return __fixunsdfsi(a);
}
+#else
+AEABI_RTABI su_int __aeabi_d2uiz(fp_t a) COMPILER_RT_ALIAS(__fixunsdfsi);
+#endif
#endif
-
#endif
#if defined(__ARM_EABI__)
-AEABI_RTABI du_int
-#if defined(__SOFT_FP__)
-__aeabi_f2ulz(fp_t a) {
-#else
-__aeabi_f2ulz(float a) {
-#endif
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI du_int __aeabi_f2ulz(fp_t a) {
return __fixunssfdi(a);
}
+#else
+AEABI_RTABI du_int __aeabi_f2ulz(fp_t a) COMPILER_RT_ALIAS(__fixunssfdi);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI su_int __aeabi_f2uiz(fp_t a) {
return __fixunssfsi(a);
}
+#else
+AEABI_RTABI su_int __aeabi_f2uiz(fp_t a) COMPILER_RT_ALIAS(__fixunssfsi);
+#endif
#endif
-
#endif
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI double __aeabi_l2d(di_int a) {
return __floatdidf(a);
}
+#else
+AEABI_RTABI double __aeabi_l2d(di_int a) COMPILER_RT_ALIAS(__floatdidf);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI float __aeabi_l2f(di_int a) {
return __floatdisf(a);
}
+#else
+AEABI_RTABI float __aeabi_l2f(di_int a) COMPILER_RT_ALIAS(__floatdisf);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_i2d(int a) {
return __floatsidf(a);
}
+#else
+AEABI_RTABI fp_t __aeabi_i2d(int a) COMPILER_RT_ALIAS(__floatsidf);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_i2f(int a) {
return __floatsisf(a);
}
+#else
+AEABI_RTABI fp_t __aeabi_i2f(int a) COMPILER_RT_ALIAS(__floatsisf);
+#endif
#endif
-
#endif
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI double __aeabi_ul2d(du_int a) {
return __floatundidf(a);
}
+#else
+AEABI_RTABI double __aeabi_ul2d(du_int a) COMPILER_RT_ALIAS(__floatundidf);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI float __aeabi_ul2f(du_int a) {
return __floatundisf(a);
}
+#else
+AEABI_RTABI float __aeabi_ul2f(du_int a) COMPILER_RT_ALIAS(__floatundisf);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_ui2d(unsigned int a) {
return __floatunsidf(a);
}
+#else
+AEABI_RTABI fp_t __aeabi_ui2d(unsigned int a) COMPILER_RT_ALIAS(__floatunsidf);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_ui2f(unsigned int a) {
return __floatunsisf(a);
}
+#else
+AEABI_RTABI fp_t __aeabi_ui2f(unsigned int a) COMPILER_RT_ALIAS(__floatunsisf);
+#endif
#endif
-
#endif /* *BSD */
-#if defined(__OpenBSD__) || defined(__Bitrig__)
+#if defined(__OpenBSD__)
#include <machine/endian.h>
#if _BYTE_ORDER == _BIG_ENDIAN
#define _YUGA_BIG_ENDIAN 0
#endif /* _BYTE_ORDER */
-#endif /* OpenBSD and Bitrig. */
+#endif /* OpenBSD */
/* .. */
#if defined(__ELF__)
#define FNALIAS(alias_name, original_name) \
- void alias_name() __attribute__((alias(#original_name)))
+ void alias_name() __attribute__((__alias__(#original_name)))
+#define COMPILER_RT_ALIAS(aliasee) __attribute__((__alias__(#aliasee)))
#else
#define FNALIAS(alias, name) _Pragma("GCC error(\"alias unsupported on this file format\")")
+#define COMPILER_RT_ALIAS(aliasee) _Pragma("GCC error(\"alias unsupported on this file format\")")
#endif
/* ABI macro definitions */
}
#if defined(__ARM_EABI__)
-AEABI_RTABI di_int __aeabi_llsr(di_int a, si_int b) {
- return __lshrdi3(a, b);
-}
+AEABI_RTABI di_int __aeabi_llsr(di_int a, si_int b) COMPILER_RT_ALIAS(__lshrdi3);
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_dmul(fp_t a, fp_t b) {
return __muldf3(a, b);
}
+#else
+AEABI_RTABI fp_t __aeabi_dmul(fp_t a, fp_t b) COMPILER_RT_ALIAS(__muldf3);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
-AEABI_RTABI di_int __aeabi_lmul(di_int a, di_int b) {
- return __muldi3(a, b);
-}
+AEABI_RTABI di_int __aeabi_lmul(di_int a, di_int b) COMPILER_RT_ALIAS(__muldi3);
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_fmul(fp_t a, fp_t b) {
return __mulsf3(a, b);
}
+#else
+AEABI_RTABI fp_t __aeabi_fmul(fp_t a, fp_t b) COMPILER_RT_ALIAS(__mulsf3);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_dneg(fp_t a) {
return __negdf2(a);
}
+#else
+AEABI_RTABI fp_t __aeabi_dneg(fp_t a) COMPILER_RT_ALIAS(__negdf2);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_fneg(fp_t a) {
return __negsf2(a);
}
+#else
+AEABI_RTABI fp_t __aeabi_fneg(fp_t a) COMPILER_RT_ALIAS(__negsf2);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_dsub(fp_t a, fp_t b) {
return __subdf3(a, b);
}
+#else
+AEABI_RTABI fp_t __aeabi_dsub(fp_t a, fp_t b) COMPILER_RT_ALIAS(__subdf3);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_fsub(fp_t a, fp_t b) {
return __subsf3(a, b);
}
+#else
+AEABI_RTABI fp_t __aeabi_fsub(fp_t a, fp_t b) COMPILER_RT_ALIAS(__subsf3);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI uint16_t __aeabi_d2h(double a) {
return __truncdfhf2(a);
}
+#else
+AEABI_RTABI uint16_t __aeabi_d2h(double a) COMPILER_RT_ALIAS(__truncdfhf2);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI float __aeabi_d2f(double a) {
return __truncdfsf2(a);
}
+#else
+AEABI_RTABI float __aeabi_d2f(double a) COMPILER_RT_ALIAS(__truncdfsf2);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI uint16_t __aeabi_f2h(float a) {
return __truncsfhf2(a);
}
+#else
+AEABI_RTABI uint16_t __aeabi_f2h(float a) COMPILER_RT_ALIAS(__truncsfhf2);
+#endif
#endif
-
}
#if defined(__ARM_EABI__)
-AEABI_RTABI su_int __aeabi_uidiv(su_int n, su_int d) {
- return __udivsi3(n, d);
-}
+AEABI_RTABI su_int __aeabi_uidiv(su_int n, su_int d) COMPILER_RT_ALIAS(__udivsi3);
#endif
-