#include <cstdarg>  // va_list/va_start/va_arg used by the wrappers below

#if defined(HAVE_CONFIG_H) && !defined(HAVE_LARGE_FILE_SUPPORT)
// Bind fcntl to the oldest versioned symbol available on each architecture.
extern "C" int fcntl_old(int fd, int cmd, ...);
#if defined(__i386__)  /* assumption: the first arch branch is not shown in the original */
__asm(".symver fcntl_old,fcntl@GLIBC_2.0");
#elif defined(__amd64__)
__asm(".symver fcntl_old,fcntl@GLIBC_2.2.5");
#elif defined(__arm__)
__asm(".symver fcntl_old,fcntl@GLIBC_2.4");
#elif defined(__aarch64__)
__asm(".symver fcntl_old,fcntl@GLIBC_2.17");
#endif

// With -Wl,--wrap=fcntl the linker routes every call to fcntl through this shim.
extern "C" int __wrap_fcntl(int fd, int cmd, ...) {
  va_list vargs;
  va_start(vargs, cmd);
  // Forward the optional third argument as a pointer-sized value.
  int ret = fcntl_old(fd, cmd, va_arg(vargs, void *));
  va_end(vargs);
  return ret;
}

extern "C" int __wrap_fcntl64(int fd, int cmd, ...) {
  va_list vargs;
  va_start(vargs, cmd);
  int ret = fcntl_old(fd, cmd, va_arg(vargs, void *));
  va_end(vargs);
  return ret;
}
#endif
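None of these shims do anything by themselves: the final link has to redirect the real calls to them with GNU ld's --wrap option. A minimal usage sketch follows; the file names and the exact command lines are assumptions for illustration, not taken from the original build.

// wrap_demo.cpp - hypothetical caller used to exercise the fcntl shim.
//
// Assumed build (GNU toolchain):
//   g++ -c glibc_compat.cpp wrap_demo.cpp
//   g++ glibc_compat.o wrap_demo.o -Wl,--wrap=fcntl -Wl,--wrap=fcntl64 -o wrap_demo
//
// With --wrap=fcntl, every undefined reference to fcntl in these objects is
// resolved to __wrap_fcntl instead, which then calls the old versioned symbol.
#include <cstdio>
#include <fcntl.h>
#include <unistd.h>

int main() {
  int fd = open("/dev/null", O_RDONLY);
  int flags = fcntl(fd, F_GETFL, 0);  // routed through __wrap_fcntl by the linker
  std::printf("F_GETFL -> %d\n", flags);
  close(fd);
  return 0;
}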
// The same treatment is applied to the libm functions whose symbol versions
// changed in glibc 2.29 (see the note below).
extern "C" double exp_old(double x);
#if defined(__i386__)  /* assumed, as above */
__asm(".symver exp_old,exp@GLIBC_2.1");
#elif defined(__amd64__)
__asm(".symver exp_old,exp@GLIBC_2.2.5");
#elif defined(__arm__)
__asm(".symver exp_old,exp@GLIBC_2.4");
#elif defined(__aarch64__)
__asm(".symver exp_old,exp@GLIBC_2.17");
#endif
extern "C" double log_old(double x);
#if defined(__i386__)  /* assumed, as above */
__asm(".symver log_old,log@GLIBC_2.1");
#elif defined(__amd64__)
__asm(".symver log_old,log@GLIBC_2.2.5");
#elif defined(__arm__)
__asm(".symver log_old,log@GLIBC_2.4");
#elif defined(__aarch64__)
__asm(".symver log_old,log@GLIBC_2.17");
#endif
extern "C" double log2_old(double x);
#if defined(__i386__)  /* assumed, as above */
__asm(".symver log2_old,log2@GLIBC_2.1");
#elif defined(__amd64__)
__asm(".symver log2_old,log2@GLIBC_2.2.5");
#elif defined(__arm__)
__asm(".symver log2_old,log2@GLIBC_2.4");
#elif defined(__aarch64__)
__asm(".symver log2_old,log2@GLIBC_2.17");
#endif
extern "C" double pow_old(double x, double y);
#if defined(__i386__)  /* assumed, as above */
__asm(".symver pow_old,pow@GLIBC_2.1");
#elif defined(__amd64__)
__asm(".symver pow_old,pow@GLIBC_2.2.5");
#elif defined(__arm__)
__asm(".symver pow_old,pow@GLIBC_2.4");
#elif defined(__aarch64__)
__asm(".symver pow_old,pow@GLIBC_2.17");
#endif
// The math wrappers take and return double (the symbol versions above refer to
// the double-precision libm functions) and simply forward to the old symbols.
extern "C" double __wrap_exp(double x)  { return exp_old(x); }
extern "C" double __wrap_log2(double x) { return log2_old(x); }
extern "C" double __wrap_log(double x)  { return log_old(x); }
extern "C" double __wrap_pow(double x, double y) { return pow_old(x, y); }
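As with fcntl, the math wrappers only take effect when the link is performed with -Wl,--wrap=exp -Wl,--wrap=log -Wl,--wrap=log2 -Wl,--wrap=pow; the exact flag spelling is an assumption about the surrounding build rather than something stated in this excerpt.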
Starting with glibc 2.29, exp, log, log2, and pow have optimized implementations that are exported under the new GLIBC_2.29 symbol version, so a binary built against a current glibc would otherwise require GLIBC_2.29 at run time; binding the wrappers to the older versioned symbols keeps it loadable on systems with an older glibc.
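To confirm that the trick worked, one can print the glibc version in use and check which symbol versions the finished binary imports. The following sketch is illustrative only; the file names, link line, and the objdump check are assumptions, not part of the original project.

// version_check.cpp - hypothetical smoke test for the wrapped math functions.
//
// Assumed link (GNU toolchain):
//   g++ glibc_compat.o version_check.o \
//       -Wl,--wrap=exp -Wl,--wrap=log -Wl,--wrap=log2 -Wl,--wrap=pow -o version_check
//
// Afterwards, "objdump -T version_check | grep GLIBC_2.29" should list nothing
// for exp/log/log2/pow, because those imports are bound to the older versions.
#include <cmath>
#include <cstdio>
#include <gnu/libc-version.h>

int main() {
  std::printf("running against glibc %s\n", gnu_get_libc_version());

  volatile double x = 2.0;  // volatile keeps the compiler from folding the calls away
  std::printf("exp(%g)    = %g\n", (double)x, std::exp(x));        // resolves to __wrap_exp
  std::printf("log(%g)    = %g\n", (double)x, std::log(x));        // resolves to __wrap_log
  std::printf("log2(%g)   = %g\n", (double)x, std::log2(x));       // resolves to __wrap_log2
  std::printf("pow(%g,10) = %g\n", (double)x, std::pow(x, 10.0));  // resolves to __wrap_pow
  return 0;
}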