OpenSDE Packages Database (without history before r20070)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2937 lines
67 KiB

  1. # --- T2-COPYRIGHT-NOTE-BEGIN ---
  2. # This copyright note is auto-generated by ./scripts/Create-CopyPatch.
  3. #
  4. # T2 SDE: package/.../glibc/x86_64-string.patch
  5. # Copyright (C) 2006 The T2 SDE Project
  6. #
  7. # More information can be found in the files COPYING and README.
  8. #
  9. # This patch file is dual-licensed. It is available under the license the
  10. # patched project is licensed under, as long as it is an OpenSource license
  11. # as defined at http://www.opensource.org/ (e.g. BSD, X11) or under the terms
  12. # of the GNU General Public License as published by the Free Software
  13. # Foundation; either version 2 of the License, or (at your option) any later
  14. # version.
  15. # --- T2-COPYRIGHT-NOTE-END ---
  16. diff -Npruw -x CVS -x vssver.scc -x powerpc -x sync_file_range.c libc/sysdeps/unix/sysv/linux/x86_64/dl-procinfo.c libc/sysdeps/unix/sysv/linux/x86_64/dl-procinfo.c
  17. --- libc/sysdeps/unix/sysv/linux/x86_64/dl-procinfo.c 2005-12-14 02:09:28.000000000 -0600
  18. +++ libc/sysdeps/unix/sysv/linux/x86_64/dl-procinfo.c 2006-04-14 16:38:16.819949000 -0500
  19. @@ -1,5 +1,5 @@
  20. #ifdef IS_IN_ldconfig
  21. # include <sysdeps/i386/dl-procinfo.c>
  22. #else
  23. -# include <sysdeps/generic/dl-procinfo.c>
  24. +# include <sysdeps/x86_64/dl-procinfo.c>
  25. #endif
  26. diff -Npruw -x CVS -x vssver.scc -x powerpc -x sync_file_range.c libc/sysdeps/x86_64/bzero.S libc/sysdeps/x86_64/bzero.S
  27. --- libc/sysdeps/x86_64/bzero.S 2002-08-31 12:30:07.000000000 -0500
  28. +++ libc/sysdeps/x86_64/bzero.S 2006-05-05 15:23:27.884691000 -0500
  29. @@ -1,3 +1,5 @@
  30. +#define USE_AS_BZERO
  31. #define memset __bzero
  32. #include <sysdeps/x86_64/memset.S>
  33. +
  34. weak_alias (__bzero, bzero)
  35. diff -Npruw -x CVS -x vssver.scc -x powerpc -x sync_file_range.c libc/sysdeps/x86_64/dl-machine.h libc/sysdeps/x86_64/dl-machine.h
  36. --- libc/sysdeps/x86_64/dl-machine.h 2005-07-31 12:49:44.000000000 -0500
  37. +++ libc/sysdeps/x86_64/dl-machine.h 2006-05-09 15:17:03.570496000 -0500
  38. @@ -1,4 +1,5 @@
  39. -/* Machine-dependent ELF dynamic relocation inline functions. x86-64 version.
  40. +/* Machine-dependent ELF dynamic relocation inline functions (x86-64 version).
  41. +
  42. Copyright (C) 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
  43. This file is part of the GNU C Library.
  44. Contributed by Andreas Jaeger <aj@suse.de>.
  45. @@ -219,6 +220,53 @@ dl_platform_init (void)
  46. if (GLRO(dl_platform) != NULL && *GLRO(dl_platform) == '\0')
  47. /* Avoid an empty string which would disturb us. */
  48. GLRO(dl_platform) = NULL;
  49. +
  50. + asm volatile
  51. + (
  52. + "mov $0x80000000, %%eax # get highest level of support \n\t"
  53. + "cpuid \n\t"
  54. + "cmp $0x80000006, %%eax # check for L2 info support \n\t"
  55. + "jb 1f \n\t"
  56. + "xor %%eax, %%eax # get manufacturer string \n\t"
  57. + "cpuid \n\t"
  58. + "cmp $0x68747541, %%ebx # check for 'Auth'... \n\t"
  59. + "jne 4f \n\t"
  60. + "cmp $0x69746e65, %%edx # 'enti'... \n\t"
  61. + "jne 4f \n\t"
  62. + "cmp $0x444d4163, %%ecx # 'cAMD' \n\t"
  63. + "je 2f \n\t"
  64. + "4: \n\t"
  65. + "cmp $0x756e6547, %%ebx # check for 'Genu'... \n\t"
  66. + "jne 1f \n\t"
  67. + "cmp $0x49656e69, %%edx # 'ineI'... \n\t"
  68. + "jne 1f \n\t"
  69. + "cmp $0x6c65746e, %%ecx # 'ntel' \n\t"
  70. + "je 3f \n\t"
  71. + "jmp 1f \n\t"
  72. + "2: # AMD \n\t"
  73. + "mov $0x80000001, %%eax # get features support\n\t"
  74. + "cpuid \n\t"
  75. + "test $1 << 31, %%edx # check for 3DNow! support\n\t"
  76. + "setnzb %2 \n\t"
  77. + "mov $0x80000005, %%eax # get L1 info for AMD\n\t"
  78. + "cpuid \n\t"
  79. + "shr $24, %%ecx \n\t"
  80. + "shl $10, %%ecx # convert from KB to B \n\t"
  81. + "mov %%rcx, %0 \n\t"
  82. + "3: # AMD, Intel \n\t"
  83. + "mov $0x80000006, %%eax # get L2 info\n\t"
  84. + "cpuid \n\t"
  85. + "shr $16, %%ecx \n\t"
  86. + "shl $10, %%ecx # convert from KB to B \n\t"
  87. + "mov %%rcx, %1 \n\t"
  88. + "1: # other manufacturers\n\t"
  89. + : "=m" (GLRO (dl_cache1size)), "=m" (GLRO (dl_cache2size)), "=m" (GLRO (dl_prefetchw))
  90. + :
  91. + : "%rax", "%rbx", "%rcx", "%rdx", "cc"
  92. + );
  93. +
  94. + GLRO (dl_cache1sizehalf) = GLRO (dl_cache1size) / 2;
  95. + GLRO (dl_cache2sizehalf) = GLRO (dl_cache2size) / 2;
  96. }
  97. static inline Elf64_Addr
  98. diff -Npruw -x CVS -x vssver.scc -x powerpc -x sync_file_range.c libc/sysdeps/x86_64/dl-procinfo.c libc/sysdeps/x86_64/dl-procinfo.c
  99. --- libc/sysdeps/x86_64/dl-procinfo.c 1969-12-31 18:00:00.000000000 -0600
  100. +++ libc/sysdeps/x86_64/dl-procinfo.c 2006-05-05 16:23:38.030341000 -0500
  101. @@ -0,0 +1,123 @@
  102. +/* Data for x86-64 version of processor capability information.
  103. + Copyright (C) 2004 Free Software Foundation, Inc.
  104. + This file is part of the GNU C Library.
  105. + Contributed by Andreas Jaeger <aj@suse.de>, 2004.
  106. +
  107. + The GNU C Library is free software; you can redistribute it and/or
  108. + modify it under the terms of the GNU Lesser General Public
  109. + License as published by the Free Software Foundation; either
  110. + version 2.1 of the License, or (at your option) any later version.
  111. +
  112. + The GNU C Library is distributed in the hope that it will be useful,
  113. + but WITHOUT ANY WARRANTY; without even the implied warranty of
  114. + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  115. + Lesser General Public License for more details.
  116. +
  117. + You should have received a copy of the GNU Lesser General Public
  118. + License along with the GNU C Library; if not, write to the Free
  119. + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  120. + 02111-1307 USA. */
  121. +
  122. +/* This information must be kept in sync with the _DL_HWCAP_COUNT and
  123. + _DL_PLATFORM_COUNT definitions in procinfo.h.
  124. +
  125. + If anything should be added here check whether the size of each string
  126. + is still ok with the given array size.
  127. +
  128. + All the #ifdefs in the definitions are quite irritating but
  129. + necessary if we want to avoid duplicating the information. There
  130. + are three different modes:
  131. +
  132. + - PROCINFO_DECL is defined. This means we are only interested in
  133. + declarations.
  134. +
  135. + - PROCINFO_DECL is not defined:
  136. +
  137. + + if SHARED is defined the file is included in an array
  138. + initializer. The .element = { ... } syntax is needed.
  139. +
  140. + + if SHARED is not defined a normal array initialization is
  141. + needed.
  142. + */
  143. +
  144. +#ifndef PROCINFO_CLASS
  145. +#define PROCINFO_CLASS
  146. +#endif
  147. +
  148. + /* _dl_cache1size: size of L1 cache */
  149. +#if !defined PROCINFO_DECL && defined SHARED
  150. + ._dl_cache1size
  151. +#else
  152. +PROCINFO_CLASS long int _dl_cache1size
  153. +#endif
  154. +#ifndef PROCINFO_DECL
  155. += 1024 * 32 /* defaults to 32k */
  156. +#endif
  157. +#if !defined SHARED || defined PROCINFO_DECL
  158. +;
  159. +#else
  160. +,
  161. +#endif
  162. +
  163. + /* _dl_cache1sizehalf: 1/2 size of L1 cache */
  164. +#if !defined PROCINFO_DECL && defined SHARED
  165. + ._dl_cache1sizehalf
  166. +#else
  167. +PROCINFO_CLASS long int _dl_cache1sizehalf
  168. +#endif
  169. +#ifndef PROCINFO_DECL
  170. += 1024 * 32 / 2 /* defaults to 16k */
  171. +#endif
  172. +#if !defined SHARED || defined PROCINFO_DECL
  173. +;
  174. +#else
  175. +,
  176. +#endif
  177. +
  178. + /* _dl_cache2size: size of L2 cache */
  179. +#if !defined PROCINFO_DECL && defined SHARED
  180. + ._dl_cache2size
  181. +#else
  182. +PROCINFO_CLASS long int _dl_cache2size
  183. +#endif
  184. +#ifndef PROCINFO_DECL
  185. += 1024 * 1024 /* defaults to 1M */
  186. +#endif
  187. +#if !defined SHARED || defined PROCINFO_DECL
  188. +;
  189. +#else
  190. +,
  191. +#endif
  192. +
  193. + /* _dl_cache2sizehalf: 1/2 size of L2 cache */
  194. +#if !defined PROCINFO_DECL && defined SHARED
  195. + ._dl_cache2sizehalf
  196. +#else
  197. +PROCINFO_CLASS long int _dl_cache2sizehalf
  198. +#endif
  199. +#ifndef PROCINFO_DECL
  200. += 1024 * 1024 / 2 /* defaults to 512k */
  201. +#endif
  202. +#if !defined SHARED || defined PROCINFO_DECL
  203. +;
  204. +#else
  205. +,
  206. +#endif
  207. +
  208. + /* _dl_prefetchw: prefetchw supported */
  209. +#if !defined PROCINFO_DECL && defined SHARED
  210. + ._dl_prefetchw
  211. +#else
  212. +PROCINFO_CLASS int _dl_prefetchw
  213. +#endif
  214. +#ifndef PROCINFO_DECL
  215. += 0 /* defaults to no */
  216. +#endif
  217. +#if !defined SHARED || defined PROCINFO_DECL
  218. +;
  219. +#else
  220. +,
  221. +#endif
  222. +
  223. +#undef PROCINFO_DECL
  224. +#undef PROCINFO_CLASS
  225. diff -Npruw -x CVS -x vssver.scc -x powerpc -x sync_file_range.c libc/sysdeps/x86_64/elf/rtld-global-offsets.sym libc/sysdeps/x86_64/elf/rtld-global-offsets.sym
  226. --- libc/sysdeps/x86_64/elf/rtld-global-offsets.sym 1969-12-31 18:00:00.000000000 -0600
  227. +++ libc/sysdeps/x86_64/elf/rtld-global-offsets.sym 2006-04-18 14:46:40.056693000 -0500
  228. @@ -0,0 +1,11 @@
  229. +#define SHARED 1
  230. +
  231. +#include <ldsodefs.h>
  232. +
  233. +#define rtdl_global_offsetof(mem) offsetof (struct rtld_global_ro, mem)
  234. +
  235. +RTLD_GLOBAL_DL_CACHE1SIZE rtdl_global_offsetof (_dl_cache1size)
  236. +RTLD_GLOBAL_DL_CACHE1SIZEHALF rtdl_global_offsetof (_dl_cache1sizehalf)
  237. +RTLD_GLOBAL_DL_CACHE2SIZE rtdl_global_offsetof (_dl_cache2size)
  238. +RTLD_GLOBAL_DL_CACHE2SIZEHALF rtdl_global_offsetof (_dl_cache2sizehalf)
  239. +RTLD_GLOBAL_DL_PREFETCHW rtdl_global_offsetof (_dl_prefetchw)
  240. diff -Npruw -x CVS -x vssver.scc -x powerpc -x sync_file_range.c libc/sysdeps/x86_64/Makefile libc/sysdeps/x86_64/Makefile
  241. --- libc/sysdeps/x86_64/Makefile 2004-08-16 01:46:14.000000000 -0500
  242. +++ libc/sysdeps/x86_64/Makefile 2006-04-14 16:38:16.802950000 -0500
  243. @@ -4,6 +4,9 @@ long-double-fcts = yes
  244. ifeq ($(subdir),csu)
  245. sysdep_routines += hp-timing
  246. elide-routines.os += hp-timing
  247. +
  248. +# get offset to rtld_global._dl_*
  249. +gen-as-const-headers += rtld-global-offsets.sym
  250. endif
  251. ifeq ($(subdir),gmon)
  252. diff -Npruw -x CVS -x vssver.scc -x powerpc -x sync_file_range.c libc/sysdeps/x86_64/memcmp.S libc/sysdeps/x86_64/memcmp.S
  253. --- libc/sysdeps/x86_64/memcmp.S 1969-12-31 18:00:00.000000000 -0600
  254. +++ libc/sysdeps/x86_64/memcmp.S 2006-05-18 14:43:07.611277000 -0500
  255. @@ -0,0 +1,328 @@
  256. +# (c) 2002 Advanced Micro Devices, Inc.
  257. +# YOUR USE OF THIS CODE IS SUBJECT TO THE TERMS
  258. +# AND CONDITIONS OF THE GNU LESSER GENERAL PUBLIC
  259. +# LICENSE FOUND IN THE "README" FILE THAT IS
  260. +# INCLUDED WITH THIS FILE
  261. +
  262. +#include "sysdep.h"
  263. +#if defined PIC && defined SHARED
  264. +# include <rtld-global-offsets.h>
  265. +#endif
  266. +
  267. +#if defined PIC && defined SHARED
  268. + .globl _rtld_local_ro
  269. + .hidden _rtld_local_ro
  270. + .set _rtld_local_ro,_rtld_global_ro
  271. +#endif
  272. +
  273. + .text
  274. +
  275. +ENTRY (memcmp) # (const void *, const void*, size_t)
  276. +
  277. +L(try1): # up to 8B
  278. + cmp $8, %rdx
  279. + jae L(1after)
  280. +
  281. +L(1): # 1-byte
  282. + test %rdx, %rdx
  283. + mov $0, %eax
  284. + jz L(exit)
  285. +
  286. +L(1loop):
  287. + movzbl (%rdi), %eax
  288. + movzbl (%rsi), %ecx
  289. + sub %ecx, %eax
  290. + jnz L(exit)
  291. +
  292. + dec %rdx
  293. +
  294. + lea 1 (%rdi), %rdi
  295. + lea 1 (%rsi), %rsi
  296. +
  297. + jnz L(1loop)
  298. +
  299. +L(exit):
  300. + rep
  301. + ret
  302. +
  303. + .p2align 4
  304. +
  305. +L(1after):
  306. +
  307. +L(8try): # up to 32B
  308. + cmp $32, %rdx
  309. + jae L(8after)
  310. +
  311. +L(8): # 8-byte
  312. + mov %edx, %ecx
  313. + shr $3, %ecx
  314. + jz L(1)
  315. +
  316. + .p2align 4
  317. +
  318. +L(8loop):
  319. + mov (%rsi), %rax
  320. + cmp (%rdi), %rax
  321. + jne L(1)
  322. +
  323. + sub $8, %rdx
  324. + dec %ecx
  325. +
  326. + lea 8 (%rsi), %rsi
  327. + lea 8 (%rdi), %rdi
  328. +
  329. + jnz L(8loop)
  330. +
  331. +L(8skip):
  332. + and $7, %edx
  333. + jnz L(1)
  334. +
  335. + xor %eax, %eax
  336. + ret
  337. +
  338. + .p2align 4
  339. +
  340. +L(8after):
  341. +
  342. +L(32try): # up to 2KB
  343. + cmp $2048, %rdx
  344. + ja L(32after)
  345. +
  346. +L(32): # 32-byte
  347. + mov %edx, %ecx
  348. + shr $5, %ecx
  349. + jz L(8)
  350. +
  351. + .p2align 4
  352. +
  353. +L(32loop):
  354. + mov (%rsi), %rax
  355. + mov 8 (%rsi), %r8
  356. + mov 16 (%rsi), %r9
  357. + mov 24 (%rsi), %r10
  358. + sub (%rdi), %rax
  359. + sub 8 (%rdi), %r8
  360. + sub 16 (%rdi), %r9
  361. + sub 24 (%rdi), %r10
  362. +
  363. + or %rax, %r8
  364. + or %r9, %r10
  365. + or %r8, %r10
  366. + jnz L(8)
  367. +
  368. + sub $32, %rdx
  369. + dec %ecx
  370. +
  371. + lea 32 (%rsi), %rsi
  372. + lea 32 (%rdi), %rdi
  373. +
  374. + jnz L(32loop)
  375. +
  376. +L(32skip):
  377. + and $31, %edx
  378. + jnz L(8)
  379. +
  380. + xor %eax, %eax
  381. + ret
  382. +
  383. + .p2align 4
  384. +
  385. +L(32after):
  386. +
  387. +L(srctry):
  388. + mov %esi, %r8d # align by source
  389. +
  390. + and $7, %r8d
  391. + jz L(srcafter) # not unaligned
  392. +
  393. +L(src): # align
  394. + lea -8 (%r8, %rdx), %rdx
  395. + sub $8, %r8d
  396. +
  397. +# .p2align 4
  398. +
  399. +L(srcloop):
  400. + movzbl (%rdi), %eax
  401. + movzbl (%rsi), %ecx
  402. + sub %ecx, %eax
  403. + jnz L(exit)
  404. +
  405. + inc %r8d
  406. +
  407. + lea 1 (%rdi), %rdi
  408. + lea 1 (%rsi), %rsi
  409. +
  410. + jnz L(srcloop)
  411. +
  412. + .p2align 4
  413. +
  414. +L(srcafter):
  415. +
  416. +L(64try): # up to 1/2 L1
  417. +#ifdef PIC
  418. +# ifdef SHARED
  419. + mov _rtld_local_ro@GOTPCREL (%rip), %rcx
  420. + mov RTLD_GLOBAL_DL_CACHE1SIZEHALF (%rcx), %rcx
  421. +# else
  422. + mov _dl_cache1sizehalf@GOTPCREL (%rip), %rcx
  423. + mov (%rcx), %rcx
  424. +# endif
  425. +#else
  426. + mov _dl_cache1sizehalf, %rcx
  427. +#endif
  428. + cmp %rdx, %rcx
  429. + cmova %rdx, %rcx
  430. +
  431. +L(64): # 64-byte
  432. + shr $6, %rcx
  433. + jz L(32)
  434. +
  435. + .p2align 4
  436. +
  437. +L(64loop):
  438. + mov (%rsi), %rax
  439. + mov 8 (%rsi), %r8
  440. + sub (%rdi), %rax
  441. + sub 8 (%rdi), %r8
  442. + or %r8, %rax
  443. +
  444. + mov 16 (%rsi), %r9
  445. + mov 24 (%rsi), %r10
  446. + sub 16 (%rdi), %r9
  447. + sub 24 (%rdi), %r10
  448. + or %r10, %r9
  449. +
  450. + or %r9, %rax
  451. + jnz L(32)
  452. +
  453. + mov 32 (%rsi), %rax
  454. + mov 40 (%rsi), %r8
  455. + sub 32 (%rdi), %rax
  456. + sub 40 (%rdi), %r8
  457. + or %r8, %rax
  458. +
  459. + mov 48 (%rsi), %r9
  460. + mov 56 (%rsi), %r10
  461. + sub 48 (%rdi), %r9
  462. + sub 56 (%rdi), %r10
  463. + or %r10, %r9
  464. +
  465. + or %r9, %rax
  466. + jnz L(32)
  467. +
  468. + lea 64 (%rsi), %rsi
  469. + lea 64 (%rdi), %rdi
  470. +
  471. + sub $64, %rdx
  472. + dec %rcx
  473. + jnz L(64loop)
  474. +
  475. +# .p2align 4
  476. +
  477. +L(64skip):
  478. + cmp $2048, %rdx
  479. + ja L(64after)
  480. +
  481. + test %edx, %edx
  482. + jnz L(32)
  483. +
  484. + xor %eax, %eax
  485. + ret
  486. +
  487. + .p2align 4
  488. +
  489. +L(64after):
  490. +
  491. +L(128try):
  492. +
  493. +L(128): # 128-byte
  494. + mov %rdx, %rcx
  495. + shr $7, %rcx
  496. + jz L(128skip)
  497. +
  498. + .p2align 4
  499. +
  500. +L(128loop):
  501. + prefetcht0 512 (%rsi)
  502. + prefetcht0 512 (%rdi)
  503. +
  504. + mov (%rsi), %rax
  505. + mov 8 (%rsi), %r8
  506. + sub (%rdi), %rax
  507. + sub 8 (%rdi), %r8
  508. + mov 16 (%rsi), %r9
  509. + mov 24 (%rsi), %r10
  510. + sub 16 (%rdi), %r9
  511. + sub 24 (%rdi), %r10
  512. +
  513. + or %r8, %rax
  514. + or %r9, %r10
  515. + or %r10, %rax
  516. +
  517. + mov 32 (%rsi), %r8
  518. + mov 40 (%rsi), %r9
  519. + sub 32 (%rdi), %r8
  520. + sub 40 (%rdi), %r9
  521. + mov 48 (%rsi), %r10
  522. + mov 56 (%rsi), %r11
  523. + sub 48 (%rdi), %r10
  524. + sub 56 (%rdi), %r11
  525. +
  526. + or %r9, %r8
  527. + or %r11, %r10
  528. + or %r10, %r8
  529. +
  530. + or %r8, %rax
  531. + jnz L(32)
  532. +
  533. + prefetcht0 576 (%rsi)
  534. + prefetcht0 576 (%rdi)
  535. +
  536. + mov 64 (%rsi), %rax
  537. + mov 72 (%rsi), %r8
  538. + sub 64 (%rdi), %rax
  539. + sub 72 (%rdi), %r8
  540. + mov 80 (%rsi), %r9
  541. + mov 88 (%rsi), %r10
  542. + sub 80 (%rdi), %r9
  543. + sub 88 (%rdi), %r10
  544. +
  545. + or %r8, %rax
  546. + or %r9, %r10
  547. + or %r10, %rax
  548. +
  549. + mov 96 (%rsi), %r8
  550. + mov 104 (%rsi), %r9
  551. + sub 96 (%rdi), %r8
  552. + sub 104 (%rdi), %r9
  553. + mov 112 (%rsi), %r10
  554. + mov 120 (%rsi), %r11
  555. + sub 112 (%rdi), %r10
  556. + sub 120 (%rdi), %r11
  557. +
  558. + or %r9, %r8
  559. + or %r11, %r10
  560. + or %r10, %r8
  561. +
  562. + or %r8, %rax
  563. + jnz L(32)
  564. +
  565. + sub $128, %rdx
  566. + dec %rcx
  567. +
  568. + lea 128 (%rsi), %rsi
  569. + lea 128 (%rdi), %rdi
  570. +
  571. + jnz L(128loop)
  572. +
  573. +L(128skip):
  574. + and $127, %edx
  575. + jnz L(32)
  576. +
  577. + xor %eax, %eax
  578. + ret
  579. +
  580. +END (memcmp)
  581. +
  582. +#undef bcmp
  583. +weak_alias (memcmp, bcmp)
  584. diff -Npruw -x CVS -x vssver.scc -x powerpc -x sync_file_range.c libc/sysdeps/x86_64/memcpy.S libc/sysdeps/x86_64/memcpy.S
  585. --- libc/sysdeps/x86_64/memcpy.S 2004-10-17 23:17:08.000000000 -0500
  586. +++ libc/sysdeps/x86_64/memcpy.S 2006-05-18 15:23:45.311446000 -0500
  587. @@ -1,32 +1,22 @@
  588. -/* Highly optimized version for x86-64.
  589. - Copyright (C) 1997, 2000, 2002, 2003, 2004 Free Software Foundation, Inc.
  590. - This file is part of the GNU C Library.
  591. - Based on i586 version contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
  592. -
  593. - The GNU C Library is free software; you can redistribute it and/or
  594. - modify it under the terms of the GNU Lesser General Public
  595. - License as published by the Free Software Foundation; either
  596. - version 2.1 of the License, or (at your option) any later version.
  597. -
  598. - The GNU C Library is distributed in the hope that it will be useful,
  599. - but WITHOUT ANY WARRANTY; without even the implied warranty of
  600. - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  601. - Lesser General Public License for more details.
  602. -
  603. - You should have received a copy of the GNU Lesser General Public
  604. - License along with the GNU C Library; if not, write to the Free
  605. - Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  606. - 02111-1307 USA. */
  607. +# (c) 2002 Advanced Micro Devices, Inc.
  608. +# YOUR USE OF THIS CODE IS SUBJECT TO THE TERMS
  609. +# AND CONDITIONS OF THE GNU LESSER GENERAL PUBLIC
  610. +# LICENSE FOUND IN THE "README" FILE THAT IS
  611. +# INCLUDED WITH THIS FILE
  612. #include <sysdep.h>
  613. #include "asm-syntax.h"
  614. #include "bp-sym.h"
  615. #include "bp-asm.h"
  616. +#if defined PIC && defined SHARED
  617. +# include <rtld-global-offsets.h>
  618. +#endif
  619. -/* BEWARE: `#ifdef memcpy' means that memcpy is redefined as `mempcpy',
  620. - and the return value is the byte after the last one copied in
  621. - the destination. */
  622. -#define MEMPCPY_P (defined memcpy)
  623. +#if defined PIC && defined SHARED
  624. + .globl _rtld_local_ro
  625. + .hidden _rtld_local_ro
  626. + .set _rtld_local_ro,_rtld_global_ro
  627. +#endif
  628. .text
  629. #if defined PIC && !defined NOT_IN_libc
  630. @@ -35,67 +25,480 @@ ENTRY (__memcpy_chk)
  631. jb HIDDEN_JUMPTARGET (__chk_fail)
  632. END (__memcpy_chk)
  633. #endif
  634. -ENTRY (BP_SYM (memcpy))
  635. - /* Cutoff for the big loop is a size of 32 bytes since otherwise
  636. - the loop will never be entered. */
  637. - cmpq $32, %rdx
  638. - movq %rdx, %rcx
  639. -#if !MEMPCPY_P
  640. - movq %rdi, %r10 /* Save value. */
  641. +
  642. +ENTRY (memcpy) # (void *, const void*, size_t)
  643. +
  644. +L(1try): # up to 16B
  645. + cmp $16, %rdx
  646. +#if defined (USE_AS_MEMPCPY)
  647. + lea (%rdi, %rdx), %rax
  648. +#else
  649. + mov %rdi, %rax
  650. #endif
  651. + jae L(1after)
  652. +
  653. +L(1): # 1-byte once
  654. + test $1, %dl
  655. + jz L(1a)
  656. +
  657. + movzbl (%rsi), %ecx
  658. + mov %cl, (%rdi)
  659. +
  660. + inc %rsi
  661. + inc %rdi
  662. +
  663. +L(1a): # 2-byte once
  664. + test $2, %dl
  665. + jz L(1b)
  666. +
  667. + movzwl (%rsi), %ecx
  668. + mov %cx, (%rdi)
  669. +
  670. + add $2, %rsi
  671. + add $2, %rdi
  672. +
  673. +L(1b): # 4-byte once
  674. + test $4, %dl
  675. + jz L(1c)
  676. +
  677. + mov (%rsi), %ecx
  678. + mov %ecx, (%rdi)
  679. +
  680. + add $4, %rsi
  681. + add $4, %rdi
  682. +
  683. +L(1c): # 8-byte once
  684. + test $8, %dl
  685. + jz L(exit)
  686. +
  687. + mov (%rsi), %rcx
  688. + mov %rcx, (%rdi)
  689. +
  690. +L(exit):
  691. + rep
  692. + ret
  693. +
  694. + .p2align 4
  695. +
  696. +L(1after):
  697. + push %rax
  698. +
  699. +L(8try): # up to 32B
  700. + cmp $32, %rdx
  701. + jae L(8after)
  702. +
  703. +L(8): # 8-byte loop
  704. + mov %edx, %ecx
  705. + shr $3, %ecx
  706. + jz L(8skip)
  707. +
  708. + .p2align 4
  709. +
  710. +L(8loop):
  711. + dec %ecx
  712. +
  713. + mov (%rsi), %rax
  714. + mov %rax, (%rdi)
  715. +
  716. + lea 8 (%rsi), %rsi
  717. + lea 8 (%rdi), %rdi
  718. +
  719. + jnz L(8loop)
  720. +
  721. +L(8skip):
  722. + and $7, %edx # check for left overs
  723. + pop %rax
  724. + jnz L(1)
  725. +
  726. + rep
  727. + ret
  728. +
  729. + .p2align 4
  730. +
  731. +L(8after):
  732. +
  733. +L(aligntry):
  734. + mov %edi, %r8d # align by destination
  735. +
  736. + and $7, %r8d
  737. + jz L(alignafter) # not unaligned
  738. +
  739. +L(align): # align
  740. + lea -8 (%r8, %rdx), %rdx
  741. + sub $8, %r8d
  742. +
  743. + .p2align 4
  744. +
  745. +L(alignloop):
  746. + inc %r8d
  747. +
  748. + mov (%rsi), %al
  749. + mov %al, (%rdi)
  750. +
  751. + lea 1 (%rsi), %rsi
  752. + lea 1 (%rdi), %rdi
  753. +
  754. + jnz L(alignloop)
  755. +
  756. + .p2align 4
  757. +
  758. +L(alignafter):
  759. +
  760. +L(32try): # up to 1KB
  761. + cmp $1024, %rdx
  762. + ja L(32after)
  763. +
  764. +L(32): # 32-byte loop
  765. + mov %edx, %ecx
  766. + shr $5, %ecx
  767. + jz L(32skip)
  768. +
  769. + .p2align 4
  770. +
  771. +L(32loop):
  772. + dec %ecx
  773. +
  774. + mov (%rsi), %rax
  775. + mov 8 (%rsi), %r8
  776. + mov 16 (%rsi), %r9
  777. + mov 24 (%rsi), %r10
  778. +
  779. + mov %rax, (%rdi)
  780. + mov %r8, 8 (%rdi)
  781. + mov %r9, 16 (%rdi)
  782. + mov %r10, 24 (%rdi)
  783. +
  784. + lea 32 (%rsi), %rsi
  785. + lea 32 (%rdi), %rdi
  786. +
  787. + jz L(32skip)
  788. - /* We need this in any case. */
  789. - cld
  790. + dec %ecx
  791. - jbe 1f
  792. + mov (%rsi), %rax
  793. + mov 8 (%rsi), %r8
  794. + mov 16 (%rsi), %r9
  795. + mov 24 (%rsi), %r10
  796. - /* Align destination. */
  797. - movq %rdi, %rax
  798. - negq %rax
  799. - andq $7, %rax
  800. - subq %rax, %rcx
  801. - xchgq %rax, %rcx
  802. + mov %rax, (%rdi)
  803. + mov %r8, 8 (%rdi)
  804. + mov %r9, 16 (%rdi)
  805. + mov %r10, 24 (%rdi)
  806. - rep; movsb
  807. + lea 32 (%rsi), %rsi
  808. + lea 32 (%rdi), %rdi
  809. - movq %rax, %rcx
  810. - subq $32, %rcx
  811. - js 2f
  812. + jnz L(32loop)
  813. .p2align 4
  814. -3:
  815. - /* Now correct the loop counter. Please note that in the following
  816. - code the flags are not changed anymore. */
  817. - subq $32, %rcx
  818. +L(32skip):
  819. + and $31, %edx # check for left overs
  820. + jnz L(8)
  821. +
  822. + pop %rax
  823. + ret
  824. +
  825. + .p2align 4
  826. +
  827. +L(32after):
  828. +
  829. +L(fasttry): # first 1/2 L1
  830. +#ifdef PIC
  831. +# ifdef SHARED
  832. + mov _rtld_local_ro@GOTPCREL (%rip), %r11
  833. + mov RTLD_GLOBAL_DL_CACHE1SIZEHALF (%r11), %r11
  834. +# else
  835. + mov _dl_cache1sizehalf@GOTPCREL (%rip), %r11
  836. + mov (%r11), %r11
  837. +# endif
  838. +#else
  839. + mov _dl_cache1sizehalf, %r11
  840. +#endif
  841. + cmp %rdx, %r11
  842. + cmova %rdx, %r11
  843. - movq (%rsi), %rax
  844. - movq 8(%rsi), %rdx
  845. - movq 16(%rsi), %r8
  846. - movq 24(%rsi), %r9
  847. - movq %rax, (%rdi)
  848. - movq %rdx, 8(%rdi)
  849. - movq %r8, 16(%rdi)
  850. - movq %r9, 24(%rdi)
  851. +L(fast): # good ol' MOVS
  852. + mov %r11, %rcx
  853. + and $-8, %r11
  854. + shr $3, %rcx
  855. + jz L(fastskip)
  856. +
  857. + rep
  858. + movsq
  859. +
  860. +L(fastskip):
  861. + sub %r11, %rdx # check for more
  862. + test $-8, %rdx
  863. + jnz L(fastafter)
  864. +
  865. + and $7, %edx # check for left overs
  866. + pop %rax
  867. + jnz L(1)
  868. - leaq 32(%rsi), %rsi
  869. - leaq 32(%rdi), %rdi
  870. + rep
  871. + ret
  872. - jns 3b
  873. + .p2align 4
  874. - /* Correct extra loop counter modification. */
  875. -2: addq $32, %rcx
  876. -1: rep; movsb
  877. +L(fastafter):
  878. -#if MEMPCPY_P
  879. - movq %rdi, %rax /* Set return value. */
  880. +L(pretry): # first 1/2 L2
  881. +#ifdef PIC
  882. +# ifdef SHARED
  883. + mov _rtld_local_ro@GOTPCREL (%rip), %r8
  884. + mov RTLD_GLOBAL_DL_CACHE2SIZEHALF (%r8), %r8
  885. #else
  886. - movq %r10, %rax /* Set return value. */
  887. + mov _dl_cache2sizehalf@GOTPCREL (%rip), %r8
  888. + mov (%r8), %r8
  889. +# endif
  890. +#else
  891. + mov _dl_cache2sizehalf, %r8
  892. +#endif
  893. + cmp %rdx, %r8
  894. + cmova %rdx, %r8
  895. +L(pre): # 64-byte with prefetching
  896. + mov %r8, %rcx
  897. + and $-64, %r8
  898. + shr $6, %rcx
  899. + jz L(preskip)
  900. +
  901. + push %r14
  902. + push %r13
  903. + push %r12
  904. + push %rbx
  905. +
  906. +#ifdef PIC
  907. +# ifdef SHARED
  908. + mov _rtld_local_ro@GOTPCREL (%rip), %rax
  909. + cmpl $0, RTLD_GLOBAL_DL_PREFETCHW (%rax)
  910. +# else
  911. + mov _dl_prefetchw@GOTPCREL (%rip), %rax
  912. + cmpl $0, (%rax)
  913. +# endif
  914. +#else
  915. + cmpl $0, _dl_prefetchw
  916. #endif
  917. + jz L(preloop)
  918. +
  919. + .p2align 4
  920. +
  921. +L(prewloop): # to state M
  922. + dec %rcx
  923. +
  924. + mov (%rsi), %rax
  925. + mov 8 (%rsi), %rbx
  926. + mov 16 (%rsi), %r9
  927. + mov 24 (%rsi), %r10
  928. + mov 32 (%rsi), %r11
  929. + mov 40 (%rsi), %r12
  930. + mov 48 (%rsi), %r13
  931. + mov 56 (%rsi), %r14
  932. +
  933. + prefetcht0 0 + 896 (%rsi)
  934. + prefetcht0 64 + 896 (%rsi)
  935. +
  936. + mov %rax, (%rdi)
  937. + mov %rbx, 8 (%rdi)
  938. + mov %r9, 16 (%rdi)
  939. + mov %r10, 24 (%rdi)
  940. + mov %r11, 32 (%rdi)
  941. + mov %r12, 40 (%rdi)
  942. + mov %r13, 48 (%rdi)
  943. + mov %r14, 56 (%rdi)
  944. +
  945. + lea 64 (%rsi), %rsi
  946. + lea 64 (%rdi), %rdi
  947. +
  948. + jz L(prebail)
  949. +
  950. + dec %rcx
  951. +
  952. + mov (%rsi), %rax
  953. + mov 8 (%rsi), %rbx
  954. + mov 16 (%rsi), %r9
  955. + mov 24 (%rsi), %r10
  956. + mov 32 (%rsi), %r11
  957. + mov 40 (%rsi), %r12
  958. + mov 48 (%rsi), %r13
  959. + mov 56 (%rsi), %r14
  960. +
  961. + mov %rax, (%rdi)
  962. + mov %rbx, 8 (%rdi)
  963. + mov %r9, 16 (%rdi)
  964. + mov %r10, 24 (%rdi)
  965. + mov %r11, 32 (%rdi)
  966. + mov %r12, 40 (%rdi)
  967. + mov %r13, 48 (%rdi)
  968. + mov %r14, 56 (%rdi)
  969. +
  970. + prefetchw 896 - 64 (%rdi)
  971. + prefetchw 896 - 0 (%rdi)
  972. +
  973. + lea 64 (%rsi), %rsi
  974. + lea 64 (%rdi), %rdi
  975. +
  976. + jnz L(prewloop)
  977. + jmp L(prebail)
  978. +
  979. + .p2align 4
  980. +
  981. +L(preloop): # to state E
  982. + dec %rcx
  983. +
  984. + mov (%rsi), %rax
  985. + mov 8 (%rsi), %rbx
  986. + mov 16 (%rsi), %r9
  987. + mov 24 (%rsi), %r10
  988. + mov 32 (%rsi), %r11
  989. + mov 40 (%rsi), %r12
  990. + mov 48 (%rsi), %r13
  991. + mov 56 (%rsi), %r14
  992. +
  993. + prefetcht0 896 + 0 (%rsi)
  994. + prefetcht0 896 + 64 (%rsi)
  995. +
  996. + mov %rax, (%rdi)
  997. + mov %rbx, 8 (%rdi)
  998. + mov %r9, 16 (%rdi)
  999. + mov %r10, 24 (%rdi)
  1000. + mov %r11, 32 (%rdi)
  1001. + mov %r12, 40 (%rdi)
  1002. + mov %r13, 48 (%rdi)
  1003. + mov %r14, 56 (%rdi)
  1004. +
  1005. + lea 64 (%rsi), %rsi
  1006. + lea 64 (%rdi), %rdi
  1007. +
  1008. + jz L(prebail)
  1009. +
  1010. + dec %rcx
  1011. +
  1012. + mov (%rsi), %rax
  1013. + mov 8 (%rsi), %rbx
  1014. + mov 16 (%rsi), %r9
  1015. + mov 24 (%rsi), %r10
  1016. + mov 32 (%rsi), %r11
  1017. + mov 40 (%rsi), %r12
  1018. + mov 48 (%rsi), %r13
  1019. + mov 56 (%rsi), %r14
  1020. +
  1021. + prefetcht0 896 - 64 (%rdi)
  1022. + prefetcht0 896 - 0 (%rdi)
  1023. +
  1024. + mov %rax, (%rdi)
  1025. + mov %rbx, 8 (%rdi)
  1026. + mov %r9, 16 (%rdi)
  1027. + mov %r10, 24 (%rdi)
  1028. + mov %r11, 32 (%rdi)
  1029. + mov %r12, 40 (%rdi)
  1030. + mov %r13, 48 (%rdi)
  1031. + mov %r14, 56 (%rdi)
  1032. +
  1033. + lea 64 (%rsi), %rsi
  1034. + lea 64 (%rdi), %rdi
  1035. +
  1036. + jnz L(preloop)
  1037. +
  1038. +L(prebail):
  1039. + pop %rbx
  1040. + pop %r12
  1041. + pop %r13
  1042. + pop %r14
  1043. +
  1044. +# .p2align 4
  1045. +
  1046. +L(preskip):
  1047. + sub %r8, %rdx # check for more
  1048. + test $-64, %rdx
  1049. + jnz L(preafter)
  1050. +
  1051. + and $63, %edx # check for left overs
  1052. + jnz L(32)
  1053. +
  1054. + pop %rax
  1055. + ret
  1056. +
  1057. + .p2align 4
  1058. +
  1059. +L(preafter):
  1060. +
  1061. +L(NTtry):
  1062. +
  1063. +L(NT): # NT 128-byte
  1064. + mov %rdx, %rcx
  1065. + shr $7, %rcx
  1066. + jz L(NTskip)
  1067. +
  1068. + push %r14
  1069. + push %r13
  1070. + push %r12
  1071. +
  1072. + .p2align 4
  1073. +
  1074. +L(NTloop):
  1075. + prefetchnta 768 (%rsi)
  1076. + prefetchnta 832 (%rsi)
  1077. +
  1078. + dec %rcx
  1079. +
  1080. + mov (%rsi), %rax
  1081. + mov 8 (%rsi), %r8
  1082. + mov 16 (%rsi), %r9
  1083. + mov 24 (%rsi), %r10
  1084. + mov 32 (%rsi), %r11
  1085. + mov 40 (%rsi), %r12
  1086. + mov 48 (%rsi), %r13
  1087. + mov 56 (%rsi), %r14
  1088. +
  1089. + movnti %rax, (%rdi)
  1090. + movnti %r8, 8 (%rdi)
  1091. + movnti %r9, 16 (%rdi)
  1092. + movnti %r10, 24 (%rdi)
  1093. + movnti %r11, 32 (%rdi)
  1094. + movnti %r12, 40 (%rdi)
  1095. + movnti %r13, 48 (%rdi)
  1096. + movnti %r14, 56 (%rdi)
  1097. +
  1098. + mov 64 (%rsi), %rax
  1099. + mov 72 (%rsi), %r8
  1100. + mov 80 (%rsi), %r9
  1101. + mov 88 (%rsi), %r10
  1102. + mov 96 (%rsi), %r11
  1103. + mov 104 (%rsi), %r12
  1104. + mov 112 (%rsi), %r13
  1105. + mov 120 (%rsi), %r14
  1106. +
  1107. + movnti %rax, 64 (%rdi)
  1108. + movnti %r8, 72 (%rdi)
  1109. + movnti %r9, 80 (%rdi)
  1110. + movnti %r10, 88 (%rdi)
  1111. + movnti %r11, 96 (%rdi)
  1112. + movnti %r12, 104 (%rdi)
  1113. + movnti %r13, 112 (%rdi)
  1114. + movnti %r14, 120 (%rdi)
  1115. +
  1116. + lea 128 (%rsi), %rsi
  1117. + lea 128 (%rdi), %rdi
  1118. +
  1119. + jnz L(NTloop)
  1120. +
  1121. + mfence # serialize memory operations
  1122. +
  1123. + pop %r12
  1124. + pop %r13
  1125. + pop %r14
  1126. +
  1127. +L(NTskip):
  1128. + and $127, %edx # check for left overs
  1129. + jnz L(32)
  1130. +
  1131. + pop %rax
  1132. ret
  1133. -END (BP_SYM (memcpy))
  1134. -#if !MEMPCPY_P
  1135. +END (memcpy)
  1136. +
  1137. +#ifndef USE_AS_MEMPCPY
  1138. libc_hidden_builtin_def (memcpy)
  1139. #endif
  1140. diff -Npruw -x CVS -x vssver.scc -x powerpc -x sync_file_range.c libc/sysdeps/x86_64/mempcpy.S libc/sysdeps/x86_64/mempcpy.S
  1141. --- libc/sysdeps/x86_64/mempcpy.S 2004-10-17 23:17:08.000000000 -0500
  1142. +++ libc/sysdeps/x86_64/mempcpy.S 2006-05-05 15:24:18.279191000 -0500
  1143. @@ -1,3 +1,4 @@
  1144. +#define USE_AS_MEMPCPY
  1145. #define memcpy __mempcpy
  1146. #define __memcpy_chk __mempcpy_chk
  1147. #include <sysdeps/x86_64/memcpy.S>
  1148. diff -Npruw -x CVS -x vssver.scc -x powerpc -x sync_file_range.c libc/sysdeps/x86_64/memset.S libc/sysdeps/x86_64/memset.S
  1149. --- libc/sysdeps/x86_64/memset.S 2005-03-31 04:00:13.000000000 -0600
  1150. +++ libc/sysdeps/x86_64/memset.S 2006-05-15 11:38:13.737756000 -0500
  1151. @@ -1,145 +1,322 @@
  1152. -/* memset/bzero -- set memory area to CH/0
  1153. - Optimized version for x86-64.
  1154. - Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
  1155. - This file is part of the GNU C Library.
  1156. - Contributed by Andreas Jaeger <aj@suse.de>.
  1157. -
  1158. - The GNU C Library is free software; you can redistribute it and/or
  1159. - modify it under the terms of the GNU Lesser General Public
  1160. - License as published by the Free Software Foundation; either
  1161. - version 2.1 of the License, or (at your option) any later version.
  1162. -
  1163. - The GNU C Library is distributed in the hope that it will be useful,
  1164. - but WITHOUT ANY WARRANTY; without even the implied warranty of
  1165. - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  1166. - Lesser General Public License for more details.
  1167. -
  1168. - You should have received a copy of the GNU Lesser General Public
  1169. - License along with the GNU C Library; if not, write to the Free
  1170. - Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  1171. - 02111-1307 USA. */
  1172. +# (c) 2002 Advanced Micro Devices, Inc.
  1173. +# YOUR USE OF THIS CODE IS SUBJECT TO THE TERMS
  1174. +# AND CONDITIONS OF THE GNU LESSER GENERAL PUBLIC
  1175. +# LICENSE FOUND IN THE "README" FILE THAT IS
  1176. +# INCLUDED WITH THIS FILE
  1177. #include <sysdep.h>
  1178. #include "asm-syntax.h"
  1179. #include "bp-sym.h"
  1180. #include "bp-asm.h"
  1181. +#if defined PIC && defined SHARED
  1182. +# include <rtld-global-offsets.h>
  1183. +#endif
  1184. -/* BEWARE: `#ifdef memset' means that memset is redefined as `bzero' */
  1185. -#define BZERO_P (defined memset)
  1186. -
  1187. -/* This is somehow experimental and could made dependend on the cache
  1188. - size. */
  1189. -#define LARGE $120000
  1190. +#if defined PIC && defined SHARED
  1191. + .globl _rtld_local_ro
  1192. + .hidden _rtld_local_ro
  1193. + .set _rtld_local_ro,_rtld_global_ro
  1194. +#endif
  1195. .text
  1196. -#if !BZERO_P && defined PIC && !defined NOT_IN_libc
  1197. +#if !defined USE_AS_BZERO && defined PIC && !defined NOT_IN_libc
  1198. ENTRY (__memset_chk)
  1199. cmpq %rdx, %rcx
  1200. jb HIDDEN_JUMPTARGET (__chk_fail)
  1201. END (__memset_chk)
  1202. #endif
  1203. -ENTRY (memset)
  1204. -#if BZERO_P
  1205. - mov %rsi,%rdx /* Adjust parameter. */
  1206. - xorl %esi,%esi /* Fill with 0s. */
  1207. -#endif
  1208. - cmp $0x7,%rdx /* Check for small length. */
  1209. - mov %rdi,%rcx /* Save ptr as return value. */
  1210. - jbe 7f
  1211. -#if BZERO_P
  1212. - mov %rsi,%r8 /* Just copy 0. */
  1213. +ENTRY (memset) # (void *, const void*, size_t)
  1214. +
  1215. +#ifdef USE_AS_BZERO
  1216. + mov %rsi, %rdx # memset doubles as bzero
  1217. + xorl %esi, %esi
  1218. +#else
  1219. + mov $0x0101010101010101, %rcx # memset is itself
  1220. + movzx %sil, %rsi
  1221. + imul %rcx, %rsi # replicate 8 times
  1222. +#endif
  1223. +
  1224. +L(try1): # up to 64B
  1225. + cmp $64, %rdx
  1226. + mov %rdi, %rax # return memory block address (even for bzero ())
  1227. + jae L(1after)
  1228. +
  1229. +L(1): # 1-byte loop
  1230. + test $1, %dl
  1231. + jz L(1a)
  1232. +
  1233. + mov %sil, (%rdi)
  1234. + inc %rdi
  1235. +
  1236. +L(1a):
  1237. + test $2, %dl
  1238. + jz L(1b)
  1239. +
  1240. + mov %si, (%rdi)
  1241. + add $2, %rdi
  1242. +
  1243. +L(1b):
  1244. + test $4, %dl
  1245. + jz L(1c)
  1246. +
  1247. + mov %esi, (%rdi)
  1248. + add $4, %rdi
  1249. +
  1250. +L(1c):
  1251. + test $8, %dl
  1252. + jz L(1d)
  1253. +
  1254. + mov %rsi, (%rdi)
  1255. + add $8, %rdi
  1256. +
  1257. +L(1d):
  1258. + test $16, %dl
  1259. + jz L(1e)
  1260. +
  1261. + mov %rsi, (%rdi)
  1262. + mov %rsi, 8 (%rdi)
  1263. + add $16, %rdi
  1264. +
  1265. +L(1e):
  1266. + test $32, %dl
  1267. + jz L(1f)
  1268. +
  1269. + mov %rsi, (%rdi)
  1270. + mov %rsi, 8 (%rdi)
  1271. + mov %rsi, 16 (%rdi)
  1272. + mov %rsi, 24 (%rdi)
  1273. +# add $32, %rdi
  1274. +
  1275. +L(1f):
  1276. +
  1277. +L(exit):
  1278. + rep
  1279. + ret
  1280. +
  1281. + .p2align 4
  1282. +
  1283. +L(1after):
  1284. +
  1285. +L(32try): # up to 512B
  1286. + cmp $512, %rdx
  1287. + ja L(32after)
  1288. +
  1289. +L(32): # 32-byte loop
  1290. + mov %edx, %ecx
  1291. + shr $5, %ecx
  1292. + jz L(32skip)
  1293. +
  1294. + .p2align 4
  1295. +
  1296. +L(32loop):
  1297. + dec %ecx
  1298. +
  1299. + mov %rsi, (%rdi)
  1300. + mov %rsi, 8 (%rdi)
  1301. + mov %rsi, 16 (%rdi)
  1302. + mov %rsi, 24 (%rdi)
  1303. +
  1304. + lea 32 (%rdi), %rdi
  1305. +
  1306. + jz L(32skip)
  1307. +
  1308. + dec %ecx
  1309. +
  1310. + mov %rsi, (%rdi)
  1311. + mov %rsi, 8 (%rdi)
  1312. + mov %rsi, 16 (%rdi)
  1313. + mov %rsi, 24 (%rdi)
  1314. +
  1315. + lea 32 (%rdi), %rdi
  1316. +
  1317. + jnz L(32loop)
  1318. +
  1319. + .p2align 4
  1320. +
  1321. +L(32skip):
  1322. + and $31, %edx # check for left overs
  1323. + jnz L(1)
  1324. +
  1325. + rep
  1326. + ret
  1327. +
  1328. + .p2align 4
  1329. +
  1330. +L(32after):
  1331. +
  1332. +L(aligntry):
  1333. + mov %edi, %ecx # align by destination
  1334. +
  1335. + and $7, %ecx # skip if already aligned
  1336. + jz L(alignafter)
  1337. +
  1338. +L(align): # align loop
  1339. + lea -8 (%rcx, %rdx), %rdx
  1340. + sub $8, %ecx
  1341. +
  1342. + .p2align 4
  1343. +
  1344. +L(alignloop):
  1345. + inc %ecx
  1346. +
  1347. + mov %sil, (%rdi)
  1348. + lea 1 (%rdi), %rdi
  1349. +
  1350. + jnz L(alignloop)
  1351. +
  1352. + .p2align 4
  1353. +
  1354. +L(alignafter):
  1355. +
  1356. +# For MP System half cache size is better,
  1357. +# for UP full cache size is better.
  1358. +# Use half cache size only.
  1359. +L(fasttry): # between 2KB and 1/2 L2
  1360. +#ifdef PIC
  1361. +# ifdef SHARED
  1362. + mov _rtld_local_ro@GOTPCREL (%rip), %r8
  1363. + mov RTLD_GLOBAL_DL_CACHE2SIZEHALF (%r8), %r8
  1364. #else
  1365. - /* Populate 8 bit data to full 64-bit. */
  1366. - movabs $0x0101010101010101,%r8
  1367. - movzbl %sil,%eax
  1368. - imul %rax,%r8
  1369. -#endif
  1370. - test $0x7,%edi /* Check for alignment. */
  1371. - je 2f
  1372. -
  1373. - .p2align 4
  1374. -1: /* Align ptr to 8 byte. */
  1375. - mov %sil,(%rcx)
  1376. - dec %rdx
  1377. - inc %rcx
  1378. - test $0x7,%ecx
  1379. - jne 1b
  1380. -
  1381. -2: /* Check for really large regions. */
  1382. - mov %rdx,%rax
  1383. - shr $0x6,%rax
  1384. - je 4f
  1385. - cmp LARGE, %rdx
  1386. - jae 11f
  1387. -
  1388. - .p2align 4
  1389. -3: /* Copy 64 bytes. */
  1390. - mov %r8,(%rcx)
  1391. - mov %r8,0x8(%rcx)
  1392. - mov %r8,0x10(%rcx)
  1393. - mov %r8,0x18(%rcx)
  1394. - mov %r8,0x20(%rcx)
  1395. - mov %r8,0x28(%rcx)
  1396. - mov %r8,0x30(%rcx)
  1397. - mov %r8,0x38(%rcx)
  1398. - add $0x40,%rcx
  1399. - dec %rax
  1400. - jne 3b
  1401. -
  1402. -4: /* Copy final bytes. */
  1403. - and $0x3f,%edx
  1404. - mov %rdx,%rax
  1405. - shr $0x3,%rax
  1406. - je 6f
  1407. -
  1408. -5: /* First in chunks of 8 bytes. */
  1409. - mov %r8,(%rcx)
  1410. - add $0x8,%rcx
  1411. - dec %rax
  1412. - jne 5b
  1413. -6:
  1414. - and $0x7,%edx
  1415. -7:
  1416. - test %rdx,%rdx
  1417. - je 9f
  1418. -8: /* And finally as bytes (up to 7). */
  1419. - mov %sil,(%rcx)
  1420. - inc %rcx
  1421. - dec %rdx
  1422. - jne 8b
  1423. -9:
  1424. -#if BZERO_P
  1425. - nop
  1426. + mov _dl_cache2sizehalf@GOTPCREL (%rip), %r8
  1427. + mov (%r8), %r8
  1428. +# endif
  1429. #else
  1430. - /* Load result (only if used as memset). */
  1431. - mov %rdi,%rax /* start address of destination is result */
  1432. + mov _dl_cache2sizehalf, %r8
  1433. #endif
  1434. - retq
  1435. + cmp %rdx, %r8
  1436. + cmova %rdx, %r8
  1437. +
  1438. + cmp $2048, %rdx # this is slow for some block sizes
  1439. + jb L(64)
  1440. +
  1441. +L(fast): # microcode loop
  1442. + mov %r8, %rcx
  1443. + and $-8, %r8
  1444. + shr $3, %rcx
  1445. +
  1446. + xchg %rax, %rsi
  1447. +
  1448. + rep
  1449. + stosq
  1450. +
  1451. + xchg %rax, %rsi
  1452. +
  1453. +L(fastskip):
  1454. + sub %r8, %rdx # check for more
  1455. + ja L(64after)
  1456. +
  1457. + and $7, %edx # check for left overs
  1458. + jnz L(1)
  1459. +
  1460. + rep
  1461. + ret
  1462. .p2align 4
  1463. -11: /* Copy 64 bytes without polluting the cache. */
  1464. - /* We could use movntdq %xmm0,(%rcx) here to further
  1465. - speed up for large cases but let's not use XMM registers. */
  1466. - movnti %r8,(%rcx)
  1467. - movnti %r8,0x8(%rcx)
  1468. - movnti %r8,0x10(%rcx)
  1469. - movnti %r8,0x18(%rcx)
  1470. - movnti %r8,0x20(%rcx)
  1471. - movnti %r8,0x28(%rcx)
  1472. - movnti %r8,0x30(%rcx)
  1473. - movnti %r8,0x38(%rcx)
  1474. - add $0x40,%rcx
  1475. - dec %rax
  1476. - jne 11b
  1477. - jmp 4b
  1478. +
  1479. +L(fastafter):
  1480. +
  1481. +L(64try): # up to 2KB
  1482. +
  1483. +L(64): # 64-byte loop
  1484. + mov %r8, %rcx
  1485. + and $-64, %r8
  1486. + shr $6, %rcx
  1487. +
  1488. + dec %rcx # this iteration starts the prefetcher sooner
  1489. +
  1490. + mov %rsi, (%rdi)
  1491. + mov %rsi, 8 (%rdi)
  1492. + mov %rsi, 16 (%rdi)
  1493. + mov %rsi, 24 (%rdi)
  1494. + mov %rsi, 32 (%rdi)
  1495. + mov %rsi, 40 (%rdi)
  1496. + mov %rsi, 48 (%rdi)
  1497. + mov %rsi, 56 (%rdi)
  1498. +
  1499. + lea 64 (%rdi), %rdi
  1500. +
  1501. + .p2align 4
  1502. +
  1503. +L(64loop):
  1504. + dec %rcx
  1505. +
  1506. + mov %rsi, (%rdi)
  1507. + mov %rsi, 8 (%rdi)
  1508. + mov %rsi, 16 (%rdi)
  1509. + mov %rsi, 24 (%rdi)
  1510. + mov %rsi, 32 (%rdi)
  1511. + mov %rsi, 40 (%rdi)
  1512. + mov %rsi, 48 (%rdi)
  1513. + mov %rsi, 56 (%rdi)
  1514. +
  1515. + lea 64 (%rdi), %rdi
  1516. +
  1517. + jnz L(64loop)
  1518. +
  1519. +L(64skip):
  1520. + sub %r8, %rdx # check for more
  1521. + ja L(64after)
  1522. +
  1523. + and $63, %edx # check for left overs
  1524. + jnz L(32)
  1525. +
  1526. + rep
  1527. + ret
  1528. +
  1529. + .p2align 4
  1530. +
  1531. +L(64after):
  1532. +
  1533. +L(NTtry):
  1534. +
  1535. +L(NT): # 128-byte NT loop
  1536. + mov %rdx, %rcx
  1537. + shr $7, %rcx
  1538. + jz L(NTskip)
  1539. +
  1540. + .p2align 4
  1541. +
  1542. +L(NTloop): # on an MP system it would be better to prefetchnta 320 (%rdi) and 384 (%rdi) here, but not so on an 1P system
  1543. + dec %rcx
  1544. +
  1545. + movnti %rsi, (%rdi)
  1546. + movnti %rsi, 8 (%rdi)
  1547. + movnti %rsi, 16 (%rdi)
  1548. + movnti %rsi, 24 (%rdi)
  1549. + movnti %rsi, 32 (%rdi)
  1550. + movnti %rsi, 40 (%rdi)
  1551. + movnti %rsi, 48 (%rdi)
  1552. + movnti %rsi, 56 (%rdi)
  1553. + movnti %rsi, 64 (%rdi)
  1554. + movnti %rsi, 72 (%rdi)
  1555. + movnti %rsi, 80 (%rdi)
  1556. + movnti %rsi, 88 (%rdi)
  1557. + movnti %rsi, 96 (%rdi)
  1558. + movnti %rsi, 104 (%rdi)
  1559. + movnti %rsi, 112 (%rdi)
  1560. + movnti %rsi, 120 (%rdi)
  1561. +
  1562. + lea 128 (%rdi), %rdi
  1563. +
  1564. + jnz L(NTloop)
  1565. +
  1566. + mfence # serialize memory operations
  1567. +
  1568. +L(NTskip):
  1569. + and $127, %edx # check for left overs
  1570. + jnz L(32)
  1571. +
  1572. + rep
  1573. + ret
  1574. END (memset)
  1575. -#if !BZERO_P
  1576. +
  1577. +#ifndef USE_AS_BZERO
  1578. libc_hidden_builtin_def (memset)
  1579. #endif
  1580. -#if !BZERO_P && defined PIC && !defined NOT_IN_libc
  1581. +#if !defined USE_AS_BZERO && defined PIC && !defined NOT_IN_libc
  1582. strong_alias (__memset_chk, __memset_zero_constant_len_parameter)
  1583. .section .gnu.warning.__memset_zero_constant_len_parameter
  1584. .string "memset used with constant zero length parameter; this could be due to transposed parameters"
  1585. diff -Npruw -x CVS -x vssver.scc -x powerpc -x sync_file_range.c libc/sysdeps/x86_64/stpcpy.S libc/sysdeps/x86_64/stpcpy.S
  1586. --- libc/sysdeps/x86_64/stpcpy.S 2004-05-28 01:39:37.000000000 -0500
  1587. +++ libc/sysdeps/x86_64/stpcpy.S 2006-05-05 15:24:41.775991000 -0500
  1588. @@ -1,5 +1,5 @@
  1589. #define USE_AS_STPCPY
  1590. -#define STRCPY __stpcpy
  1591. +#define strcpy __stpcpy
  1592. #include <sysdeps/x86_64/strcpy.S>
  1593. diff -Npruw -x CVS -x vssver.scc -x powerpc -x sync_file_range.c libc/sysdeps/x86_64/stpncpy.S libc/sysdeps/x86_64/stpncpy.S
  1594. --- libc/sysdeps/x86_64/stpncpy.S 1969-12-31 18:00:00.000000000 -0600
  1595. +++ libc/sysdeps/x86_64/stpncpy.S 2006-05-05 15:24:50.748541000 -0500
  1596. @@ -0,0 +1,9 @@
  1597. +#define USE_AS_STRNCPY
  1598. +#define USE_AS_STPCPY
  1599. +#define strcpy __stpncpy
  1600. +
  1601. +#include <sysdeps/x86_64/strcpy.S>
  1602. +
  1603. +weak_alias (__stpncpy, stpncpy)
  1604. +libc_hidden_def (__stpncpy)
  1605. +libc_hidden_builtin_def (stpncpy)
  1606. diff -Npruw -x CVS -x vssver.scc -x powerpc -x sync_file_range.c libc/sysdeps/x86_64/strcpy.S libc/sysdeps/x86_64/strcpy.S
  1607. --- libc/sysdeps/x86_64/strcpy.S 2003-04-29 17:47:18.000000000 -0500
  1608. +++ libc/sysdeps/x86_64/strcpy.S 2006-05-19 13:41:31.281326000 -0500
  1609. @@ -1,159 +1,1141 @@
  1610. -/* strcpy/stpcpy implementation for x86-64.
  1611. - Copyright (C) 2002 Free Software Foundation, Inc.
  1612. - This file is part of the GNU C Library.
  1613. - Contributed by Andreas Jaeger <aj@suse.de>, 2002.
  1614. -
  1615. - The GNU C Library is free software; you can redistribute it and/or
  1616. - modify it under the terms of the GNU Lesser General Public
  1617. - License as published by the Free Software Foundation; either
  1618. - version 2.1 of the License, or (at your option) any later version.
  1619. -
  1620. - The GNU C Library is distributed in the hope that it will be useful,
  1621. - but WITHOUT ANY WARRANTY; without even the implied warranty of
  1622. - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  1623. - Lesser General Public License for more details.
  1624. -
  1625. - You should have received a copy of the GNU Lesser General Public
  1626. - License along with the GNU C Library; if not, write to the Free
  1627. - Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  1628. - 02111-1307 USA. */
  1629. -
  1630. -#include <sysdep.h>
  1631. -#include "asm-syntax.h"
  1632. -#include "bp-sym.h"
  1633. -#include "bp-asm.h"
  1634. +# (c) 2002 Advanced Micro Devices, Inc.
  1635. +# YOUR USE OF THIS CODE IS SUBJECT TO THE TERMS
  1636. +# AND CONDITIONS OF THE GNU LESSER GENERAL PUBLIC
  1637. +# LICENSE FOUND IN THE "README" FILE THAT IS
  1638. +# INCLUDED WITH THIS FILE
  1639. +
  1640. +#include "sysdep.h"
  1641. +#if defined PIC && defined SHARED
  1642. +# include <rtld-global-offsets.h>
  1643. +#endif
  1644. -#ifndef USE_AS_STPCPY
  1645. -# define STRCPY strcpy
  1646. +#if defined PIC && defined SHARED
  1647. + .globl _rtld_local_ro
  1648. + .hidden _rtld_local_ro
  1649. + .set _rtld_local_ro,_rtld_global_ro
  1650. #endif
  1651. .text
  1652. -ENTRY (BP_SYM (STRCPY))
  1653. - movq %rsi, %rcx /* Source register. */
  1654. - andl $7, %ecx /* mask alignment bits */
  1655. - movq %rdi, %rdx /* Duplicate destination pointer. */
  1656. -
  1657. - jz 5f /* aligned => start loop */
  1658. -
  1659. - neg %ecx /* We need to align to 8 bytes. */
  1660. - addl $8,%ecx
  1661. - /* Search the first bytes directly. */
  1662. -0:
  1663. - movb (%rsi), %al /* Fetch a byte */
  1664. - testb %al, %al /* Is it NUL? */
  1665. - movb %al, (%rdx) /* Store it */
  1666. - jz 4f /* If it was NUL, done! */
  1667. - incq %rsi
  1668. - incq %rdx
  1669. - decl %ecx
  1670. - jnz 0b
  1671. -
  1672. -5:
  1673. - movq $0xfefefefefefefeff,%r8
  1674. -
  1675. - /* Now the sources is aligned. Unfortunatly we cannot force
  1676. - to have both source and destination aligned, so ignore the
  1677. - alignment of the destination. */
  1678. +
  1679. +ENTRY (strcpy) # (char *, const char *)
  1680. +
  1681. +#ifdef USE_AS_STRNCPY // (char *, const char *, size_t)
  1682. + test %rdx, %rdx
  1683. + mov %rdx, %r11
  1684. + jz L(exit) # early exit
  1685. +#endif
  1686. +
  1687. + xor %edx, %edx
  1688. +
  1689. +L(aligntry): # between 0 and 7 bytes
  1690. + mov %rsi, %r8 # align by source
  1691. + and $7, %r8
  1692. + jz L(alignafter)
  1693. +
  1694. +L(align): # 8-byte align
  1695. + sub $8, %r8
  1696. +
  1697. .p2align 4
  1698. -1:
  1699. - /* 1st unroll. */
  1700. - movq (%rsi), %rax /* Read double word (8 bytes). */
  1701. - addq $8, %rsi /* Adjust pointer for next word. */
  1702. - movq %rax, %r9 /* Save a copy for NUL finding. */
  1703. - addq %r8, %r9 /* add the magic value to the word. We get
  1704. - carry bits reported for each byte which
  1705. - is *not* 0 */
  1706. - jnc 3f /* highest byte is NUL => return pointer */
  1707. - xorq %rax, %r9 /* (word+magic)^word */
  1708. - orq %r8, %r9 /* set all non-carry bits */
  1709. - incq %r9 /* add 1: if one carry bit was *not* set
  1710. - the addition will not result in 0. */
  1711. -
  1712. - jnz 3f /* found NUL => return pointer */
  1713. -
  1714. - movq %rax, (%rdx) /* Write value to destination. */
  1715. - addq $8, %rdx /* Adjust pointer. */
  1716. -
  1717. - /* 2nd unroll. */
  1718. - movq (%rsi), %rax /* Read double word (8 bytes). */
  1719. - addq $8, %rsi /* Adjust pointer for next word. */
  1720. - movq %rax, %r9 /* Save a copy for NUL finding. */
  1721. - addq %r8, %r9 /* add the magic value to the word. We get
  1722. - carry bits reported for each byte which
  1723. - is *not* 0 */
  1724. - jnc 3f /* highest byte is NUL => return pointer */
  1725. - xorq %rax, %r9 /* (word+magic)^word */
  1726. - orq %r8, %r9 /* set all non-carry bits */
  1727. - incq %r9 /* add 1: if one carry bit was *not* set
  1728. - the addition will not result in 0. */
  1729. -
  1730. - jnz 3f /* found NUL => return pointer */
  1731. -
  1732. - movq %rax, (%rdx) /* Write value to destination. */
  1733. - addq $8, %rdx /* Adjust pointer. */
  1734. -
  1735. - /* 3rd unroll. */
  1736. - movq (%rsi), %rax /* Read double word (8 bytes). */
  1737. - addq $8, %rsi /* Adjust pointer for next word. */
  1738. - movq %rax, %r9 /* Save a copy for NUL finding. */
  1739. - addq %r8, %r9 /* add the magic value to the word. We get
  1740. - carry bits reported for each byte which
  1741. - is *not* 0 */
  1742. - jnc 3f /* highest byte is NUL => return pointer */
  1743. - xorq %rax, %r9 /* (word+magic)^word */
  1744. - orq %r8, %r9 /* set all non-carry bits */
  1745. - incq %r9 /* add 1: if one carry bit was *not* set
  1746. - the addition will not result in 0. */
  1747. -
  1748. - jnz 3f /* found NUL => return pointer */
  1749. -
  1750. - movq %rax, (%rdx) /* Write value to destination. */
  1751. - addq $8, %rdx /* Adjust pointer. */
  1752. -
  1753. - /* 4th unroll. */
  1754. - movq (%rsi), %rax /* Read double word (8 bytes). */
  1755. - addq $8, %rsi /* Adjust pointer for next word. */
  1756. - movq %rax, %r9 /* Save a copy for NUL finding. */
  1757. - addq %r8, %r9 /* add the magic value to the word. We get
  1758. - carry bits reported for each byte which
  1759. - is *not* 0 */
  1760. - jnc 3f /* highest byte is NUL => return pointer */
  1761. - xorq %rax, %r9 /* (word+magic)^word */
  1762. - orq %r8, %r9 /* set all non-carry bits */
  1763. - incq %r9 /* add 1: if one carry bit was *not* set
  1764. - the addition will not result in 0. */
  1765. -
  1766. - jnz 3f /* found NUL => return pointer */
  1767. -
  1768. - movq %rax, (%rdx) /* Write value to destination. */
  1769. - addq $8, %rdx /* Adjust pointer. */
  1770. - jmp 1b /* Next iteration. */
  1771. - /* Do the last few bytes. %rax contains the value to write.
  1772. - The loop is unrolled twice. */
  1773. +L(alignloop):
  1774. + movzbl (%rsi, %rdx), %eax
  1775. + test %al, %al # check if character a NUL
  1776. + mov %al, (%rdi, %rdx)
  1777. + jz L(exit)
  1778. +
  1779. + inc %edx
  1780. +
  1781. +#ifdef USE_AS_STRNCPY
  1782. + dec %r11
  1783. + jz L(exit)
  1784. +#endif
  1785. +
  1786. + inc %r8
  1787. + jnz L(alignloop)
  1788. +
  1789. + .p2align 4,, 7
  1790. +
  1791. +L(alignafter):
  1792. +
  1793. +L(8try): # up to 64 bytes
  1794. + mov $0xfefefefefefefeff, %rcx
  1795. +
  1796. +L(8): # 8-byte loop
  1797. +
  1798. +L(8loop):
  1799. +#ifdef USE_AS_STRNCPY
  1800. + sub $8, %r11
  1801. + jbe L(tail)
  1802. +#endif
  1803. +
  1804. + mov (%rsi, %rdx), %rax
  1805. +
  1806. + mov %rcx, %r8
  1807. + add %rax, %r8
  1808. + jnc L(tail) # sbb %r10, %r10
  1809. +
  1810. + xor %rax, %r8
  1811. + or %rcx, %r8
  1812. + inc %r8 # sub %r10, %r8
  1813. + jnz L(tail)
  1814. +
  1815. + mov %rax, (%rdi, %rdx)
  1816. +
  1817. +#ifdef USE_AS_STRNCPY
  1818. + add $8, %edx
  1819. +
  1820. + sub $8, %r11
  1821. + jbe L(tail)
  1822. +
  1823. + mov (%rsi, %rdx), %rax
  1824. +#else
  1825. + mov 8 (%rsi, %rdx), %rax
  1826. + add $8, %edx
  1827. +#endif
  1828. +
  1829. + mov %rcx, %r8
  1830. + add %rax, %r8
  1831. + jnc L(tail) # sbb %r10, %r10
  1832. +
  1833. + xor %rax, %r8
  1834. + or %rcx, %r8
  1835. + inc %r8 # sub %r10, %r8
  1836. + jnz L(tail)
  1837. +
  1838. + mov %rax, (%rdi, %rdx)
  1839. +
  1840. +#ifdef USE_AS_STRNCPY
  1841. + add $8, %edx
  1842. +
  1843. + sub $8, %r11
  1844. + jbe L(tail)
  1845. +
  1846. + mov (%rsi, %rdx), %rax
  1847. +#else
  1848. + mov 8 (%rsi, %rdx), %rax
  1849. + add $8, %edx
  1850. +#endif
  1851. +
  1852. + mov %rcx, %r8
  1853. + add %rax, %r8
  1854. + jnc L(tail) # sbb %r10, %r10
  1855. +
  1856. + xor %rax, %r8
  1857. + or %rcx, %r8
  1858. + inc %r8 # sub %r10, %r8
  1859. + jnz L(tail)
  1860. +
  1861. + mov %rax, (%rdi, %rdx)
  1862. +
  1863. +#ifdef USE_AS_STRNCPY
  1864. + add $8, %edx
  1865. +
  1866. + sub $8, %r11
  1867. + jbe L(tail)
  1868. +
  1869. + mov (%rsi, %rdx), %rax
  1870. +#else
  1871. + mov 8 (%rsi, %rdx), %rax
  1872. + add $8, %edx
  1873. +#endif
  1874. +
  1875. + mov %rcx, %r8
  1876. + add %rax, %r8
  1877. + jnc L(tail) # sbb %r10, %r10
  1878. +
  1879. + xor %rax, %r8
  1880. + or %rcx, %r8
  1881. + inc %r8 # sub %r10, %r8
  1882. + jnz L(tail)
  1883. +
  1884. + mov %rax, (%rdi, %rdx)
  1885. +
  1886. +#ifdef USE_AS_STRNCPY
  1887. + add $8, %edx
  1888. +
  1889. + sub $8, %r11
  1890. + jbe L(tail)
  1891. +
  1892. + mov (%rsi, %rdx), %rax
  1893. +#else
  1894. + mov 8 (%rsi, %rdx), %rax
  1895. + add $8, %edx
  1896. +#endif
  1897. +
  1898. + mov %rcx, %r8
  1899. + add %rax, %r8
  1900. + jnc L(tail) # sbb %r10, %r10
  1901. +
  1902. + xor %rax, %r8
  1903. + or %rcx, %r8
  1904. + inc %r8 # sub %r10, %r8
  1905. + jnz L(tail)
  1906. +
  1907. + mov %rax, (%rdi, %rdx)
  1908. +
  1909. +#ifdef USE_AS_STRNCPY
  1910. + add $8, %edx
  1911. +
  1912. + sub $8, %r11
  1913. + jbe L(tail)
  1914. +
  1915. + mov (%rsi, %rdx), %rax
  1916. +#else
  1917. + mov 8 (%rsi, %rdx), %rax
  1918. + add $8, %edx
  1919. +#endif
  1920. +
  1921. + mov %rcx, %r8
  1922. + add %rax, %r8
  1923. + jnc L(tail) # sbb %r10, %r10
  1924. +
  1925. + xor %rax, %r8
  1926. + or %rcx, %r8
  1927. + inc %r8 # sub %r10, %r8
  1928. + jnz L(tail)
  1929. +
  1930. + mov %rax, (%rdi, %rdx)
  1931. +
  1932. +#ifdef USE_AS_STRNCPY
  1933. + add $8, %edx
  1934. +
  1935. + sub $8, %r11
  1936. + jbe L(tail)
  1937. +
  1938. + mov (%rsi, %rdx), %rax
  1939. +#else
  1940. + mov 8 (%rsi, %rdx), %rax
  1941. + add $8, %edx
  1942. +#endif
  1943. +
  1944. + mov %rcx, %r8
  1945. + add %rax, %r8
  1946. + jnc L(tail) # sbb %r10, %r10
  1947. +
  1948. + xor %rax, %r8
  1949. + or %rcx, %r8
  1950. + inc %r8 # sub %r10, %r8
  1951. + jnz L(tail)
  1952. +
  1953. + mov %rax, (%rdi, %rdx)
  1954. +
  1955. +#ifdef USE_AS_STRNCPY
  1956. + add $8, %edx
  1957. +
  1958. + sub $8, %r11
  1959. + jbe L(tail)
  1960. +
  1961. + mov (%rsi, %rdx), %rax
  1962. +#else
  1963. + mov 8 (%rsi, %rdx), %rax
  1964. + add $8, %edx
  1965. +#endif
  1966. +
  1967. + mov %rcx, %r8
  1968. + add %rax, %r8
  1969. + jnc L(tail) # sbb %r10, %r10
  1970. +
  1971. + xor %rax, %r8
  1972. + or %rcx, %r8
  1973. + inc %r8 # sub %r10, %r8
  1974. + jnz L(tail)
  1975. +
  1976. + mov %rax, (%rdi, %rdx)
  1977. +
  1978. + add $8, %edx
  1979. +
  1980. +L(8after): # up to 64 bytes
  1981. +
  1982. +L(64try): # up to 1/2 L1
  1983. +#ifdef PIC
  1984. +# ifdef SHARED
  1985. + mov _rtld_local_ro@GOTPCREL (%rip), %r9
  1986. + mov RTLD_GLOBAL_DL_CACHE1SIZEHALF (%r9), %r9
  1987. +# else
  1988. + mov _dl_cache1sizehalf@GOTPCREL (%rip), %r9
  1989. + mov (%r9), %r9
  1990. +# endif
  1991. +#else
  1992. + mov _dl_cache1sizehalf, %r9
  1993. +#endif
  1994. +
  1995. +L(64): # 64-byte loop
  1996. +
  1997. .p2align 4
  1998. -3:
  1999. - /* Note that stpcpy needs to return with the value of the NUL
  2000. - byte. */
  2001. - movb %al, (%rdx) /* 1st byte. */
  2002. - testb %al, %al /* Is it NUL. */
  2003. - jz 4f /* yes, finish. */
  2004. - incq %rdx /* Increment destination. */
  2005. - movb %ah, (%rdx) /* 2nd byte. */
  2006. - testb %ah, %ah /* Is it NUL?. */
  2007. - jz 4f /* yes, finish. */
  2008. - incq %rdx /* Increment destination. */
  2009. - shrq $16, %rax /* Shift... */
  2010. - jmp 3b /* and look at next two bytes in %rax. */
  2011. -4:
  2012. +L(64loop):
  2013. +#ifdef USE_AS_STRNCPY
  2014. + sub $8, %r11
  2015. + jbe L(tail)
  2016. +#endif
  2017. +
  2018. + mov (%rsi, %rdx), %rax
  2019. +
  2020. + mov %rcx, %r8
  2021. + add %rax, %r8
  2022. + sbb %r10, %r10
  2023. +
  2024. + xor %rax, %r8
  2025. + or %rcx, %r8
  2026. + sub %r10, %r8
  2027. + jnz L(tail)
  2028. +
  2029. + mov %rax, (%rdi, %rdx)
  2030. +
  2031. +#ifdef USE_AS_STRNCPY
  2032. + add $8, %edx
  2033. +
  2034. + sub $8, %r11
  2035. + jbe L(tail)
  2036. +
  2037. + mov (%rsi, %rdx), %rax
  2038. +#else
  2039. + mov 8 (%rsi, %rdx), %rax
  2040. + add $8, %edx
  2041. +#endif
  2042. +
  2043. + mov %rcx, %r8
  2044. + add %rax, %r8
  2045. + sbb %r10, %r10
  2046. +
  2047. + xor %rax, %r8
  2048. + or %rcx, %r8
  2049. + sub %r10, %r8
  2050. + jnz L(tail)
  2051. +
  2052. + mov %rax, (%rdi, %rdx)
  2053. +
  2054. +#ifdef USE_AS_STRNCPY
  2055. + add $8, %edx
  2056. +
  2057. + sub $8, %r11
  2058. + jbe L(tail)
  2059. +
  2060. + mov (%rsi, %rdx), %rax
  2061. +#else
  2062. + mov 8 (%rsi, %rdx), %rax
  2063. + add $8, %edx
  2064. +#endif
  2065. +
  2066. + mov %rcx, %r8
  2067. + add %rax, %r8
  2068. + sbb %r10, %r10
  2069. +
  2070. + xor %rax, %r8
  2071. + or %rcx, %r8
  2072. + sub %r10, %r8
  2073. + jnz L(tail)
  2074. +
  2075. + mov %rax, (%rdi, %rdx)
  2076. +
  2077. +#ifdef USE_AS_STRNCPY
  2078. + add $8, %edx
  2079. +
  2080. + sub $8, %r11
  2081. + jbe L(tail)
  2082. +
  2083. + mov (%rsi, %rdx), %rax
  2084. +#else
  2085. + mov 8 (%rsi, %rdx), %rax
  2086. + add $8, %edx
  2087. +#endif
  2088. +
  2089. + mov %rcx, %r8
  2090. + add %rax, %r8
  2091. + sbb %r10, %r10
  2092. +
  2093. + xor %rax, %r8
  2094. + or %rcx, %r8
  2095. + sub %r10, %r8
  2096. + jnz L(tail)
  2097. +
  2098. + mov %rax, (%rdi, %rdx)
  2099. +
  2100. +#ifdef USE_AS_STRNCPY
  2101. + add $8, %edx
  2102. +
  2103. + sub $8, %r11
  2104. + jbe L(tail)
  2105. +
  2106. + mov (%rsi, %rdx), %rax
  2107. +#else
  2108. + mov 8 (%rsi, %rdx), %rax
  2109. + add $8, %edx
  2110. +#endif
  2111. +
  2112. + mov %rcx, %r8
  2113. + add %rax, %r8
  2114. + sbb %r10, %r10
  2115. +
  2116. + xor %rax, %r8
  2117. + or %rcx, %r8
  2118. + sub %r10, %r8
  2119. + jnz L(tail)
  2120. +
  2121. + mov %rax, (%rdi, %rdx)
  2122. +
  2123. +#ifdef USE_AS_STRNCPY
  2124. + add $8, %edx
  2125. +
  2126. + sub $8, %r11
  2127. + jbe L(tail)
  2128. +
  2129. + mov (%rsi, %rdx), %rax
  2130. +#else
  2131. + mov 8 (%rsi, %rdx), %rax
  2132. + add $8, %edx
  2133. +#endif
  2134. +
  2135. + mov %rcx, %r8
  2136. + add %rax, %r8
  2137. + sbb %r10, %r10
  2138. +
  2139. + xor %rax, %r8
  2140. + or %rcx, %r8
  2141. + sub %r10, %r8
  2142. + jnz L(tail)
  2143. +
  2144. + mov %rax, (%rdi, %rdx)
  2145. +
  2146. +#ifdef USE_AS_STRNCPY
  2147. + add $8, %edx
  2148. +
  2149. + sub $8, %r11
  2150. + jbe L(tail)
  2151. +
  2152. + mov (%rsi, %rdx), %rax
  2153. +#else
  2154. + mov 8 (%rsi, %rdx), %rax
  2155. + add $8, %edx
  2156. +#endif
  2157. +
  2158. + mov %rcx, %r8
  2159. + add %rax, %r8
  2160. + sbb %r10, %r10
  2161. +
  2162. + xor %rax, %r8
  2163. + or %rcx, %r8
  2164. + sub %r10, %r8
  2165. + jnz L(tail)
  2166. +
  2167. + mov %rax, (%rdi, %rdx)
  2168. +
  2169. +#ifdef USE_AS_STRNCPY
  2170. + add $8, %edx
  2171. +
  2172. + sub $8, %r11
  2173. + jbe L(tail)
  2174. +
  2175. + mov (%rsi, %rdx), %rax
  2176. +#else
  2177. + mov 8 (%rsi, %rdx), %rax
  2178. + add $8, %edx
  2179. +#endif
  2180. +
  2181. + mov %rcx, %r8
  2182. + add %rax, %r8
  2183. + sbb %r10, %r10
  2184. +
  2185. + xor %rax, %r8
  2186. + or %rcx, %r8
  2187. + sub %r10, %r8
  2188. + jnz L(tail)
  2189. +
  2190. + mov %rax, (%rdi, %rdx)
  2191. +
  2192. + add $8, %edx
  2193. +
  2194. + cmp %r9, %rdx
  2195. + jbe L(64loop)
  2196. +
  2197. +L(64after): # up to 1/2 L1
  2198. +
  2199. +L(pretry): # up to 1/2 L2
  2200. +#ifdef PIC
  2201. +# ifdef SHARED
  2202. + mov _rtld_local_ro@GOTPCREL (%rip), %r9
  2203. + cmpl $0, RTLD_GLOBAL_DL_PREFETCHW (%r9)
  2204. + mov RTLD_GLOBAL_DL_CACHE2SIZEHALF (%r9), %r9
  2205. +# else
  2206. + mov _dl_prefetchw@GOTPCREL (%rip), %r9
  2207. + cmpl $0, (%r9)
  2208. + mov _dl_cache2sizehalf@GOTPCREL (%rip), %r9
  2209. + mov (%r9), %r9
  2210. +# endif
  2211. +#else
  2212. + cmpl $0, _dl_prefetchw
  2213. + mov _dl_cache2sizehalf, %r9
  2214. +#endif
  2215. + jz L(preloop) # check for availability of PREFETCHW
  2216. +
  2217. + .p2align 4
  2218. +
  2219. +L(prewloop): # 64-byte with prefetching to state M
  2220. +#ifdef USE_AS_STRNCPY
  2221. + sub $8, %r11
  2222. + jbe L(tail)
  2223. +#endif
  2224. +
  2225. + mov (%rsi, %rdx), %rax
  2226. +
  2227. + mov %rcx, %r8
  2228. + add %rax, %r8
  2229. + sbb %r10, %r10
  2230. +
  2231. + xor %rax, %r8
  2232. + or %rcx, %r8
  2233. + sub %r10, %r8
  2234. + jnz L(tail)
  2235. +
  2236. + mov %rax, (%rdi, %rdx)
  2237. +
  2238. +#ifdef USE_AS_STRNCPY
  2239. + add $8, %edx
  2240. +
  2241. + sub $8, %r11
  2242. + jbe L(tail)
  2243. +
  2244. + mov (%rsi, %rdx), %rax
  2245. +#else
  2246. + mov 8 (%rsi, %rdx), %rax
  2247. + add $8, %edx
  2248. +#endif
  2249. +
  2250. + mov %rcx, %r8
  2251. + add %rax, %r8
  2252. + sbb %r10, %r10
  2253. +
  2254. + xor %rax, %r8
  2255. + or %rcx, %r8
  2256. + sub %r10, %r8
  2257. + jnz L(tail)
  2258. +
  2259. + mov %rax, (%rdi, %rdx)
  2260. +
  2261. +#ifdef USE_AS_STRNCPY
  2262. + add $8, %edx
  2263. +
  2264. + sub $8, %r11
  2265. + jbe L(tail)
  2266. +
  2267. + mov (%rsi, %rdx), %rax
  2268. +#else
  2269. + mov 8 (%rsi, %rdx), %rax
  2270. + add $8, %edx
  2271. +#endif
  2272. +
  2273. + mov %rcx, %r8
  2274. + add %rax, %r8
  2275. + sbb %r10, %r10
  2276. +
  2277. + xor %rax, %r8
  2278. + or %rcx, %r8
  2279. + sub %r10, %r8
  2280. + jnz L(tail)
  2281. +
  2282. + mov %rax, (%rdi, %rdx)
  2283. +
  2284. +#ifdef USE_AS_STRNCPY
  2285. + add $8, %edx
  2286. +
  2287. + sub $8, %r11
  2288. + jbe L(tail)
  2289. +
  2290. + mov (%rsi, %rdx), %rax
  2291. +#else
  2292. + mov 8 (%rsi, %rdx), %rax
  2293. + add $8, %edx
  2294. +#endif
  2295. +
  2296. + mov %rcx, %r8
  2297. + add %rax, %r8
  2298. + sbb %r10, %r10
  2299. +
  2300. + xor %rax, %r8
  2301. + or %rcx, %r8
  2302. + sub %r10, %r8
  2303. + jnz L(tail)
  2304. +
  2305. + mov %rax, (%rdi, %rdx)
  2306. +
  2307. +#ifdef USE_AS_STRNCPY
  2308. + add $8, %edx
  2309. +
  2310. + sub $8, %r11
  2311. + jbe L(tail)
  2312. +
  2313. + mov (%rsi, %rdx), %rax
  2314. +#else
  2315. + mov 8 (%rsi, %rdx), %rax
  2316. + add $8, %edx
  2317. +#endif
  2318. +
  2319. + mov %rcx, %r8
  2320. + add %rax, %r8
  2321. + sbb %r10, %r10
  2322. +
  2323. + xor %rax, %r8
  2324. + or %rcx, %r8
  2325. + sub %r10, %r8
  2326. + jnz L(tail)
  2327. +
  2328. + mov %rax, (%rdi, %rdx)
  2329. +
  2330. +#ifdef USE_AS_STRNCPY
  2331. + add $8, %edx
  2332. +
  2333. + sub $8, %r11
  2334. + jbe L(tail)
  2335. +
  2336. + mov (%rsi, %rdx), %rax
  2337. +#else
  2338. + mov 8 (%rsi, %rdx), %rax
  2339. + add $8, %edx
  2340. +#endif
  2341. +
  2342. + mov %rcx, %r8
  2343. + add %rax, %r8
  2344. + sbb %r10, %r10
  2345. +
  2346. + xor %rax, %r8
  2347. + or %rcx, %r8
  2348. + sub %r10, %r8
  2349. + jnz L(tail)
  2350. +
  2351. + mov %rax, (%rdi, %rdx)
  2352. +
  2353. +#ifdef USE_AS_STRNCPY
  2354. + add $8, %edx
  2355. +
  2356. + sub $8, %r11
  2357. + jbe L(tail)
  2358. +
  2359. + mov (%rsi, %rdx), %rax
  2360. +#else
  2361. + mov 8 (%rsi, %rdx), %rax
  2362. + add $8, %edx
  2363. +#endif
  2364. +
  2365. + mov %rcx, %r8
  2366. + add %rax, %r8
  2367. + sbb %r10, %r10
  2368. +
  2369. + xor %rax, %r8
  2370. + or %rcx, %r8
  2371. + sub %r10, %r8
  2372. + jnz L(tail)
  2373. +
  2374. + mov %rax, (%rdi, %rdx)
  2375. +
  2376. +#ifdef USE_AS_STRNCPY
  2377. + add $8, %edx
  2378. +
  2379. + sub $8, %r11
  2380. + jbe L(tail)
  2381. +
  2382. + mov (%rsi, %rdx), %rax
  2383. +#else
  2384. + mov 8 (%rsi, %rdx), %rax
  2385. + add $8, %edx
  2386. +#endif
  2387. +
  2388. + mov %rcx, %r8
  2389. + add %rax, %r8
  2390. + sbb %r10, %r10
  2391. +
  2392. + xor %rax, %r8
  2393. + or %rcx, %r8
  2394. + sub %r10, %r8
  2395. + jnz L(tail)
  2396. +
  2397. + mov %rax, (%rdi, %rdx)
  2398. +
  2399. + prefetchw 512 + 8 (%rdi, %rdx)
  2400. + prefetcht0 512 + 8 (%rsi, %rdx)
  2401. +
  2402. + add $8, %edx
  2403. +
  2404. + cmp %r9, %rdx
  2405. + jb L(prewloop)
  2406. + jmp L(preafter)
  2407. +
  2408. +L(prewafter): # up to 1/2 L2
  2409. +
  2410. + .p2align 4
  2411. +
  2412. +L(preloop): # 64-byte with prefetching to state E
  2413. +#ifdef USE_AS_STRNCPY
  2414. + sub $8, %r11
  2415. + jbe L(tail)
  2416. +#endif
  2417. +
  2418. + mov (%rsi, %rdx), %rax
  2419. +
  2420. + mov %rcx, %r8
  2421. + add %rax, %r8
  2422. + sbb %r10, %r10
  2423. +
  2424. + xor %rax, %r8
  2425. + or %rcx, %r8
  2426. + sub %r10, %r8
  2427. + jnz L(tail)
  2428. +
  2429. + mov %rax, (%rdi, %rdx)
  2430. +
  2431. +#ifdef USE_AS_STRNCPY
  2432. + add $8, %edx
  2433. +
  2434. + sub $8, %r11
  2435. + jbe L(tail)
  2436. +
  2437. + mov (%rsi, %rdx), %rax
  2438. +#else
  2439. + mov 8 (%rsi, %rdx), %rax
  2440. + add $8, %edx
  2441. +#endif
  2442. +
  2443. + mov %rcx, %r8
  2444. + add %rax, %r8
  2445. + sbb %r10, %r10
  2446. +
  2447. + xor %rax, %r8
  2448. + or %rcx, %r8
  2449. + sub %r10, %r8
  2450. + jnz L(tail)
  2451. +
  2452. + mov %rax, (%rdi, %rdx)
  2453. +
  2454. +#ifdef USE_AS_STRNCPY
  2455. + add $8, %edx
  2456. +
  2457. + sub $8, %r11
  2458. + jbe L(tail)
  2459. +
  2460. + mov (%rsi, %rdx), %rax
  2461. +#else
  2462. + mov 8 (%rsi, %rdx), %rax
  2463. + add $8, %edx
  2464. +#endif
  2465. +
  2466. + mov %rcx, %r8
  2467. + add %rax, %r8
  2468. + sbb %r10, %r10
  2469. +
  2470. + xor %rax, %r8
  2471. + or %rcx, %r8
  2472. + sub %r10, %r8
  2473. + jnz L(tail)
  2474. +
  2475. + mov %rax, (%rdi, %rdx)
  2476. +
  2477. +#ifdef USE_AS_STRNCPY
  2478. + add $8, %edx
  2479. +
  2480. + sub $8, %r11
  2481. + jbe L(tail)
  2482. +
  2483. + mov (%rsi, %rdx), %rax
  2484. +#else
  2485. + mov 8 (%rsi, %rdx), %rax
  2486. + add $8, %edx
  2487. +#endif
  2488. +
  2489. + mov %rcx, %r8
  2490. + add %rax, %r8
  2491. + sbb %r10, %r10
  2492. +
  2493. + xor %rax, %r8
  2494. + or %rcx, %r8
  2495. + sub %r10, %r8
  2496. + jnz L(tail)
  2497. +
  2498. + mov %rax, (%rdi, %rdx)
  2499. +
  2500. +#ifdef USE_AS_STRNCPY
  2501. + add $8, %edx
  2502. +
  2503. + sub $8, %r11
  2504. + jbe L(tail)
  2505. +
  2506. + mov (%rsi, %rdx), %rax
  2507. +#else
  2508. + mov 8 (%rsi, %rdx), %rax
  2509. + add $8, %edx
  2510. +#endif
  2511. +
  2512. + mov %rcx, %r8
  2513. + add %rax, %r8
  2514. + sbb %r10, %r10
  2515. +
  2516. + xor %rax, %r8
  2517. + or %rcx, %r8
  2518. + sub %r10, %r8
  2519. + jnz L(tail)
  2520. +
  2521. + mov %rax, (%rdi, %rdx)
  2522. +
  2523. +#ifdef USE_AS_STRNCPY
  2524. + add $8, %edx
  2525. +
  2526. + sub $8, %r11
  2527. + jbe L(tail)
  2528. +
  2529. + mov (%rsi, %rdx), %rax
  2530. +#else
  2531. + mov 8 (%rsi, %rdx), %rax
  2532. + add $8, %edx
  2533. +#endif
  2534. +
  2535. + mov %rcx, %r8
  2536. + add %rax, %r8
  2537. + sbb %r10, %r10
  2538. +
  2539. + xor %rax, %r8
  2540. + or %rcx, %r8
  2541. + sub %r10, %r8
  2542. + jnz L(tail)
  2543. +
  2544. + mov %rax, (%rdi, %rdx)
  2545. +
  2546. +#ifdef USE_AS_STRNCPY
  2547. + add $8, %edx
  2548. +
  2549. + sub $8, %r11
  2550. + jbe L(tail)
  2551. +
  2552. + mov (%rsi, %rdx), %rax
  2553. +#else
  2554. + mov 8 (%rsi, %rdx), %rax
  2555. + add $8, %edx
  2556. +#endif
  2557. +
  2558. + mov %rcx, %r8
  2559. + add %rax, %r8
  2560. + sbb %r10, %r10
  2561. +
  2562. + xor %rax, %r8
  2563. + or %rcx, %r8
  2564. + sub %r10, %r8
  2565. + jnz L(tail)
  2566. +
  2567. + mov %rax, (%rdi, %rdx)
  2568. +
  2569. +#ifdef USE_AS_STRNCPY
  2570. + add $8, %edx
  2571. +
  2572. + sub $8, %r11
  2573. + jbe L(tail)
  2574. +
  2575. + mov (%rsi, %rdx), %rax
  2576. +#else
  2577. + mov 8 (%rsi, %rdx), %rax
  2578. + add $8, %edx
  2579. +#endif
  2580. +
  2581. + mov %rcx, %r8
  2582. + add %rax, %r8
  2583. + sbb %r10, %r10
  2584. +
  2585. + xor %rax, %r8
  2586. + or %rcx, %r8
  2587. + sub %r10, %r8
  2588. + jnz L(tail)
  2589. +
  2590. + mov %rax, (%rdi, %rdx)
  2591. +
  2592. + prefetcht0 512 + 8 (%rdi, %rdx)
  2593. + prefetcht0 512 + 8 (%rsi, %rdx)
  2594. +
  2595. + add $8, %edx
  2596. +
  2597. + cmp %r9, %rdx
  2598. + jb L(preloop)
  2599. +
  2600. + .p2align 4
  2601. +
  2602. +L(preafter): # up to 1/2 of L2
  2603. +
  2604. +L(NTtry):
  2605. + mfence
  2606. +
  2607. +L(NT): # 64-byte NT
  2608. +
  2609. + .p2align 4
  2610. +
  2611. +L(NTloop):
  2612. +#ifdef USE_AS_STRNCPY
  2613. + sub $8, %r11
  2614. + jbe L(tail)
  2615. +#endif
  2616. +
  2617. + mov (%rsi, %rdx), %rax
  2618. +
  2619. + mov %rcx, %r8
  2620. + add %rax, %r8
  2621. + sbb %r10, %r10
  2622. +
  2623. + xor %rax, %r8
  2624. + or %rcx, %r8
  2625. + sub %r10, %r8
  2626. + jnz L(NTtail)
  2627. +
  2628. + movnti %rax, (%rdi, %rdx)
  2629. +
  2630. +#ifdef USE_AS_STRNCPY
  2631. + add $8, %rdx
  2632. +
  2633. + sub $8, %r11
  2634. + jbe L(tail)
  2635. +
  2636. + mov (%rsi, %rdx), %rax
  2637. +#else
  2638. + mov 8 (%rsi, %rdx), %rax
  2639. + add $8, %rdx
  2640. +#endif
  2641. +
  2642. + mov %rcx, %r8
  2643. + add %rax, %r8
  2644. + sbb %r10, %r10
  2645. +
  2646. + xor %rax, %r8
  2647. + or %rcx, %r8
  2648. + sub %r10, %r8
  2649. + jnz L(NTtail)
  2650. +
  2651. + movnti %rax, (%rdi, %rdx)
  2652. +
  2653. +#ifdef USE_AS_STRNCPY
  2654. + add $8, %rdx
  2655. +
  2656. + sub $8, %r11
  2657. + jbe L(tail)
  2658. +
  2659. + mov (%rsi, %rdx), %rax
  2660. +#else
  2661. + mov 8 (%rsi, %rdx), %rax
  2662. + add $8, %rdx
  2663. +#endif
  2664. +
  2665. + mov %rcx, %r8
  2666. + add %rax, %r8
  2667. + sbb %r10, %r10
  2668. +
  2669. + xor %rax, %r8
  2670. + or %rcx, %r8
  2671. + sub %r10, %r8
  2672. + jnz L(NTtail)
  2673. +
  2674. + movnti %rax, (%rdi, %rdx)
  2675. +
  2676. +#ifdef USE_AS_STRNCPY
  2677. + add $8, %rdx
  2678. +
  2679. + sub $8, %r11
  2680. + jbe L(tail)
  2681. +
  2682. + mov (%rsi, %rdx), %rax
  2683. +#else
  2684. + mov 8 (%rsi, %rdx), %rax
  2685. + add $8, %rdx
  2686. +#endif
  2687. +
  2688. + mov %rcx, %r8
  2689. + add %rax, %r8
  2690. + sbb %r10, %r10
  2691. +
  2692. + xor %rax, %r8
  2693. + or %rcx, %r8
  2694. + sub %r10, %r8
  2695. + jnz L(NTtail)
  2696. +
  2697. + movnti %rax, (%rdi, %rdx)
  2698. +
  2699. +#ifdef USE_AS_STRNCPY
  2700. + add $8, %rdx
  2701. +
  2702. + sub $8, %r11
  2703. + jbe L(tail)
  2704. +
  2705. + mov (%rsi, %rdx), %rax
  2706. +#else
  2707. + mov 8 (%rsi, %rdx), %rax
  2708. + add $8, %rdx
  2709. +#endif
  2710. +
  2711. + mov %rcx, %r8
  2712. + add %rax, %r8
  2713. + sbb %r10, %r10
  2714. +
  2715. + xor %rax, %r8
  2716. + or %rcx, %r8
  2717. + sub %r10, %r8
  2718. + jnz L(NTtail)
  2719. +
  2720. + movnti %rax, (%rdi, %rdx)
  2721. +
  2722. +#ifdef USE_AS_STRNCPY
  2723. + add $8, %rdx
  2724. +
  2725. + sub $8, %r11
  2726. + jbe L(tail)
  2727. +
  2728. + mov (%rsi, %rdx), %rax
  2729. +#else
  2730. + mov 8 (%rsi, %rdx), %rax
  2731. + add $8, %rdx
  2732. +#endif
  2733. +
  2734. + mov %rcx, %r8
  2735. + add %rax, %r8
  2736. + sbb %r10, %r10
  2737. +
  2738. + xor %rax, %r8
  2739. + or %rcx, %r8
  2740. + sub %r10, %r8
  2741. + jnz L(NTtail)
  2742. +
  2743. + movnti %rax, (%rdi, %rdx)
  2744. +
  2745. +#ifdef USE_AS_STRNCPY
  2746. + add $8, %rdx
  2747. +
  2748. + sub $8, %r11
  2749. + jbe L(tail)
  2750. +
  2751. + mov (%rsi, %rdx), %rax
  2752. +#else
  2753. + mov 8 (%rsi, %rdx), %rax
  2754. + add $8, %rdx
  2755. +#endif
  2756. +
  2757. + mov %rcx, %r8
  2758. + add %rax, %r8
  2759. + sbb %r10, %r10
  2760. +
  2761. + xor %rax, %r8
  2762. + or %rcx, %r8
  2763. + sub %r10, %r8
  2764. + jnz L(NTtail)
  2765. +
  2766. + movnti %rax, (%rdi, %rdx)
  2767. +
  2768. +#ifdef USE_AS_STRNCPY
  2769. + add $8, %rdx
  2770. +
  2771. + sub $8, %r11
  2772. + jbe L(tail)
  2773. +
  2774. + mov (%rsi, %rdx), %rax
  2775. +#else
  2776. + mov 8 (%rsi, %rdx), %rax
  2777. + add $8, %rdx
  2778. +#endif
  2779. +
  2780. + mov %rcx, %r8
  2781. + add %rax, %r8
  2782. + sbb %r10, %r10
  2783. +
  2784. + xor %rax, %r8
  2785. + or %rcx, %r8
  2786. + sub %r10, %r8
  2787. + jnz L(NTtail)
  2788. +
  2789. + movnti %rax, (%rdi, %rdx)
  2790. +
  2791. + prefetchnta 768 + 8 (%rsi, %rdx)
  2792. +
  2793. + add $8, %rdx
  2794. + jmp L(NTloop)
  2795. +
  2796. + .p2align 4
  2797. +
  2798. +L(NTtail):
  2799. + mfence # serialize memory operations
  2800. +
  2801. + .p2align 4
  2802. +
  2803. +L(NTafter):
  2804. +
  2805. +L(tailtry):
  2806. +
  2807. +L(tail): # 1-byte tail
  2808. +#ifdef USE_AS_STRNCPY
  2809. + add $8, %r11
  2810. + jz L(exit)
  2811. +#endif
  2812. +
  2813. + .p2align 4
  2814. +
  2815. +L(tailloop):
  2816. + movzbl (%rsi, %rdx), %eax
  2817. + test %al, %al
  2818. + mov %al, (%rdi, %rdx)
  2819. + jz L(exit)
  2820. +
  2821. + inc %rdx
  2822. +
  2823. +#ifdef USE_AS_STRNCPY
  2824. + dec %r11
  2825. + jz L(exit)
  2826. +#endif
  2827. + jmp L(tailloop)
  2828. +
  2829. + .p2align 4
  2830. +
  2831. +L(tailafter):
  2832. +
  2833. +L(exit):
  2834. +#ifdef USE_AS_STPCPY
  2835. + lea (%rdi, %rdx), %rax
  2836. +#else
  2837. + mov %rdi, %rax
  2838. +#endif
  2839. +
  2840. +#ifdef USE_AS_STRNCPY
  2841. + test %r11, %r11
  2842. + mov %r11, %rcx
  2843. + jnz 2f
  2844. +
  2845. + rep
  2846. + ret
  2847. +
  2848. + .p2align 4
  2849. +
  2850. +2:
  2851. #ifdef USE_AS_STPCPY
  2852. - movq %rdx, %rax /* Destination is return value. */
  2853. + mov %rax, %r8
  2854. #else
  2855. - movq %rdi, %rax /* Source is return value. */
  2856. + mov %rdi, %r8
  2857. +# endif
  2858. +
  2859. + xor %eax, %eax # bzero () would do too, but usually there are only a handful of bytes left
  2860. + shr $3, %rcx
  2861. + lea (%rdi, %rdx), %rdi
  2862. + jz 3f
  2863. +
  2864. + rep stosq
  2865. +
  2866. + and $7, %r11d
  2867. + jz 1f
  2868. +
  2869. + .p2align 4,, 4
  2870. +
  2871. +3:
  2872. + mov %al, (%rdi)
  2873. + inc %rdi
  2874. +
  2875. + dec %r11d
  2876. + jnz 3b
  2877. +
  2878. + .p2align 4,, 4
  2879. +
  2880. +1:
  2881. + mov %r8, %rax
  2882. #endif
  2883. - retq
  2884. -END (BP_SYM (STRCPY))
  2885. -#ifndef USE_AS_STPCPY
  2886. + ret
  2887. +
  2888. +END (strcpy)
  2889. +
  2890. +#if !defined USE_AS_STPCPY && !defined USE_AS_STRNCPY
  2891. libc_hidden_builtin_def (strcpy)
  2892. #endif
  2893. diff -Npruw -x CVS -x vssver.scc -x powerpc -x sync_file_range.c libc/sysdeps/x86_64/strncpy.S libc/sysdeps/x86_64/strncpy.S
  2894. --- libc/sysdeps/x86_64/strncpy.S 1969-12-31 18:00:00.000000000 -0600
  2895. +++ libc/sysdeps/x86_64/strncpy.S 2006-05-05 15:25:34.559341000 -0500
  2896. @@ -0,0 +1,8 @@
  2897. +#define USE_AS_STRNCPY
  2898. +#define strcpy __strncpy
  2899. +
  2900. +#include <sysdeps/x86_64/strcpy.S>
  2901. +
  2902. +weak_alias (__strncpy, strncpy)
  2903. +libc_hidden_def (__strncpy)
  2904. +libc_hidden_builtin_def (strncpy)