#include <string.h>
#include <stdint.h>
#include <endian.h>

#ifdef __GNUC__

#if __BYTE_ORDER == __LITTLE_ENDIAN
#define LS >>
#define RS <<
#else
#define LS <<
#define RS >>
#endif

typedef uint32_t __attribute__((__may_alias__)) u32;
typedef uint16_t __attribute__((__may_alias__)) u16;

/* Copy one 32-byte block to the word-aligned destination d from the
 * misaligned source position s + ls/8, using only whole-word loads from the
 * word-aligned address s. w must hold the word previously loaded from s; the
 * word loaded from s+32 is returned so the caller can pass it back as w for
 * the next block. */
static inline uint32_t shifted_block_copy(unsigned char *d, const unsigned char *s, uint32_t w, int ls)
{
	int rs = 32-ls;
	uint32_t t1 = *(u32 *)(s+4);
	uint32_t t2 = *(u32 *)(s+8);
	uint32_t t3 = *(u32 *)(s+12);
	uint32_t t4 = *(u32 *)(s+16);
	uint32_t t5 = *(u32 *)(s+20);
	uint32_t t6 = *(u32 *)(s+24);
	uint32_t t7 = *(u32 *)(s+28);
	uint32_t t8 = *(u32 *)(s+32);
	/* Compiler-only barrier (emits no instructions): the memory clobber
	 * keeps all eight loads above ordered before the stores below. */
	__asm__ __volatile__ ( "" : : "r"(s), "r"(d) : "memory" );
	*(u32 *)(d)    = (w  LS ls) | (t1 RS rs);
	*(u32 *)(d+4)  = (t1 LS ls) | (t2 RS rs);
	*(u32 *)(d+8)  = (t2 LS ls) | (t3 RS rs);
	*(u32 *)(d+12) = (t3 LS ls) | (t4 RS rs);
	*(u32 *)(d+16) = (t4 LS ls) | (t5 RS rs);
	*(u32 *)(d+20) = (t5 LS ls) | (t6 RS rs);
	*(u32 *)(d+24) = (t6 LS ls) | (t7 RS rs);
	*(u32 *)(d+28) = (t7 LS ls) | (t8 RS rs);
	return t8;
}

#endif

void *memcpy(void *restrict dest, const void *restrict src, size_t n)
{
	unsigned char *d = dest;
	const unsigned char *s = src;

#ifdef __GNUC__
	/* Copy leading bytes until the source is word-aligned (or n runs out). */
	for (; (uintptr_t)s % 4 && n; n--) *d++ = *s++;

	if ((uintptr_t)d % 4 == 0) {
		/* Source and destination are mutually aligned: copy 32-byte
		 * blocks, then remaining words, then remaining bytes. */
		size_t c32 = n>>5, c4 = (n&31)>>2, c1 = n&3;
		for (; c32; c32--, s+=32, d+=32) {
			uint32_t t0 = *(u32 *)(s+0);
			uint32_t t1 = *(u32 *)(s+4);
			uint32_t t2 = *(u32 *)(s+8);
			uint32_t t3 = *(u32 *)(s+12);
			uint32_t t4 = *(u32 *)(s+16);
			uint32_t t5 = *(u32 *)(s+20);
			uint32_t t6 = *(u32 *)(s+24);
			uint32_t t7 = *(u32 *)(s+28);
			__asm__ __volatile__ ( "" : : "r"(s), "r"(d) : "memory" );
			*(u32 *)(d+0)  = t0;
			*(u32 *)(d+4)  = t1;
			*(u32 *)(d+8)  = t2;
			*(u32 *)(d+12) = t3;
			*(u32 *)(d+16) = t4;
			*(u32 *)(d+20) = t5;
			*(u32 *)(d+24) = t6;
			*(u32 *)(d+28) = t7;
		}
		for (; c4; c4--, s+=4, d+=4) {
			*(u32 *)d = *(u32 *)s;
		}
		for (; c1; c1--, s++, d++) {
			*d = *s;
		}
		return dest;
	}

	if (!n) return dest;

	/* Source is word-aligned but the destination is not. Each shifted
	 * block needs 4 bytes of read-ahead beyond the 32 it writes, hence
	 * the n>=36 threshold and the (n-4)>>5 block count. */
	size_t c32 = n>=36 ? (n-4)>>5 : 0;
	uint32_t w = *(u32 *)s;
	n -= (c32<<5);

	/* Copy 1-3 head bytes to word-align d, then copy shifted blocks with
	 * the bit shift matching the misalignment. s stays on its aligned
	 * position during the block loop and is advanced past the head bytes
	 * only afterwards. */
	if (c32) switch ((uintptr_t)d % 4) {
	case 1:
		d[0] = s[0];
		d[1] = s[1];
		d[2] = s[2];
		d += 3; n -= 3;
		for (; c32; c32--, s+=32, d+=32)
			w = shifted_block_copy(d, s, w, 24);
		s += 3;
		break;
	case 2:
		*(u16 *)d = *(u16 *)s;
		d += 2; n -= 2;
		for (; c32; c32--, s+=32, d+=32)
			w = shifted_block_copy(d, s, w, 16);
		s += 2;
		break;
	case 3:
		d[0] = s[0];
		d += 1; n -= 1;
		for (; c32; c32--, s+=32, d+=32)
			w = shifted_block_copy(d, s, w, 8);
		s += 1;
		break;
	}
#endif

	/* Byte-at-a-time tail; also the whole copy without GNU C. */
	for (; n; n--) *d++ = *s++;
	return dest;
}
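
#ifdef TEST_MEMCPY
/*
 * Minimal self-test sketch, not part of the original file. The macro name
 * TEST_MEMCPY is chosen here for illustration. It exercises the memcpy above
 * over a range of sizes and small source/destination offsets, comparing
 * against a byte-by-byte reference copy; the surrounding sentinel bytes catch
 * overruns. Build e.g. with: cc -std=c99 -O2 -fno-builtin -DTEST_MEMCPY
 * (so that calls reach the definition above rather than a compiler builtin).
 */
#include <stdio.h>

int main(void)
{
	enum { PAD = 8, MAX = 256 };
	static unsigned char src[PAD + MAX + PAD];
	static unsigned char dst[PAD + MAX + PAD];
	static unsigned char ref[PAD + MAX + PAD];

	for (size_t i = 0; i < sizeof src; i++)
		src[i] = (unsigned char)(i * 131 + 7);

	for (size_t sa = 0; sa < 4; sa++)
	for (size_t da = 0; da < 4; da++)
	for (size_t n = 0; n <= MAX; n++) {
		/* Fill both destinations with a sentinel, copy with the
		 * function under test and with a byte loop, then compare the
		 * whole buffers so out-of-range writes are detected too. */
		for (size_t i = 0; i < sizeof dst; i++)
			dst[i] = ref[i] = 0xAA;
		for (size_t i = 0; i < n; i++)
			ref[PAD + da + i] = src[PAD + sa + i];
		memcpy(dst + PAD + da, src + PAD + sa, n);
		for (size_t i = 0; i < sizeof dst; i++) {
			if (dst[i] != ref[i]) {
				printf("FAIL: sa=%zu da=%zu n=%zu at byte %zu\n",
				       sa, da, n, i);
				return 1;
			}
		}
	}
	printf("all offset/size combinations passed\n");
	return 0;
}
#endif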