Commit d7134d38 authored by Sam Lantinga

Okay, I figured out the intrinsics for SIMD memcpy

--HG--
extra : convert_revision : svn%3Ac70aab31-4412-0410-b14c-859654838e24/trunk%402610
parent 37fe3a93
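The inline-assembly mnemonics being replaced map onto the new intrinsics roughly as follows (a quick orientation sketch, not part of the commit itself):

/* prefetchnta mem       ->  _mm_prefetch(mem, _MM_HINT_NTA)   <xmmintrin.h>
 * movq mem, %mmN        ->  __m64 load via *(__m64 *)         <mmintrin.h>
 * movntq %mmN, mem      ->  _mm_stream_pi()                   <xmmintrin.h>
 * movaps mem, %xmmN     ->  __m128 load via *(__m128 *)       <xmmintrin.h>
 * movntps %xmmN, mem    ->  _mm_stream_ps()                   <xmmintrin.h>
 * emms                  ->  _mm_empty()                       <mmintrin.h>
 */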
@@ -24,43 +24,42 @@
 #include "SDL_video.h"
 #include "SDL_blit.h"
 
-/* The MMX/SSE intrinsics don't give access to specific registers for
-   the most memory parallelism, so we'll use GCC inline assembly here...
- */
-#ifndef __GNUC__
-#undef __MMX__
-#undef __SSE__
-#endif
+#ifdef __MMX__
+#include <mmintrin.h>
+#endif
+#ifdef __SSE__
+#include <xmmintrin.h>
+#endif
 
 #ifdef __MMX__
 static __inline__ void
-SDL_memcpyMMX(Uint8 *dst, const Uint8 *src, int len)
+SDL_memcpyMMX(Uint8 * dst, const Uint8 * src, int len)
 {
     int i;
+    __m64 values[8];
 
     for (i = len / 64; i--;) {
-        __asm__ __volatile__ (
-            "prefetchnta (%0)\n"
-            "movq (%0), %%mm0\n"
-            "movq 8(%0), %%mm1\n"
-            "movq 16(%0), %%mm2\n"
-            "movq 24(%0), %%mm3\n"
-            "movq 32(%0), %%mm4\n"
-            "movq 40(%0), %%mm5\n"
-            "movq 48(%0), %%mm6\n"
-            "movq 56(%0), %%mm7\n"
-            "movntq %%mm0, (%1)\n"
-            "movntq %%mm1, 8(%1)\n"
-            "movntq %%mm2, 16(%1)\n"
-            "movntq %%mm3, 24(%1)\n"
-            "movntq %%mm4, 32(%1)\n"
-            "movntq %%mm5, 40(%1)\n"
-            "movntq %%mm6, 48(%1)\n"
-            "movntq %%mm7, 56(%1)\n"
-            :: "r" (src), "r" (dst) : "memory");
+        _mm_prefetch(src, _MM_HINT_NTA);
+        values[0] = *(__m64 *) (src + 0);
+        values[1] = *(__m64 *) (src + 8);
+        values[2] = *(__m64 *) (src + 16);
+        values[3] = *(__m64 *) (src + 24);
+        values[4] = *(__m64 *) (src + 32);
+        values[5] = *(__m64 *) (src + 40);
+        values[6] = *(__m64 *) (src + 48);
+        values[7] = *(__m64 *) (src + 56);
+        _mm_stream_pi((__m64 *) (dst + 0), values[0]);
+        _mm_stream_pi((__m64 *) (dst + 8), values[1]);
+        _mm_stream_pi((__m64 *) (dst + 16), values[2]);
+        _mm_stream_pi((__m64 *) (dst + 24), values[3]);
+        _mm_stream_pi((__m64 *) (dst + 32), values[4]);
+        _mm_stream_pi((__m64 *) (dst + 40), values[5]);
+        _mm_stream_pi((__m64 *) (dst + 48), values[6]);
+        _mm_stream_pi((__m64 *) (dst + 56), values[7]);
         src += 64;
         dst += 64;
     }
 
     if (len & 63)
         SDL_memcpy(dst, src, len & 63);
 }
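Extracted from the diff above, the new MMX path is straightforward to exercise on its own. A minimal standalone sketch (hypothetical helper name, built with GCC's -mmmx -msse, since movntq and prefetchnta are SSE-era extensions); src and dst are assumed 8-byte aligned, as the caller guarantees below:

#include <mmintrin.h>
#include <xmmintrin.h>
#include <string.h>

static void memcpy_mmx_sketch(unsigned char *dst, const unsigned char *src, int len)
{
    int i, j;
    __m64 values[8];
    for (i = len / 64; i--;) {
        _mm_prefetch((const char *) src, _MM_HINT_NTA);
        for (j = 0; j < 8; ++j)         /* load a 64-byte block into mm0-mm7 */
            values[j] = *(const __m64 *) (src + 8 * j);
        for (j = 0; j < 8; ++j)         /* write it back with non-temporal stores */
            _mm_stream_pi((__m64 *) (dst + 8 * j), values[j]);
        src += 64;
        dst += 64;
    }
    if (len & 63)
        memcpy(dst, src, len & 63);     /* plain copy for the tail */
    _mm_empty();                        /* emms: leave the x87 FPU usable */
}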
@@ -68,25 +67,25 @@ SDL_memcpyMMX(Uint8 *dst, const Uint8 *src, int len)
 
 #ifdef __SSE__
 static __inline__ void
-SDL_memcpySSE(Uint8 *dst, const Uint8 *src, int len)
+SDL_memcpySSE(Uint8 * dst, const Uint8 * src, int len)
 {
     int i;
+    __m128 values[4];
 
     for (i = len / 64; i--;) {
-        __asm__ __volatile__ (
-            "prefetchnta (%0)\n"
-            "movaps (%0), %%xmm0\n"
-            "movaps 16(%0), %%xmm1\n"
-            "movaps 32(%0), %%xmm2\n"
-            "movaps 48(%0), %%xmm3\n"
-            "movntps %%xmm0, (%1)\n"
-            "movntps %%xmm1, 16(%1)\n"
-            "movntps %%xmm2, 32(%1)\n"
-            "movntps %%xmm3, 48(%1)\n"
-            :: "r" (src), "r" (dst) : "memory");
+        _mm_prefetch(src, _MM_HINT_NTA);
+        values[0] = *(__m128 *) (src + 0);
+        values[1] = *(__m128 *) (src + 16);
+        values[2] = *(__m128 *) (src + 32);
+        values[3] = *(__m128 *) (src + 48);
+        _mm_stream_ps((float *) (dst + 0), values[0]);
+        _mm_stream_ps((float *) (dst + 16), values[1]);
+        _mm_stream_ps((float *) (dst + 32), values[2]);
+        _mm_stream_ps((float *) (dst + 48), values[3]);
         src += 64;
         dst += 64;
     }
 
     if (len & 63)
         SDL_memcpy(dst, src, len & 63);
 }
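One subtlety the SSE version preserves: dereferencing *(__m128 *) compiles to an aligned movaps load, which faults on a pointer that is not 16-byte aligned, and the MMX path's 8-byte loads likewise want 8-byte alignment. That is why SDL_BlitCopy (next hunk) gates each path on the addresses. A sketch of equivalent checks (hypothetical helper names):

#include <stdint.h>

static int aligned16(const void *src, const void *dst)
{
    return !(((uintptr_t) src | (uintptr_t) dst) & 15);  /* SSE path */
}

static int aligned8(const void *src, const void *dst)
{
    return !(((uintptr_t) src | (uintptr_t) dst) & 7);   /* MMX path */
}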
@@ -107,7 +106,7 @@ SDL_BlitCopy(SDL_BlitInfo * info)
     dstskip = w + info->d_skip;
 
 #ifdef __SSE__
-    if (SDL_HasSSE() && !((uintptr_t)src & 15) && !((uintptr_t)dst & 15)) {
+    if (SDL_HasSSE() && !((uintptr_t) src & 15) && !((uintptr_t) dst & 15)) {
         while (h--) {
             SDL_memcpySSE(dst, src, w);
             src += srcskip;
@@ -118,13 +117,13 @@ SDL_BlitCopy(SDL_BlitInfo * info)
 #endif
 
 #ifdef __MMX__
-    if (SDL_HasMMX() && !((uintptr_t)src & 7) && !((uintptr_t)dst & 7)) {
+    if (SDL_HasMMX() && !((uintptr_t) src & 7) && !((uintptr_t) dst & 7)) {
         while (h--) {
             SDL_memcpyMMX(dst, src, w);
             src += srcskip;
             dst += dstskip;
         }
-        __asm__ __volatile__(" emms\n"::);
+        _mm_empty();
         return;
     }
 #endif
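The _mm_empty() above replaces the hand-written emms. It matters because the MMX registers alias the x87 floating-point register stack, so an MMX sequence must be closed out before ordinary floating-point code runs. A tiny sketch of the pattern:

#include <mmintrin.h>

static double mmx_then_float(double x)
{
    __m64 v = _mm_setzero_si64();   /* any MMX work marks the x87 stack in use */
    (void) v;
    _mm_empty();                    /* emms: release the x87 register stack */
    return x * 2.0;                 /* floating point is safe again */
}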
@@ -148,7 +147,7 @@ SDL_BlitCopyOverlap(SDL_BlitInfo * info)
     src = info->s_pixels;
     dst = info->d_pixels;
     skip = w + info->s_skip;
-    if ((dst < src) || (dst >= (src + h*skip))) {
+    if ((dst < src) || (dst >= (src + h * skip))) {
         SDL_BlitCopy(info);
     } else {
         src += ((h - 1) * skip);
...
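The overlap test above routes to the fast forward copy whenever dst sits entirely before the source block or past its end; otherwise the rows are walked bottom-up, starting from src + (h - 1) * skip, so each source row is read before the copy can clobber it. A sketch of that scheme with hypothetical names:

#include <string.h>

static void copy_rows_overlap_sketch(unsigned char *dst, const unsigned char *src,
                                     int w, int h, int skip)
{
    if (dst < src || dst >= src + (size_t) h * (size_t) skip) {
        while (h--) {               /* no harmful overlap: copy top-down */
            memmove(dst, src, w);
            src += skip;
            dst += skip;
        }
    } else {
        src += (h - 1) * skip;      /* destination overlaps the source tail: */
        dst += (h - 1) * skip;      /* copy bottom-up instead */
        while (h--) {
            memmove(dst, src, w);   /* rows themselves may still overlap */
            src -= skip;
            dst -= skip;
        }
    }
}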