x86: Remove inline MMX assembly that clobbers the FPU state

These inline implementations of AV_COPY64, AV_SWAP64 and AV_ZERO64
are known to clobber the FPU state - which has to be restored
with the 'emms' instruction afterwards.

This was known and signaled with the FF_COPY_SWAP_ZERO_USES_MMX
define, which calling code seems to have been supposed to check,
in order to call emms_c() after using them. See
0b1972d409,
29c4c0886d and
df215e5758 for history on earlier
fixes in the same area.

However, new code can use these AV_*64() macros without knowing
about the need to call emms_c().

Just get rid of these dangerous inline assembly snippets; this
doesn't make any difference for 64-bit architectures anyway.

Signed-off-by: Martin Storsjö <martin@martin.st>
This commit is contained in:
Martin Storsjö 2024-01-26 14:55:49 +02:00
parent d5aaed9d4c
commit 7ec2354c38
2 changed files with 0 additions and 52 deletions

View File

@@ -770,10 +770,6 @@ static void erase_adpcm_history(DCACoreDecoder *s)
for (ch = 0; ch < DCA_CHANNELS; ch++)
for (band = 0; band < DCA_SUBBANDS; band++)
AV_ZERO128(s->subband_samples[ch][band] - DCA_ADPCM_COEFFS);
#ifdef FF_COPY_SWAP_ZERO_USES_MMX
emms_c();
#endif
}
static int alloc_sample_buffer(DCACoreDecoder *s)
@@ -837,10 +833,6 @@ static int parse_frame_data(DCACoreDecoder *s, enum HeaderType header, int xch_b
}
}
#ifdef FF_COPY_SWAP_ZERO_USES_MMX
emms_c();
#endif
return 0;
}
@@ -1283,10 +1275,6 @@ static void erase_x96_adpcm_history(DCACoreDecoder *s)
for (ch = 0; ch < DCA_CHANNELS; ch++)
for (band = 0; band < DCA_SUBBANDS_X96; band++)
AV_ZERO128(s->x96_subband_samples[ch][band] - DCA_ADPCM_COEFFS);
#ifdef FF_COPY_SWAP_ZERO_USES_MMX
emms_c();
#endif
}
static int alloc_x96_sample_buffer(DCACoreDecoder *s)
@@ -1516,10 +1504,6 @@ static int parse_x96_frame_data(DCACoreDecoder *s, int exss, int xch_base)
}
}
#ifdef FF_COPY_SWAP_ZERO_USES_MMX
emms_c();
#endif
return 0;
}

View File

@@ -27,42 +27,6 @@
#if HAVE_MMX
#if !HAVE_FAST_64BIT && defined(__MMX__)
#define FF_COPY_SWAP_ZERO_USES_MMX
#define AV_COPY64 AV_COPY64
static av_always_inline void AV_COPY64(void *d, const void *s)
{
__asm__("movq %1, %%mm0 \n\t"
"movq %%mm0, %0 \n\t"
: "=m"(*(uint64_t*)d)
: "m" (*(const uint64_t*)s)
: "mm0");
}
#define AV_SWAP64 AV_SWAP64
static av_always_inline void AV_SWAP64(void *a, void *b)
{
__asm__("movq %1, %%mm0 \n\t"
"movq %0, %%mm1 \n\t"
"movq %%mm0, %0 \n\t"
"movq %%mm1, %1 \n\t"
: "+m"(*(uint64_t*)a), "+m"(*(uint64_t*)b)
::"mm0", "mm1");
}
#define AV_ZERO64 AV_ZERO64
static av_always_inline void AV_ZERO64(void *d)
{
__asm__("pxor %%mm0, %%mm0 \n\t"
"movq %%mm0, %0 \n\t"
: "=m"(*(uint64_t*)d)
:: "mm0");
}
#endif /* !HAVE_FAST_64BIT && defined(__MMX__) */
#ifdef __SSE__
#define AV_COPY128 AV_COPY128