ffmpeg/libavutil/mem_internal.h
/*
 * Copyright (c) 2002 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_MEM_INTERNAL_H
#define AVUTIL_MEM_INTERNAL_H

#include "config.h"

#include <stdint.h>

#include "avassert.h"
#include "mem.h"
#include "version.h"
#if !FF_API_DECLARE_ALIGNED
/**
 * @def DECLARE_ALIGNED(n,t,v)
 * Declare a variable that is aligned in memory.
 *
 * @code{.c}
 * DECLARE_ALIGNED(16, uint16_t, aligned_int) = 42;
 * DECLARE_ALIGNED(32, uint8_t, aligned_array)[128];
 *
 * // The default-alignment equivalent would be
 * uint16_t aligned_int = 42;
 * uint8_t aligned_array[128];
 * @endcode
 *
 * @param n Minimum alignment in bytes
 * @param t Type of the variable (or array element)
 * @param v Name of the variable
 */

/**
 * @def DECLARE_ASM_ALIGNED(n,t,v)
 * Declare an aligned variable appropriate for use in inline assembly code.
 *
 * @code{.c}
 * DECLARE_ASM_ALIGNED(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008);
 * @endcode
 *
 * @param n Minimum alignment in bytes
 * @param t Type of the variable (or array element)
 * @param v Name of the variable
 */

/**
 * @def DECLARE_ASM_CONST(n,t,v)
 * Declare a static constant aligned variable appropriate for use in inline
 * assembly code.
 *
 * @code{.c}
 * DECLARE_ASM_CONST(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008);
 * @endcode
 *
 * @param n Minimum alignment in bytes
 * @param t Type of the variable (or array element)
 * @param v Name of the variable
 */

#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C)
    #define DECLARE_ALIGNED(n,t,v)      t __attribute__ ((aligned (n))) v
    #define DECLARE_ASM_ALIGNED(n,t,v)  t __attribute__ ((aligned (n))) v
    #define DECLARE_ASM_CONST(n,t,v)    const t __attribute__ ((aligned (n))) v
#elif defined(__DJGPP__)
    #define DECLARE_ALIGNED(n,t,v)      t __attribute__ ((aligned (FFMIN(n, 16)))) v
    #define DECLARE_ASM_ALIGNED(n,t,v)  t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v
    #define DECLARE_ASM_CONST(n,t,v)    static const t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v
#elif defined(__GNUC__) || defined(__clang__)
    #define DECLARE_ALIGNED(n,t,v)      t __attribute__ ((aligned (n))) v
    #define DECLARE_ASM_ALIGNED(n,t,v)  t av_used __attribute__ ((aligned (n))) v
    #define DECLARE_ASM_CONST(n,t,v)    static const t av_used __attribute__ ((aligned (n))) v
#elif defined(_MSC_VER)
    #define DECLARE_ALIGNED(n,t,v)      __declspec(align(n)) t v
    #define DECLARE_ASM_ALIGNED(n,t,v)  __declspec(align(n)) t v
    #define DECLARE_ASM_CONST(n,t,v)    __declspec(align(n)) static const t v
#else
    #define DECLARE_ALIGNED(n,t,v)      t v
    #define DECLARE_ASM_ALIGNED(n,t,v)  t v
    #define DECLARE_ASM_CONST(n,t,v)    static const t v
#endif
#endif
// Some broken preprocessors need a second expansion
// to be forced to tokenize __VA_ARGS__
#define E1(x) x
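
/**
 * @def LOCAL_ALIGNED(a,t,v,...)
 * Declare an aligned local array together with a typed pointer to it.
 *
 * LOCAL_ALIGNED_D uses DECLARE_ALIGNED directly when the compiler supports
 * aligned local variables (HAVE_LOCAL_ALIGNED); LOCAL_ALIGNED_A instead
 * over-allocates a plain byte array on the stack and aligns the pointer by
 * hand with FFALIGN.
 *
 * Illustrative use (buf is a placeholder name); with HAVE_LOCAL_ALIGNED this
 * expands to roughly:
 * @code{.c}
 * LOCAL_ALIGNED_16(uint8_t, buf, [2], [16]);
 * // ~ DECLARE_ALIGNED(16, uint8_t, la_buf)[2][16];
 * //   uint8_t (*buf)[16] = la_buf;
 * @endcode
 *
 * @param a Minimum alignment in bytes (must be 4, 8, 16 or 32, matching the
 *          LOCAL_ALIGNED_* wrappers defined below)
 * @param t Type of the array elements
 * @param v Name of the pointer to declare
 */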
#define LOCAL_ALIGNED_A(a, t, v, s, o, ...)             \
    uint8_t la_##v[sizeof(t s o) + (a)];                \
    t (*v) o = (void *)FFALIGN((uintptr_t)la_##v, a)

#define LOCAL_ALIGNED_D(a, t, v, s, o, ...)             \
    DECLARE_ALIGNED(a, t, la_##v) s o;                  \
    t (*v) o = la_##v
#define LOCAL_ALIGNED(a, t, v, ...) LOCAL_ALIGNED_##a(t, v, __VA_ARGS__)

#if HAVE_LOCAL_ALIGNED
#   define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_D(4, t, v, __VA_ARGS__,,))
#else
#   define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_A(4, t, v, __VA_ARGS__,,))
#endif

#if HAVE_LOCAL_ALIGNED
#   define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_D(8, t, v, __VA_ARGS__,,))
#else
#   define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_A(8, t, v, __VA_ARGS__,,))
#endif

#if HAVE_LOCAL_ALIGNED
#   define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_D(16, t, v, __VA_ARGS__,,))
#else
#   define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_A(16, t, v, __VA_ARGS__,,))
#endif

#if HAVE_LOCAL_ALIGNED
#   define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_D(32, t, v, __VA_ARGS__,,))
#else
#   define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_A(32, t, v, __VA_ARGS__,,))
#endif
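
/**
 * Make sure the buffer referenced through ptr holds at least min_size bytes.
 *
 * If *size is already large enough, nothing is done. Otherwise the old buffer
 * is freed and a new one of min_size + min_size/16 + 32 bytes is allocated
 * (zero-initialized if zero_realloc is nonzero), so the previous contents are
 * NOT preserved. On allocation failure the buffer pointer is set to NULL and
 * *size to 0.
 *
 * Illustrative use (buf, buf_size and needed are placeholder names):
 * @code{.c}
 * uint8_t *buf = NULL;
 * unsigned int buf_size = 0;
 * ff_fast_malloc(&buf, &buf_size, needed, 0);
 * if (!buf)
 *     return AVERROR(ENOMEM);
 * @endcode
 *
 * @param ptr          pointer to the buffer pointer (e.g. a uint8_t **), passed as void *
 * @param size         pointer to the currently allocated size
 * @param min_size     minimum number of bytes needed
 * @param zero_realloc if nonzero, a newly allocated buffer is zeroed
 * @return 0 if the existing buffer was already large enough, 1 if it was
 *         (re)allocated or the allocation failed
 */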
static inline int ff_fast_malloc(void *ptr, unsigned int *size, size_t min_size, int zero_realloc)
{
    void *val;

    memcpy(&val, ptr, sizeof(val));
    if (min_size <= *size) {
        av_assert0(val || !min_size);
        return 0;
    }
    min_size = FFMAX(min_size + min_size / 16 + 32, min_size);
    av_freep(ptr);
    val = zero_realloc ? av_mallocz(min_size) : av_malloc(min_size);
    memcpy(ptr, &val, sizeof(val));
    if (!val)
        min_size = 0;
    *size = min_size;
    return 1;
}

#endif /* AVUTIL_MEM_INTERNAL_H */