diff --git a/ffmpeg.c b/ffmpeg.c index 9b6c19d9f1..40263e80ea 100644 --- a/ffmpeg.c +++ b/ffmpeg.c @@ -2616,6 +2616,7 @@ static int transcode_init(OutputFile *output_files, int nb_output_files, #endif break; case AVMEDIA_TYPE_SUBTITLE: + codec->time_base = (AVRational){1, 1000}; break; default: abort(); diff --git a/libavcodec/apedec.c b/libavcodec/apedec.c index 35c9a23dd6..1cd7d1e66d 100644 --- a/libavcodec/apedec.c +++ b/libavcodec/apedec.c @@ -25,13 +25,13 @@ #include "bytestream.h" #include "libavutil/audioconvert.h" #include "libavutil/avassert.h" +#include "libavutil/opt.h" /** * @file * Monkey's Audio lossless audio decoder */ -#define BLOCKS_PER_LOOP 4608 #define MAX_CHANNELS 2 #define MAX_BYTESPERSAMPLE 3 @@ -126,6 +126,7 @@ typedef struct APEPredictor { /** Decoder context */ typedef struct APEContext { + AVClass *class; ///< class for AVOptions AVCodecContext *avctx; AVFrame frame; DSPContext dsp; @@ -142,8 +143,10 @@ typedef struct APEContext { int frameflags; ///< frame flags APEPredictor predictor; ///< predictor used for final reconstruction - int32_t decoded0[BLOCKS_PER_LOOP]; ///< decoded data for the first channel - int32_t decoded1[BLOCKS_PER_LOOP]; ///< decoded data for the second channel + int32_t *decoded_buffer; + int decoded_size; + int32_t *decoded[MAX_CHANNELS]; ///< decoded data for each channel + int blocks_per_loop; ///< maximum number of samples to decode for each call int16_t* filterbuf[APE_FILTER_LEVELS]; ///< filter memory @@ -170,8 +173,9 @@ static av_cold int ape_decode_close(AVCodecContext *avctx) for (i = 0; i < APE_FILTER_LEVELS; i++) av_freep(&s->filterbuf[i]); + av_freep(&s->decoded_buffer); av_freep(&s->data); - s->data_size = 0; + s->decoded_size = s->data_size = 0; return 0; } @@ -469,19 +473,13 @@ static inline int ape_decode_value(APEContext *ctx, APERice *rice) static void entropy_decode(APEContext *ctx, int blockstodecode, int stereo) { - int32_t *decoded0 = ctx->decoded0; - int32_t *decoded1 = ctx->decoded1; + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; - if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) { - /* We are pure silence, just memset the output buffer. */ - memset(decoded0, 0, blockstodecode * sizeof(int32_t)); - memset(decoded1, 0, blockstodecode * sizeof(int32_t)); - } else { - while (blockstodecode--) { - *decoded0++ = ape_decode_value(ctx, &ctx->riceY); - if (stereo) - *decoded1++ = ape_decode_value(ctx, &ctx->riceX); - } + while (blockstodecode--) { + *decoded0++ = ape_decode_value(ctx, &ctx->riceY); + if (stereo) + *decoded1++ = ape_decode_value(ctx, &ctx->riceX); } } @@ -525,7 +523,7 @@ static void init_predictor_decoder(APEContext *ctx) APEPredictor *p = &ctx->predictor; /* Zero the history buffers */ - memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(int32_t)); + memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(*p->historybuffer)); p->buf = p->historybuffer; /* Initialize and zero the coefficients */ @@ -593,8 +591,8 @@ static av_always_inline int predictor_update_filter(APEPredictor *p, static void predictor_decode_stereo(APEContext *ctx, int count) { APEPredictor *p = &ctx->predictor; - int32_t *decoded0 = ctx->decoded0; - int32_t *decoded1 = ctx->decoded1; + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; while (count--) { /* Predictor Y */ @@ -610,7 +608,8 @@ static void predictor_decode_stereo(APEContext *ctx, int count) /* Have we filled the history buffer? 
*/ if (p->buf == p->historybuffer + HISTORY_SIZE) { - memmove(p->historybuffer, p->buf, PREDICTOR_SIZE * sizeof(int32_t)); + memmove(p->historybuffer, p->buf, + PREDICTOR_SIZE * sizeof(*p->historybuffer)); p->buf = p->historybuffer; } } @@ -619,7 +618,7 @@ static void predictor_decode_stereo(APEContext *ctx, int count) static void predictor_decode_mono(APEContext *ctx, int count) { APEPredictor *p = &ctx->predictor; - int32_t *decoded0 = ctx->decoded0; + int32_t *decoded0 = ctx->decoded[0]; int32_t predictionA, currentA, A, sign; currentA = p->lastA[0]; @@ -650,7 +649,8 @@ static void predictor_decode_mono(APEContext *ctx, int count) /* Have we filled the history buffer? */ if (p->buf == p->historybuffer + HISTORY_SIZE) { - memmove(p->historybuffer, p->buf, PREDICTOR_SIZE * sizeof(int32_t)); + memmove(p->historybuffer, p->buf, + PREDICTOR_SIZE * sizeof(*p->historybuffer)); p->buf = p->historybuffer; } @@ -668,8 +668,8 @@ static void do_init_filter(APEFilter *f, int16_t *buf, int order) f->delay = f->historybuffer + order * 2; f->adaptcoeffs = f->historybuffer + order; - memset(f->historybuffer, 0, (order * 2) * sizeof(int16_t)); - memset(f->coeffs, 0, order * sizeof(int16_t)); + memset(f->historybuffer, 0, (order * 2) * sizeof(*f->historybuffer)); + memset(f->coeffs, 0, order * sizeof(*f->coeffs)); f->avg = 0; } @@ -725,7 +725,7 @@ static void do_apply_filter(APEContext *ctx, int version, APEFilter *f, /* Have we filled the history buffer? */ if (f->delay == f->historybuffer + HISTORY_SIZE + (order * 2)) { memmove(f->historybuffer, f->delay - (order * 2), - (order * 2) * sizeof(int16_t)); + (order * 2) * sizeof(*f->historybuffer)); f->delay = f->historybuffer + order * 2; f->adaptcoeffs = f->historybuffer + order; } @@ -773,33 +773,29 @@ static int init_frame_decoder(APEContext *ctx) static void ape_unpack_mono(APEContext *ctx, int count) { - int32_t *decoded0 = ctx->decoded0; - int32_t *decoded1 = ctx->decoded1; - if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) { - entropy_decode(ctx, count, 0); /* We are pure silence, so we're done. */ av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence mono\n"); return; } entropy_decode(ctx, count, 0); - ape_apply_filters(ctx, decoded0, NULL, count); + ape_apply_filters(ctx, ctx->decoded[0], NULL, count); /* Now apply the predictor decoding */ predictor_decode_mono(ctx, count); /* Pseudo-stereo - just copy left channel to right channel */ if (ctx->channels == 2) { - memcpy(decoded1, decoded0, count * sizeof(*decoded1)); + memcpy(ctx->decoded[1], ctx->decoded[0], count * sizeof(*ctx->decoded[1])); } } static void ape_unpack_stereo(APEContext *ctx, int count) { int32_t left, right; - int32_t *decoded0 = ctx->decoded0; - int32_t *decoded1 = ctx->decoded1; + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) { /* We are pure silence, so we're done. 
*/ @@ -883,9 +879,6 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data, } s->samples = nblocks; - memset(s->decoded0, 0, sizeof(s->decoded0)); - memset(s->decoded1, 0, sizeof(s->decoded1)); - /* Initialize the frame decoder */ if (init_frame_decoder(s) < 0) { av_log(avctx, AV_LOG_ERROR, "Error reading frame header\n"); @@ -900,7 +893,16 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data, return avpkt->size; } - blockstodecode = FFMIN(BLOCKS_PER_LOOP, s->samples); + blockstodecode = FFMIN(s->blocks_per_loop, s->samples); + + /* reallocate decoded sample buffer if needed */ + av_fast_malloc(&s->decoded_buffer, &s->decoded_size, + 2 * FFALIGN(blockstodecode, 8) * sizeof(*s->decoded_buffer)); + if (!s->decoded_buffer) + return AVERROR(ENOMEM); + memset(s->decoded_buffer, 0, s->decoded_size); + s->decoded[0] = s->decoded_buffer; + s->decoded[1] = s->decoded_buffer + FFALIGN(blockstodecode, 8); /* get output buffer */ s->frame.nb_samples = blockstodecode; @@ -927,25 +929,25 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data, case 8: sample8 = (uint8_t *)s->frame.data[0]; for (i = 0; i < blockstodecode; i++) { - *sample8++ = (s->decoded0[i] + 0x80) & 0xff; + *sample8++ = (s->decoded[0][i] + 0x80) & 0xff; if (s->channels == 2) - *sample8++ = (s->decoded1[i] + 0x80) & 0xff; + *sample8++ = (s->decoded[1][i] + 0x80) & 0xff; } break; case 16: sample16 = (int16_t *)s->frame.data[0]; for (i = 0; i < blockstodecode; i++) { - *sample16++ = s->decoded0[i]; + *sample16++ = s->decoded[0][i]; if (s->channels == 2) - *sample16++ = s->decoded1[i]; + *sample16++ = s->decoded[1][i]; } break; case 24: sample24 = (int32_t *)s->frame.data[0]; for (i = 0; i < blockstodecode; i++) { - *sample24++ = s->decoded0[i] << 8; + *sample24++ = s->decoded[0][i] << 8; if (s->channels == 2) - *sample24++ = s->decoded1[i] << 8; + *sample24++ = s->decoded[1][i] << 8; } break; } @@ -964,6 +966,21 @@ static void ape_flush(AVCodecContext *avctx) s->samples= 0; } +#define OFFSET(x) offsetof(APEContext, x) +#define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM) +static const AVOption options[] = { + { "max_samples", "maximum number of samples decoded per call", OFFSET(blocks_per_loop), AV_OPT_TYPE_INT, { 4608 }, 1, INT_MAX, PAR, "max_samples" }, + { "all", "no maximum. decode all samples for each packet at once", 0, AV_OPT_TYPE_CONST, { INT_MAX }, INT_MIN, INT_MAX, PAR, "max_samples" }, + { NULL}, +}; + +static const AVClass ape_decoder_class = { + .class_name = "APE decoder", + .item_name = av_default_item_name, + .option = options, + .version = LIBAVUTIL_VERSION_INT, +}; + AVCodec ff_ape_decoder = { .name = "ape", .type = AVMEDIA_TYPE_AUDIO, @@ -975,4 +992,5 @@ AVCodec ff_ape_decoder = { .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY | CODEC_CAP_DR1, .flush = ape_flush, .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"), + .priv_class = &ape_decoder_class, }; diff --git a/libavcodec/bytestream.h b/libavcodec/bytestream.h index 71c70aac84..d315e3f818 100644 --- a/libavcodec/bytestream.h +++ b/libavcodec/bytestream.h @@ -1,6 +1,7 @@ /* * Bytestream functions * copyright (c) 2006 Baptiste Coudurier + * Copyright (c) 2012 Aneesh Dogra (lionaneesh) * * This file is part of FFmpeg. 
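[Editor's illustrative sketch, not part of the patch: the apedec.c hunk above adds a private "max_samples" AVOption to the APE decoder. Assuming the avcodec_open2()/AVDictionary option mechanism of this era, a caller could set it like this; error handling is abbreviated and the helper name is made up.]

#include "libavcodec/avcodec.h"
#include "libavutil/dict.h"

/* Open the APE decoder with the new "max_samples" private option.
 * "all" requests that every sample of a packet be output in one call;
 * a number (e.g. "4608", the previous fixed value) caps samples per call. */
static AVCodecContext *open_ape_decoder(void)
{
    AVCodec *codec = avcodec_find_decoder(CODEC_ID_APE);
    AVCodecContext *avctx = codec ? avcodec_alloc_context3(codec) : NULL;
    AVDictionary *opts = NULL;
    int ret = -1;

    if (avctx) {
        av_dict_set(&opts, "max_samples", "all", 0);
        ret = avcodec_open2(avctx, codec, &opts);
        av_dict_free(&opts);
    }
    return ret < 0 ? NULL : avctx;
}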
* @@ -30,6 +31,11 @@ typedef struct { const uint8_t *buffer, *buffer_end, *buffer_start; } GetByteContext; +typedef struct { + uint8_t *buffer, *buffer_end, *buffer_start; + int eof; +} PutByteContext; + #define DEF_T(type, name, bytes, read, write) \ static av_always_inline type bytestream_get_ ## name(const uint8_t **b){\ (*b) += bytes;\ @@ -39,6 +45,17 @@ static av_always_inline void bytestream_put_ ##name(uint8_t **b, const type valu write(*b, value);\ (*b) += bytes;\ }\ +static av_always_inline void bytestream2_put_ ## name ## u(PutByteContext *p, const type value)\ +{\ + bytestream_put_ ## name(&p->buffer, value);\ +}\ +static av_always_inline void bytestream2_put_ ## name(PutByteContext *p, const type value){\ + if (!p->eof && (p->buffer_end - p->buffer >= bytes)) {\ + write(p->buffer, value);\ + p->buffer += bytes;\ + } else\ + p->eof = 1;\ +}\ static av_always_inline type bytestream2_get_ ## name ## u(GetByteContext *g)\ {\ return bytestream_get_ ## name(&g->buffer);\ @@ -119,22 +136,53 @@ static av_always_inline void bytestream2_init(GetByteContext *g, g->buffer_end = buf + buf_size; } +static av_always_inline void bytestream2_init_writer(PutByteContext *p, + uint8_t *buf, int buf_size) +{ + p->buffer = buf; + p->buffer_start = buf; + p->buffer_end = buf + buf_size; + p->eof = 0; +} + static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g) { return g->buffer_end - g->buffer; } +static av_always_inline unsigned int bytestream2_get_bytes_left_p(PutByteContext *p) +{ + return p->buffer_end - p->buffer; +} + static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size) { g->buffer += FFMIN(g->buffer_end - g->buffer, size); } +static av_always_inline void bytestream2_skip_p(PutByteContext *p, + unsigned int size) +{ + int size2; + if (p->eof) + return; + size2 = FFMIN(p->buffer_end - p->buffer, size); + if (size2 != size) + p->eof = 1; + p->buffer += size2; +} + static av_always_inline int bytestream2_tell(GetByteContext *g) { return (int)(g->buffer - g->buffer_start); } +static av_always_inline int bytestream2_tell_p(PutByteContext *p) +{ + return (int)(p->buffer - p->buffer_start); +} + static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence) { @@ -158,6 +206,36 @@ static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, return bytestream2_tell(g); } +static av_always_inline int bytestream2_seek_p(PutByteContext *p, int offset, + int whence) +{ + p->eof = 0; + switch (whence) { + case SEEK_CUR: + if (p->buffer_end - p->buffer < offset) + p->eof = 1; + offset = av_clip(offset, -(p->buffer - p->buffer_start), + p->buffer_end - p->buffer); + p->buffer += offset; + break; + case SEEK_END: + if (offset > 0) + p->eof = 1; + offset = av_clip(offset, -(p->buffer_end - p->buffer_start), 0); + p->buffer = p->buffer_end + offset; + break; + case SEEK_SET: + if (p->buffer_end - p->buffer_start < offset) + p->eof = 1; + offset = av_clip(offset, 0, p->buffer_end - p->buffer_start); + p->buffer = p->buffer_start + offset; + break; + default: + return AVERROR(EINVAL); + } + return bytestream2_tell_p(p); +} + static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, uint8_t *dst, unsigned int size) @@ -168,6 +246,40 @@ static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, return size2; } +static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p, + const uint8_t *src, + unsigned int size) +{ + int size2; + if (p->eof) + return 0; + size2 
= FFMIN(p->buffer_end - p->buffer, size); + if (size2 != size) + p->eof = 1; + memcpy(p->buffer, src, size2); + p->buffer += size2; + return size2; +} + +static av_always_inline void bytestream2_set_buffer(PutByteContext *p, + const uint8_t c, + unsigned int size) +{ + int size2; + if (p->eof) + return; + size2 = FFMIN(p->buffer_end - p->buffer, size); + if (size2 != size) + p->eof = 1; + memset(p->buffer, c, size2); + p->buffer += size2; +} + +static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p) +{ + return p->eof; +} + static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b, uint8_t *dst, unsigned int size) { memcpy(dst, *b, size); diff --git a/libavcodec/vp8.c b/libavcodec/vp8.c index 370fb0070c..4728393d10 100644 --- a/libavcodec/vp8.c +++ b/libavcodec/vp8.c @@ -1561,18 +1561,6 @@ static void release_queued_segmaps(VP8Context *s, int is_close) s->maps_are_invalid = 0; } -/** - * Sets things up for skipping the current frame. - * In particular, removes it from the reference buffers. - */ -static void skipframe_clear(VP8Context *s) -{ - s->invisible = 1; - s->next_framep[VP56_FRAME_CURRENT] = NULL; - if (s->update_last) - s->next_framep[VP56_FRAME_PREVIOUS] = NULL; -} - static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { @@ -1584,7 +1572,7 @@ static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size, release_queued_segmaps(s, 0); if ((ret = decode_frame_header(s, avpkt->data, avpkt->size)) < 0) - return ret; + goto err; prev_frame = s->framep[VP56_FRAME_CURRENT]; @@ -1594,6 +1582,11 @@ static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size, skip_thresh = !referenced ? AVDISCARD_NONREF : !s->keyframe ? AVDISCARD_NONKEY : AVDISCARD_ALL; + if (avctx->skip_frame >= skip_thresh) { + s->invisible = 1; + memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4); + goto skip_decode; + } s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh; // release no longer referenced frames @@ -1618,6 +1611,27 @@ static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size, av_log(avctx, AV_LOG_FATAL, "Ran out of free frames!\n"); abort(); } + if (curframe->data[0]) + vp8_release_frame(s, curframe, 1, 0); + + // Given that arithmetic probabilities are updated every frame, it's quite likely + // that the values we have on a random interframe are complete junk if we didn't + // start decode on a keyframe. So just don't display anything rather than junk. + if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] || + !s->framep[VP56_FRAME_GOLDEN] || + !s->framep[VP56_FRAME_GOLDEN2])) { + av_log(avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n"); + ret = AVERROR_INVALIDDATA; + goto err; + } + + curframe->key_frame = s->keyframe; + curframe->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; + curframe->reference = referenced ? 
3 : 0; + if ((ret = vp8_alloc_frame(s, curframe))) { + av_log(avctx, AV_LOG_ERROR, "get_buffer() failed!\n"); + goto err; + } // check if golden and altref are swapped if (s->update_altref != VP56_FRAME_NONE) { @@ -1637,36 +1651,6 @@ static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size, } s->next_framep[VP56_FRAME_CURRENT] = curframe; - if (avctx->skip_frame >= skip_thresh) { - skipframe_clear(s); - ret = avpkt->size; - goto skip_decode; - } - - // Given that arithmetic probabilities are updated every frame, it's quite likely - // that the values we have on a random interframe are complete junk if we didn't - // start decode on a keyframe. So just don't display anything rather than junk. - if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] || - !s->framep[VP56_FRAME_GOLDEN] || - !s->framep[VP56_FRAME_GOLDEN2])) { - av_log(avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n"); - skipframe_clear(s); - ret = AVERROR_INVALIDDATA; - goto skip_decode; - } - - if (curframe->data[0]) - vp8_release_frame(s, curframe, 1, 0); - - curframe->key_frame = s->keyframe; - curframe->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; - curframe->reference = referenced ? 3 : 0; - if ((ret = vp8_alloc_frame(s, curframe))) { - av_log(avctx, AV_LOG_ERROR, "get_buffer() failed!\n"); - skipframe_clear(s); - goto skip_decode; - } - ff_thread_finish_setup(avctx); s->linesize = curframe->linesize[0]; @@ -1778,20 +1762,22 @@ static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size, } ff_thread_report_progress(curframe, INT_MAX, 0); - ret = avpkt->size; + memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4); + skip_decode: // if future frames don't use the updated probabilities, // reset them to the values we saved if (!s->update_probabilities) s->prob[0] = s->prob[1]; - memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4); - if (!s->invisible) { *(AVFrame*)data = *curframe; *data_size = sizeof(AVFrame); } + return avpkt->size; +err: + memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4); return ret; } diff --git a/libavcodec/x86/h264_idct_10bit.asm b/libavcodec/x86/h264_idct_10bit.asm index f94207bb11..27a18f47da 100644 --- a/libavcodec/x86/h264_idct_10bit.asm +++ b/libavcodec/x86/h264_idct_10bit.asm @@ -315,7 +315,7 @@ IDCT_ADD16INTRA_10 avx ; h264_idct_add8(pixel **dst, const int *block_offset, dctcoef *block, int stride, const uint8_t nnzc[6*8]) ;----------------------------------------------------------------------------- %macro IDCT_ADD8 1 -cglobal h264_idct_add8_10_%1,5,7 +cglobal h264_idct_add8_10_%1,5,7,7 %if ARCH_X86_64 mov r10, r0 %endif diff --git a/libswscale/Makefile b/libswscale/Makefile index b761470fd1..50e7f7fcaa 100644 --- a/libswscale/Makefile +++ b/libswscale/Makefile @@ -5,8 +5,8 @@ FFLIBS = avutil HEADERS = swscale.h -OBJS = options.o rgb2rgb.o swscale.o utils.o yuv2rgb.o \ - swscale_unscaled.o +OBJS = input.o options.o output.o rgb2rgb.o swscale.o \ + swscale_unscaled.o utils.o yuv2rgb.o OBJS-$(ARCH_BFIN) += bfin/internal_bfin.o \ bfin/swscale_bfin.o \ diff --git a/libswscale/input.c b/libswscale/input.c new file mode 100644 index 0000000000..983867779a --- /dev/null +++ b/libswscale/input.c @@ -0,0 +1,799 @@ +/* + * Copyright (C) 2001-2012 Michael Niedermayer + * + * This file is part of FFmpeg. 
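[Editor's illustrative sketch, not part of the patch: usage of the PutByteContext writer API added to libavcodec/bytestream.h earlier in this patch. Only functions visible in that hunk are used; the "CHNK" tag, padding, and helper name are made up for illustration.]

#include "libavcodec/bytestream.h"

/* Write a 4-byte tag, a payload and two bytes of zero padding into a fixed
 * buffer, then report truncation via the writer's eof flag. */
static int write_chunk(uint8_t *dst, int dst_size,
                       const uint8_t *payload, int payload_size)
{
    PutByteContext pb;

    bytestream2_init_writer(&pb, dst, dst_size);
    bytestream2_put_buffer(&pb, (const uint8_t *)"CHNK", 4);
    bytestream2_put_buffer(&pb, payload, payload_size);
    bytestream2_set_buffer(&pb, 0, 2);      /* zero padding                    */
    if (bytestream2_get_eof(&pb))           /* set if any write was truncated  */
        return -1;
    return bytestream2_tell_p(&pb);         /* number of bytes actually written */
}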
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include + +#include "libavutil/avutil.h" +#include "libavutil/bswap.h" +#include "libavutil/cpu.h" +#include "libavutil/intreadwrite.h" +#include "libavutil/mathematics.h" +#include "libavutil/pixdesc.h" +#include "config.h" +#include "rgb2rgb.h" +#include "swscale.h" +#include "swscale_internal.h" + +#define RGB2YUV_SHIFT 15 +#define BY ( (int)(0.114*219/255*(1<> RGB2YUV_SHIFT; + } +} + +static av_always_inline void +rgb48ToUV_c_template(uint16_t *dstU, uint16_t *dstV, + const uint16_t *src1, const uint16_t *src2, + int width, enum PixelFormat origin) +{ + int i; + assert(src1==src2); + for (i = 0; i < width; i++) { + int r_b = input_pixel(&src1[i*3+0]); + int g = input_pixel(&src1[i*3+1]); + int b_r = input_pixel(&src1[i*3+2]); + + dstU[i] = (RU*r + GU*g + BU*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT; + dstV[i] = (RV*r + GV*g + BV*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT; + } +} + +static av_always_inline void +rgb48ToUV_half_c_template(uint16_t *dstU, uint16_t *dstV, + const uint16_t *src1, const uint16_t *src2, + int width, enum PixelFormat origin) +{ + int i; + assert(src1==src2); + for (i = 0; i < width; i++) { + int r_b = (input_pixel(&src1[6 * i + 0]) + input_pixel(&src1[6 * i + 3]) + 1) >> 1; + int g = (input_pixel(&src1[6 * i + 1]) + input_pixel(&src1[6 * i + 4]) + 1) >> 1; + int b_r = (input_pixel(&src1[6 * i + 2]) + input_pixel(&src1[6 * i + 5]) + 1) >> 1; + + dstU[i]= (RU*r + GU*g + BU*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT; + dstV[i]= (RV*r + GV*g + BV*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT; + } +} + +#undef r +#undef b +#undef input_pixel + +#define rgb48funcs(pattern, BE_LE, origin) \ +static void pattern ## 48 ## BE_LE ## ToY_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused0, const uint8_t *unused1,\ + int width, uint32_t *unused) \ +{ \ + const uint16_t *src = (const uint16_t *) _src; \ + uint16_t *dst = (uint16_t *) _dst; \ + rgb48ToY_c_template(dst, src, width, origin); \ +} \ + \ +static void pattern ## 48 ## BE_LE ## ToUV_c(uint8_t *_dstU, uint8_t *_dstV, \ + const uint8_t *unused0, const uint8_t *_src1, const uint8_t *_src2, \ + int width, uint32_t *unused) \ +{ \ + const uint16_t *src1 = (const uint16_t *) _src1, \ + *src2 = (const uint16_t *) _src2; \ + uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; \ + rgb48ToUV_c_template(dstU, dstV, src1, src2, width, origin); \ +} \ + \ +static void pattern ## 48 ## BE_LE ## ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, \ + const uint8_t *unused0, const uint8_t *_src1, const uint8_t *_src2, \ + int width, uint32_t *unused) \ +{ \ + const uint16_t *src1 = (const uint16_t *) _src1, \ + *src2 = (const uint16_t *) _src2; \ + uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; \ + 
rgb48ToUV_half_c_template(dstU, dstV, src1, src2, width, origin); \ +} + +rgb48funcs(rgb, LE, PIX_FMT_RGB48LE) +rgb48funcs(rgb, BE, PIX_FMT_RGB48BE) +rgb48funcs(bgr, LE, PIX_FMT_BGR48LE) +rgb48funcs(bgr, BE, PIX_FMT_BGR48BE) + +#define input_pixel(i) ((origin == PIX_FMT_RGBA || origin == PIX_FMT_BGRA || \ + origin == PIX_FMT_ARGB || origin == PIX_FMT_ABGR) ? AV_RN32A(&src[(i)*4]) : \ + (isBE(origin) ? AV_RB16(&src[(i)*2]) : AV_RL16(&src[(i)*2]))) + +static av_always_inline void +rgb16_32ToY_c_template(int16_t *dst, const uint8_t *src, + int width, enum PixelFormat origin, + int shr, int shg, int shb, int shp, + int maskr, int maskg, int maskb, + int rsh, int gsh, int bsh, int S) +{ + const int ry = RY << rsh, gy = GY << gsh, by = BY << bsh; + const unsigned rnd = (32<<((S)-1)) + (1<<(S-7)); + int i; + + for (i = 0; i < width; i++) { + int px = input_pixel(i) >> shp; + int b = (px & maskb) >> shb; + int g = (px & maskg) >> shg; + int r = (px & maskr) >> shr; + + dst[i] = (ry * r + gy * g + by * b + rnd) >> ((S)-6); + } +} + +static av_always_inline void +rgb16_32ToUV_c_template(int16_t *dstU, int16_t *dstV, + const uint8_t *src, int width, + enum PixelFormat origin, + int shr, int shg, int shb, int shp, + int maskr, int maskg, int maskb, + int rsh, int gsh, int bsh, int S) +{ + const int ru = RU << rsh, gu = GU << gsh, bu = BU << bsh, + rv = RV << rsh, gv = GV << gsh, bv = BV << bsh; + const unsigned rnd = (256u<<((S)-1)) + (1<<(S-7)); + int i; + + for (i = 0; i < width; i++) { + int px = input_pixel(i) >> shp; + int b = (px & maskb) >> shb; + int g = (px & maskg) >> shg; + int r = (px & maskr) >> shr; + + dstU[i] = (ru * r + gu * g + bu * b + rnd) >> ((S)-6); + dstV[i] = (rv * r + gv * g + bv * b + rnd) >> ((S)-6); + } +} + +static av_always_inline void +rgb16_32ToUV_half_c_template(int16_t *dstU, int16_t *dstV, + const uint8_t *src, int width, + enum PixelFormat origin, + int shr, int shg, int shb, int shp, + int maskr, int maskg, int maskb, + int rsh, int gsh, int bsh, int S) +{ + const int ru = RU << rsh, gu = GU << gsh, bu = BU << bsh, + rv = RV << rsh, gv = GV << gsh, bv = BV << bsh, + maskgx = ~(maskr | maskb); + const unsigned rnd = (256U<<(S)) + (1<<(S-6)); + int i; + + maskr |= maskr << 1; maskb |= maskb << 1; maskg |= maskg << 1; + for (i = 0; i < width; i++) { + int px0 = input_pixel(2 * i + 0) >> shp; + int px1 = input_pixel(2 * i + 1) >> shp; + int b, r, g = (px0 & maskgx) + (px1 & maskgx); + int rb = px0 + px1 - g; + + b = (rb & maskb) >> shb; + if (shp || origin == PIX_FMT_BGR565LE || origin == PIX_FMT_BGR565BE || + origin == PIX_FMT_RGB565LE || origin == PIX_FMT_RGB565BE) { + g >>= shg; + } else { + g = (g & maskg) >> shg; + } + r = (rb & maskr) >> shr; + + dstU[i] = (ru * r + gu * g + bu * b + (unsigned)rnd) >> ((S)-6+1); + dstV[i] = (rv * r + gv * g + bv * b + (unsigned)rnd) >> ((S)-6+1); + } +} + +#undef input_pixel + +#define rgb16_32_wrapper(fmt, name, shr, shg, shb, shp, maskr, \ + maskg, maskb, rsh, gsh, bsh, S) \ +static void name ## ToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, \ + int width, uint32_t *unused) \ +{ \ + rgb16_32ToY_c_template((int16_t*)dst, src, width, fmt, \ + shr, shg, shb, shp, \ + maskr, maskg, maskb, rsh, gsh, bsh, S); \ +} \ + \ +static void name ## ToUV_c(uint8_t *dstU, uint8_t *dstV, \ + const uint8_t *unused0, const uint8_t *src, const uint8_t *dummy, \ + int width, uint32_t *unused) \ +{ \ + rgb16_32ToUV_c_template((int16_t*)dstU, (int16_t*)dstV, src, width, fmt, \ + shr, shg, shb, shp, \ + maskr, 
maskg, maskb, rsh, gsh, bsh, S); \ +} \ + \ +static void name ## ToUV_half_c(uint8_t *dstU, uint8_t *dstV, \ + const uint8_t *unused0, const uint8_t *src, const uint8_t *dummy, \ + int width, uint32_t *unused) \ +{ \ + rgb16_32ToUV_half_c_template((int16_t*)dstU, (int16_t*)dstV, src, width, fmt, \ + shr, shg, shb, shp, \ + maskr, maskg, maskb, rsh, gsh, bsh, S); \ +} + +rgb16_32_wrapper(PIX_FMT_BGR32, bgr32, 16, 0, 0, 0, 0xFF0000, 0xFF00, 0x00FF, 8, 0, 8, RGB2YUV_SHIFT+8) +rgb16_32_wrapper(PIX_FMT_BGR32_1, bgr321, 16, 0, 0, 8, 0xFF0000, 0xFF00, 0x00FF, 8, 0, 8, RGB2YUV_SHIFT+8) +rgb16_32_wrapper(PIX_FMT_RGB32, rgb32, 0, 0, 16, 0, 0x00FF, 0xFF00, 0xFF0000, 8, 0, 8, RGB2YUV_SHIFT+8) +rgb16_32_wrapper(PIX_FMT_RGB32_1, rgb321, 0, 0, 16, 8, 0x00FF, 0xFF00, 0xFF0000, 8, 0, 8, RGB2YUV_SHIFT+8) +rgb16_32_wrapper(PIX_FMT_BGR565LE, bgr16le, 0, 0, 0, 0, 0x001F, 0x07E0, 0xF800, 11, 5, 0, RGB2YUV_SHIFT+8) +rgb16_32_wrapper(PIX_FMT_BGR555LE, bgr15le, 0, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, 10, 5, 0, RGB2YUV_SHIFT+7) +rgb16_32_wrapper(PIX_FMT_BGR444LE, bgr12le, 0, 0, 0, 0, 0x000F, 0x00F0, 0x0F00, 8, 4, 0, RGB2YUV_SHIFT+4) +rgb16_32_wrapper(PIX_FMT_RGB565LE, rgb16le, 0, 0, 0, 0, 0xF800, 0x07E0, 0x001F, 0, 5, 11, RGB2YUV_SHIFT+8) +rgb16_32_wrapper(PIX_FMT_RGB555LE, rgb15le, 0, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, 0, 5, 10, RGB2YUV_SHIFT+7) +rgb16_32_wrapper(PIX_FMT_RGB444LE, rgb12le, 0, 0, 0, 0, 0x0F00, 0x00F0, 0x000F, 0, 4, 8, RGB2YUV_SHIFT+4) +rgb16_32_wrapper(PIX_FMT_BGR565BE, bgr16be, 0, 0, 0, 0, 0x001F, 0x07E0, 0xF800, 11, 5, 0, RGB2YUV_SHIFT+8) +rgb16_32_wrapper(PIX_FMT_BGR555BE, bgr15be, 0, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, 10, 5, 0, RGB2YUV_SHIFT+7) +rgb16_32_wrapper(PIX_FMT_BGR444BE, bgr12be, 0, 0, 0, 0, 0x000F, 0x00F0, 0x0F00, 8, 4, 0, RGB2YUV_SHIFT+4) +rgb16_32_wrapper(PIX_FMT_RGB565BE, rgb16be, 0, 0, 0, 0, 0xF800, 0x07E0, 0x001F, 0, 5, 11, RGB2YUV_SHIFT+8) +rgb16_32_wrapper(PIX_FMT_RGB555BE, rgb15be, 0, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, 0, 5, 10, RGB2YUV_SHIFT+7) +rgb16_32_wrapper(PIX_FMT_RGB444BE, rgb12be, 0, 0, 0, 0, 0x0F00, 0x00F0, 0x000F, 0, 4, 8, RGB2YUV_SHIFT+4) + +static void gbr24pToUV_half_c(uint16_t *dstU, uint16_t *dstV, + const uint8_t *gsrc, const uint8_t *bsrc, const uint8_t *rsrc, + int width, enum PixelFormat origin) +{ + int i; + for (i=0; i> (RGB2YUV_SHIFT-6+1); + dstV[i] = (RV*r + GV*g + BV*b + (0x4001<<(RGB2YUV_SHIFT-6))) >> (RGB2YUV_SHIFT-6+1); + } +} + +static void abgrToA_c(int16_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused) +{ + int i; + for (i=0; i> 24)<<6; + } +} + +static void palToY_c(int16_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, long width, uint32_t *pal) +{ + int i; + for (i=0; i> 8)<<6; + dstV[i]= (uint8_t)(p>>16)<<6; + } +} + +static void monowhite2Y_c(int16_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused) +{ + int i, j; + for (i=0; i>(7-j))&1)*16383; + } + if(width&7){ + int d= ~src[i]; + for(j=0; j<(width&7); j++) + dst[8*i+j]= ((d>>(7-j))&1)*16383; + } +} + +static void monoblack2Y_c(int16_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused) +{ + int i, j; + for (i=0; i>(7-j))&1)*16383; + } + if(width&7){ + int d= src[i]; + for(j=0; j<(width&7); j++) + dst[8*i+j]= ((d>>(7-j))&1)*16383; + } +} + +static void yuy2ToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, + uint32_t *unused) +{ + int i; + for (i=0; 
i>(RGB2YUV_SHIFT-6)); + } +} + +static void bgr24ToUV_c(int16_t *dstU, int16_t *dstV, const uint8_t *unused0, const uint8_t *src1, + const uint8_t *src2, int width, uint32_t *unused) +{ + int i; + for (i=0; i>(RGB2YUV_SHIFT-6); + dstV[i]= (RV*r + GV*g + BV*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6); + } + assert(src1 == src2); +} + +static void bgr24ToUV_half_c(int16_t *dstU, int16_t *dstV, const uint8_t *unused0, const uint8_t *src1, + const uint8_t *src2, int width, uint32_t *unused) +{ + int i; + for (i=0; i>(RGB2YUV_SHIFT-5); + dstV[i]= (RV*r + GV*g + BV*b + (256<>(RGB2YUV_SHIFT-5); + } + assert(src1 == src2); +} + +static void rgb24ToY_c(int16_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, + uint32_t *unused) +{ + int i; + for (i=0; i>(RGB2YUV_SHIFT-6)); + } +} + +static void rgb24ToUV_c(int16_t *dstU, int16_t *dstV, const uint8_t *unused0, const uint8_t *src1, + const uint8_t *src2, int width, uint32_t *unused) +{ + int i; + assert(src1==src2); + for (i=0; i>(RGB2YUV_SHIFT-6); + dstV[i]= (RV*r + GV*g + BV*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6); + } +} + +static void rgb24ToUV_half_c(int16_t *dstU, int16_t *dstV, const uint8_t *unused0, const uint8_t *src1, + const uint8_t *src2, int width, uint32_t *unused) +{ + int i; + assert(src1==src2); + for (i=0; i>(RGB2YUV_SHIFT-5); + dstV[i]= (RV*r + GV*g + BV*b + (256<>(RGB2YUV_SHIFT-5); + } +} + +static void planar_rgb_to_y(uint16_t *dst, const uint8_t *src[4], int width) +{ + int i; + for (i = 0; i < width; i++) { + int g = src[0][i]; + int b = src[1][i]; + int r = src[2][i]; + + dst[i] = (RY*r + GY*g + BY*b + (0x801<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6); + } +} + +static void planar_rgb16le_to_y(uint8_t *_dst, const uint8_t *_src[4], int width) +{ + int i; + const uint16_t **src = (const uint16_t **) _src; + uint16_t *dst = (uint16_t *) _dst; + for (i = 0; i < width; i++) { + int g = AV_RL16(src[0] + i); + int b = AV_RL16(src[1] + i); + int r = AV_RL16(src[2] + i); + + dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT); + } +} + +static void planar_rgb16be_to_y(uint8_t *_dst, const uint8_t *_src[4], int width) +{ + int i; + const uint16_t **src = (const uint16_t **) _src; + uint16_t *dst = (uint16_t *) _dst; + for (i = 0; i < width; i++) { + int g = AV_RB16(src[0] + i); + int b = AV_RB16(src[1] + i); + int r = AV_RB16(src[2] + i); + + dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT); + } +} + +static void planar_rgb_to_uv(uint16_t *dstU, uint16_t *dstV, const uint8_t *src[4], int width) +{ + int i; + for (i = 0; i < width; i++) { + int g = src[0][i]; + int b = src[1][i]; + int r = src[2][i]; + + dstU[i] = (RU*r + GU*g + BU*b + (0x4001<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6); + dstV[i] = (RV*r + GV*g + BV*b + (0x4001<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6); + } +} + +static void planar_rgb16le_to_uv(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *_src[4], int width) +{ + int i; + const uint16_t **src = (const uint16_t **) _src; + uint16_t *dstU = (uint16_t *) _dstU; + uint16_t *dstV = (uint16_t *) _dstV; + for (i = 0; i < width; i++) { + int g = AV_RL16(src[0] + i); + int b = AV_RL16(src[1] + i); + int r = AV_RL16(src[2] + i); + + dstU[i] = (RU * r + GU * g + BU * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1); + dstV[i] = (RV * r + GV * g + BV * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1); + } +} + +static void planar_rgb16be_to_uv(uint8_t *_dstU, 
uint8_t *_dstV, const uint8_t *_src[4], int width) +{ + int i; + const uint16_t **src = (const uint16_t **) _src; + uint16_t *dstU = (uint16_t *) _dstU; + uint16_t *dstV = (uint16_t *) _dstV; + for (i = 0; i < width; i++) { + int g = AV_RB16(src[0] + i); + int b = AV_RB16(src[1] + i); + int r = AV_RB16(src[2] + i); + + dstU[i] = (RU * r + GU * g + BU * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1); + dstV[i] = (RV * r + GV * g + BV * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1); + } +} + +av_cold void ff_sws_init_input_funcs(SwsContext *c) +{ + enum PixelFormat srcFormat = c->srcFormat; + + c->chrToYV12 = NULL; + switch(srcFormat) { + case PIX_FMT_YUYV422 : c->chrToYV12 = yuy2ToUV_c; break; + case PIX_FMT_UYVY422 : c->chrToYV12 = uyvyToUV_c; break; + case PIX_FMT_NV12 : c->chrToYV12 = nv12ToUV_c; break; + case PIX_FMT_NV21 : c->chrToYV12 = nv21ToUV_c; break; + case PIX_FMT_RGB8 : + case PIX_FMT_BGR8 : + case PIX_FMT_PAL8 : + case PIX_FMT_BGR4_BYTE: + case PIX_FMT_RGB4_BYTE: c->chrToYV12 = palToUV_c; break; + case PIX_FMT_GBRP9LE: + case PIX_FMT_GBRP10LE: + case PIX_FMT_GBRP16LE: c->readChrPlanar = planar_rgb16le_to_uv; break; + case PIX_FMT_GBRP9BE: + case PIX_FMT_GBRP10BE: + case PIX_FMT_GBRP16BE: c->readChrPlanar = planar_rgb16be_to_uv; break; + case PIX_FMT_GBRP: c->readChrPlanar = planar_rgb_to_uv; break; +#if HAVE_BIGENDIAN + case PIX_FMT_YUV444P9LE: + case PIX_FMT_YUV422P9LE: + case PIX_FMT_YUV420P9LE: + case PIX_FMT_YUV422P10LE: + case PIX_FMT_YUV444P10LE: + case PIX_FMT_YUV420P10LE: + case PIX_FMT_YUV420P16LE: + case PIX_FMT_YUV422P16LE: + case PIX_FMT_YUV444P16LE: c->chrToYV12 = bswap16UV_c; break; +#else + case PIX_FMT_YUV444P9BE: + case PIX_FMT_YUV422P9BE: + case PIX_FMT_YUV420P9BE: + case PIX_FMT_YUV444P10BE: + case PIX_FMT_YUV422P10BE: + case PIX_FMT_YUV420P10BE: + case PIX_FMT_YUV420P16BE: + case PIX_FMT_YUV422P16BE: + case PIX_FMT_YUV444P16BE: c->chrToYV12 = bswap16UV_c; break; +#endif + } + if (c->chrSrcHSubSample) { + switch(srcFormat) { + case PIX_FMT_RGB48BE : c->chrToYV12 = rgb48BEToUV_half_c; break; + case PIX_FMT_RGB48LE : c->chrToYV12 = rgb48LEToUV_half_c; break; + case PIX_FMT_BGR48BE : c->chrToYV12 = bgr48BEToUV_half_c; break; + case PIX_FMT_BGR48LE : c->chrToYV12 = bgr48LEToUV_half_c; break; + case PIX_FMT_RGB32 : c->chrToYV12 = bgr32ToUV_half_c; break; + case PIX_FMT_RGB32_1 : c->chrToYV12 = bgr321ToUV_half_c; break; + case PIX_FMT_BGR24 : c->chrToYV12 = bgr24ToUV_half_c; break; + case PIX_FMT_BGR565LE: c->chrToYV12 = bgr16leToUV_half_c; break; + case PIX_FMT_BGR565BE: c->chrToYV12 = bgr16beToUV_half_c; break; + case PIX_FMT_BGR555LE: c->chrToYV12 = bgr15leToUV_half_c; break; + case PIX_FMT_BGR555BE: c->chrToYV12 = bgr15beToUV_half_c; break; + case PIX_FMT_BGR444LE: c->chrToYV12 = bgr12leToUV_half_c; break; + case PIX_FMT_BGR444BE: c->chrToYV12 = bgr12beToUV_half_c; break; + case PIX_FMT_BGR32 : c->chrToYV12 = rgb32ToUV_half_c; break; + case PIX_FMT_BGR32_1 : c->chrToYV12 = rgb321ToUV_half_c; break; + case PIX_FMT_RGB24 : c->chrToYV12 = rgb24ToUV_half_c; break; + case PIX_FMT_RGB565LE: c->chrToYV12 = rgb16leToUV_half_c; break; + case PIX_FMT_RGB565BE: c->chrToYV12 = rgb16beToUV_half_c; break; + case PIX_FMT_RGB555LE: c->chrToYV12 = rgb15leToUV_half_c; break; + case PIX_FMT_RGB555BE: c->chrToYV12 = rgb15beToUV_half_c; break; + case PIX_FMT_GBR24P : c->chrToYV12 = gbr24pToUV_half_c; break; + case PIX_FMT_RGB444LE: c->chrToYV12 = rgb12leToUV_half_c; break; + case PIX_FMT_RGB444BE: c->chrToYV12 = rgb12beToUV_half_c; break; + } + } else { + 
switch(srcFormat) { + case PIX_FMT_RGB48BE : c->chrToYV12 = rgb48BEToUV_c; break; + case PIX_FMT_RGB48LE : c->chrToYV12 = rgb48LEToUV_c; break; + case PIX_FMT_BGR48BE : c->chrToYV12 = bgr48BEToUV_c; break; + case PIX_FMT_BGR48LE : c->chrToYV12 = bgr48LEToUV_c; break; + case PIX_FMT_RGB32 : c->chrToYV12 = bgr32ToUV_c; break; + case PIX_FMT_RGB32_1 : c->chrToYV12 = bgr321ToUV_c; break; + case PIX_FMT_BGR24 : c->chrToYV12 = bgr24ToUV_c; break; + case PIX_FMT_BGR565LE: c->chrToYV12 = bgr16leToUV_c; break; + case PIX_FMT_BGR565BE: c->chrToYV12 = bgr16beToUV_c; break; + case PIX_FMT_BGR555LE: c->chrToYV12 = bgr15leToUV_c; break; + case PIX_FMT_BGR555BE: c->chrToYV12 = bgr15beToUV_c; break; + case PIX_FMT_BGR444LE: c->chrToYV12 = bgr12leToUV_c; break; + case PIX_FMT_BGR444BE: c->chrToYV12 = bgr12beToUV_c; break; + case PIX_FMT_BGR32 : c->chrToYV12 = rgb32ToUV_c; break; + case PIX_FMT_BGR32_1 : c->chrToYV12 = rgb321ToUV_c; break; + case PIX_FMT_RGB24 : c->chrToYV12 = rgb24ToUV_c; break; + case PIX_FMT_RGB565LE: c->chrToYV12 = rgb16leToUV_c; break; + case PIX_FMT_RGB565BE: c->chrToYV12 = rgb16beToUV_c; break; + case PIX_FMT_RGB555LE: c->chrToYV12 = rgb15leToUV_c; break; + case PIX_FMT_RGB555BE: c->chrToYV12 = rgb15beToUV_c; break; + case PIX_FMT_RGB444LE: c->chrToYV12 = rgb12leToUV_c; break; + case PIX_FMT_RGB444BE: c->chrToYV12 = rgb12beToUV_c; break; + } + } + + c->lumToYV12 = NULL; + c->alpToYV12 = NULL; + switch (srcFormat) { + case PIX_FMT_GBRP9LE: + case PIX_FMT_GBRP10LE: + case PIX_FMT_GBRP16LE: c->readLumPlanar = planar_rgb16le_to_y; break; + case PIX_FMT_GBRP9BE: + case PIX_FMT_GBRP10BE: + case PIX_FMT_GBRP16BE: c->readLumPlanar = planar_rgb16be_to_y; break; + case PIX_FMT_GBRP: c->readLumPlanar = planar_rgb_to_y; break; +#if HAVE_BIGENDIAN + case PIX_FMT_YUV444P9LE: + case PIX_FMT_YUV422P9LE: + case PIX_FMT_YUV420P9LE: + case PIX_FMT_YUV444P10LE: + case PIX_FMT_YUV422P10LE: + case PIX_FMT_YUV420P10LE: + case PIX_FMT_YUV420P16LE: + case PIX_FMT_YUV422P16LE: + case PIX_FMT_YUV444P16LE: + case PIX_FMT_GRAY16LE: c->lumToYV12 = bswap16Y_c; break; +#else + case PIX_FMT_YUV444P9BE: + case PIX_FMT_YUV422P9BE: + case PIX_FMT_YUV420P9BE: + case PIX_FMT_YUV444P10BE: + case PIX_FMT_YUV422P10BE: + case PIX_FMT_YUV420P10BE: + case PIX_FMT_YUV420P16BE: + case PIX_FMT_YUV422P16BE: + case PIX_FMT_YUV444P16BE: + case PIX_FMT_GRAY16BE: c->lumToYV12 = bswap16Y_c; break; +#endif + case PIX_FMT_YUYV422 : + case PIX_FMT_Y400A : c->lumToYV12 = yuy2ToY_c; break; + case PIX_FMT_UYVY422 : c->lumToYV12 = uyvyToY_c; break; + case PIX_FMT_BGR24 : c->lumToYV12 = bgr24ToY_c; break; + case PIX_FMT_BGR565LE : c->lumToYV12 = bgr16leToY_c; break; + case PIX_FMT_BGR565BE : c->lumToYV12 = bgr16beToY_c; break; + case PIX_FMT_BGR555LE : c->lumToYV12 = bgr15leToY_c; break; + case PIX_FMT_BGR555BE : c->lumToYV12 = bgr15beToY_c; break; + case PIX_FMT_BGR444LE : c->lumToYV12 = bgr12leToY_c; break; + case PIX_FMT_BGR444BE : c->lumToYV12 = bgr12beToY_c; break; + case PIX_FMT_RGB24 : c->lumToYV12 = rgb24ToY_c; break; + case PIX_FMT_RGB565LE : c->lumToYV12 = rgb16leToY_c; break; + case PIX_FMT_RGB565BE : c->lumToYV12 = rgb16beToY_c; break; + case PIX_FMT_RGB555LE : c->lumToYV12 = rgb15leToY_c; break; + case PIX_FMT_RGB555BE : c->lumToYV12 = rgb15beToY_c; break; + case PIX_FMT_RGB444LE : c->lumToYV12 = rgb12leToY_c; break; + case PIX_FMT_RGB444BE : c->lumToYV12 = rgb12beToY_c; break; + case PIX_FMT_RGB8 : + case PIX_FMT_BGR8 : + case PIX_FMT_PAL8 : + case PIX_FMT_BGR4_BYTE: + case PIX_FMT_RGB4_BYTE: c->lumToYV12 = palToY_c; break; + 
case PIX_FMT_MONOBLACK: c->lumToYV12 = monoblack2Y_c; break; + case PIX_FMT_MONOWHITE: c->lumToYV12 = monowhite2Y_c; break; + case PIX_FMT_RGB32 : c->lumToYV12 = bgr32ToY_c; break; + case PIX_FMT_RGB32_1: c->lumToYV12 = bgr321ToY_c; break; + case PIX_FMT_BGR32 : c->lumToYV12 = rgb32ToY_c; break; + case PIX_FMT_BGR32_1: c->lumToYV12 = rgb321ToY_c; break; + case PIX_FMT_RGB48BE: c->lumToYV12 = rgb48BEToY_c; break; + case PIX_FMT_RGB48LE: c->lumToYV12 = rgb48LEToY_c; break; + case PIX_FMT_BGR48BE: c->lumToYV12 = bgr48BEToY_c; break; + case PIX_FMT_BGR48LE: c->lumToYV12 = bgr48LEToY_c; break; + } + if (c->alpPixBuf) { + switch (srcFormat) { + case PIX_FMT_BGRA: + case PIX_FMT_RGBA: c->alpToYV12 = rgbaToA_c; break; + case PIX_FMT_ABGR: + case PIX_FMT_ARGB: c->alpToYV12 = abgrToA_c; break; + case PIX_FMT_Y400A: c->alpToYV12 = uyvyToY_c; break; + case PIX_FMT_PAL8 : c->alpToYV12 = palToA_c; break; + } + } +} diff --git a/libswscale/output.c b/libswscale/output.c new file mode 100644 index 0000000000..d0bd72b350 --- /dev/null +++ b/libswscale/output.c @@ -0,0 +1,1523 @@ +/* + * Copyright (C) 2001-2012 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include + +#include "libavutil/avutil.h" +#include "libavutil/avassert.h" +#include "libavutil/bswap.h" +#include "libavutil/cpu.h" +#include "libavutil/intreadwrite.h" +#include "libavutil/mathematics.h" +#include "libavutil/pixdesc.h" +#include "config.h" +#include "rgb2rgb.h" +#include "swscale.h" +#include "swscale_internal.h" + +DECLARE_ALIGNED(8, static const uint8_t, dither_2x2_4)[2][8]={ +{ 1, 3, 1, 3, 1, 3, 1, 3, }, +{ 2, 0, 2, 0, 2, 0, 2, 0, }, +}; + +DECLARE_ALIGNED(8, static const uint8_t, dither_2x2_8)[2][8]={ +{ 6, 2, 6, 2, 6, 2, 6, 2, }, +{ 0, 4, 0, 4, 0, 4, 0, 4, }, +}; + +DECLARE_ALIGNED(8, const uint8_t, dither_4x4_16)[4][8]={ +{ 8, 4, 11, 7, 8, 4, 11, 7, }, +{ 2, 14, 1, 13, 2, 14, 1, 13, }, +{ 10, 6, 9, 5, 10, 6, 9, 5, }, +{ 0, 12, 3, 15, 0, 12, 3, 15, }, +}; + +DECLARE_ALIGNED(8, const uint8_t, dither_8x8_32)[8][8]={ +{ 17, 9, 23, 15, 16, 8, 22, 14, }, +{ 5, 29, 3, 27, 4, 28, 2, 26, }, +{ 21, 13, 19, 11, 20, 12, 18, 10, }, +{ 0, 24, 6, 30, 1, 25, 7, 31, }, +{ 16, 8, 22, 14, 17, 9, 23, 15, }, +{ 4, 28, 2, 26, 5, 29, 3, 27, }, +{ 20, 12, 18, 10, 21, 13, 19, 11, }, +{ 1, 25, 7, 31, 0, 24, 6, 30, }, +}; + +DECLARE_ALIGNED(8, const uint8_t, dither_8x8_73)[8][8]={ +{ 0, 55, 14, 68, 3, 58, 17, 72, }, +{ 37, 18, 50, 32, 40, 22, 54, 35, }, +{ 9, 64, 5, 59, 13, 67, 8, 63, }, +{ 46, 27, 41, 23, 49, 31, 44, 26, }, +{ 2, 57, 16, 71, 1, 56, 15, 70, }, +{ 39, 21, 52, 34, 38, 19, 51, 33, }, +{ 11, 66, 7, 62, 10, 65, 6, 60, }, +{ 48, 30, 43, 25, 47, 29, 42, 24, }, +}; + +#if 1 +DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={ +{117, 62, 158, 103, 113, 58, 155, 
100, }, +{ 34, 199, 21, 186, 31, 196, 17, 182, }, +{144, 89, 131, 76, 141, 86, 127, 72, }, +{ 0, 165, 41, 206, 10, 175, 52, 217, }, +{110, 55, 151, 96, 120, 65, 162, 107, }, +{ 28, 193, 14, 179, 38, 203, 24, 189, }, +{138, 83, 124, 69, 148, 93, 134, 79, }, +{ 7, 172, 48, 213, 3, 168, 45, 210, }, +}; +#elif 1 +// tries to correct a gamma of 1.5 +DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={ +{ 0, 143, 18, 200, 2, 156, 25, 215, }, +{ 78, 28, 125, 64, 89, 36, 138, 74, }, +{ 10, 180, 3, 161, 16, 195, 8, 175, }, +{109, 51, 93, 38, 121, 60, 105, 47, }, +{ 1, 152, 23, 210, 0, 147, 20, 205, }, +{ 85, 33, 134, 71, 81, 30, 130, 67, }, +{ 14, 190, 6, 171, 12, 185, 5, 166, }, +{117, 57, 101, 44, 113, 54, 97, 41, }, +}; +#elif 1 +// tries to correct a gamma of 2.0 +DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={ +{ 0, 124, 8, 193, 0, 140, 12, 213, }, +{ 55, 14, 104, 42, 66, 19, 119, 52, }, +{ 3, 168, 1, 145, 6, 187, 3, 162, }, +{ 86, 31, 70, 21, 99, 39, 82, 28, }, +{ 0, 134, 11, 206, 0, 129, 9, 200, }, +{ 62, 17, 114, 48, 58, 16, 109, 45, }, +{ 5, 181, 2, 157, 4, 175, 1, 151, }, +{ 95, 36, 78, 26, 90, 34, 74, 24, }, +}; +#else +// tries to correct a gamma of 2.5 +DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={ +{ 0, 107, 3, 187, 0, 125, 6, 212, }, +{ 39, 7, 86, 28, 49, 11, 102, 36, }, +{ 1, 158, 0, 131, 3, 180, 1, 151, }, +{ 68, 19, 52, 12, 81, 25, 64, 17, }, +{ 0, 119, 5, 203, 0, 113, 4, 195, }, +{ 45, 9, 96, 33, 42, 8, 91, 30, }, +{ 2, 172, 1, 144, 2, 165, 0, 137, }, +{ 77, 23, 60, 15, 72, 21, 56, 14, }, +}; +#endif + +#define output_pixel(pos, val, bias, signedness) \ + if (big_endian) { \ + AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \ + } else { \ + AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \ + } + +static av_always_inline void +yuv2plane1_16_c_template(const int32_t *src, uint16_t *dest, int dstW, + int big_endian, int output_bits) +{ + int i; + int shift = 3; + av_assert0(output_bits == 16); + + for (i = 0; i < dstW; i++) { + int val = src[i] + (1 << (shift - 1)); + output_pixel(&dest[i], val, 0, uint); + } +} + +static av_always_inline void +yuv2planeX_16_c_template(const int16_t *filter, int filterSize, + const int32_t **src, uint16_t *dest, int dstW, + int big_endian, int output_bits) +{ + int i; + int shift = 15; + av_assert0(output_bits == 16); + + for (i = 0; i < dstW; i++) { + int val = 1 << (shift - 1); + int j; + + /* range of val is [0,0x7FFFFFFF], so 31 bits, but with lanczos/spline + * filters (or anything with negative coeffs, the range can be slightly + * wider in both directions. To account for this overflow, we subtract + * a constant so it always fits in the signed range (assuming a + * reasonable filterSize), and re-add that at the end. 
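[Editor's illustrative sketch, not part of the patch: a concrete check of the bias arithmetic described in this comment. With shift == 15, the 0x40000000 subtracted before filtering and the 0x8000 added after the shift cancel exactly, since 0x40000000 >> 15 == 0x8000; av_clip_int16() is the libavutil/common.h helper.]

/* For any filtered sum in [0, 0x7FFFFFFF], this returns sum >> 15, but the
 * intermediate value stays safely inside the signed 32-bit range. */
static uint16_t biased_shift_example(int64_t sum)
{
    int32_t val = (int32_t)(sum - 0x40000000);
    return 0x8000 + av_clip_int16(val >> 15);
}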
*/ + val -= 0x40000000; + for (j = 0; j < filterSize; j++) + val += src[j][i] * filter[j]; + + output_pixel(&dest[i], val, 0x8000, int); + } +} + +#undef output_pixel + +#define output_pixel(pos, val) \ + if (big_endian) { \ + AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \ + } else { \ + AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \ + } + +static av_always_inline void +yuv2plane1_10_c_template(const int16_t *src, uint16_t *dest, int dstW, + int big_endian, int output_bits) +{ + int i; + int shift = 15 - output_bits; + + for (i = 0; i < dstW; i++) { + int val = src[i] + (1 << (shift - 1)); + output_pixel(&dest[i], val); + } +} + +static av_always_inline void +yuv2planeX_10_c_template(const int16_t *filter, int filterSize, + const int16_t **src, uint16_t *dest, int dstW, + int big_endian, int output_bits) +{ + int i; + int shift = 11 + 16 - output_bits; + + for (i = 0; i < dstW; i++) { + int val = 1 << (shift - 1); + int j; + + for (j = 0; j < filterSize; j++) + val += src[j][i] * filter[j]; + + output_pixel(&dest[i], val); + } +} + +#undef output_pixel + +#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \ +static void yuv2plane1_ ## bits ## BE_LE ## _c(const int16_t *src, \ + uint8_t *dest, int dstW, \ + const uint8_t *dither, int offset)\ +{ \ + yuv2plane1_ ## template_size ## _c_template((const typeX_t *) src, \ + (uint16_t *) dest, dstW, is_be, bits); \ +}\ +static void yuv2planeX_ ## bits ## BE_LE ## _c(const int16_t *filter, int filterSize, \ + const int16_t **src, uint8_t *dest, int dstW, \ + const uint8_t *dither, int offset)\ +{ \ + yuv2planeX_## template_size ## _c_template(filter, \ + filterSize, (const typeX_t **) src, \ + (uint16_t *) dest, dstW, is_be, bits); \ +} +yuv2NBPS( 9, BE, 1, 10, int16_t) +yuv2NBPS( 9, LE, 0, 10, int16_t) +yuv2NBPS(10, BE, 1, 10, int16_t) +yuv2NBPS(10, LE, 0, 10, int16_t) +yuv2NBPS(16, BE, 1, 16, int32_t) +yuv2NBPS(16, LE, 0, 16, int32_t) + +static void yuv2planeX_8_c(const int16_t *filter, int filterSize, + const int16_t **src, uint8_t *dest, int dstW, + const uint8_t *dither, int offset) +{ + int i; + for (i=0; i>19); + } +} + +static void yuv2plane1_8_c(const int16_t *src, uint8_t *dest, int dstW, + const uint8_t *dither, int offset) +{ + int i; + for (i=0; i> 7; + dest[i]= av_clip_uint8(val); + } +} + +static void yuv2nv12cX_c(SwsContext *c, const int16_t *chrFilter, int chrFilterSize, + const int16_t **chrUSrc, const int16_t **chrVSrc, + uint8_t *dest, int chrDstW) +{ + enum PixelFormat dstFormat = c->dstFormat; + const uint8_t *chrDither = c->chrDither8; + int i; + + if (dstFormat == PIX_FMT_NV12) + for (i=0; i>19); + dest[2*i+1]= av_clip_uint8(v>>19); + } + else + for (i=0; i>19); + dest[2*i+1]= av_clip_uint8(u>>19); + } +} + +#define output_pixel(pos, val) \ + if (target == PIX_FMT_GRAY16BE) { \ + AV_WB16(pos, val); \ + } else { \ + AV_WL16(pos, val); \ + } + +static av_always_inline void +yuv2gray16_X_c_template(SwsContext *c, const int16_t *lumFilter, + const int32_t **lumSrc, int lumFilterSize, + const int16_t *chrFilter, const int32_t **chrUSrc, + const int32_t **chrVSrc, int chrFilterSize, + const int32_t **alpSrc, uint16_t *dest, int dstW, + int y, enum PixelFormat target) +{ + int i; + + for (i = 0; i < (dstW >> 1); i++) { + int j; + int Y1 = (1 << 14) - 0x40000000; + int Y2 = (1 << 14) - 0x40000000; + + for (j = 0; j < lumFilterSize; j++) { + Y1 += lumSrc[j][i * 2] * lumFilter[j]; + Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j]; + } + Y1 >>= 15; + Y2 >>= 15; + Y1 = av_clip_int16(Y1); + Y2 = 
av_clip_int16(Y2); + output_pixel(&dest[i * 2 + 0], 0x8000 + Y1); + output_pixel(&dest[i * 2 + 1], 0x8000 + Y2); + } +} + +static av_always_inline void +yuv2gray16_2_c_template(SwsContext *c, const int32_t *buf[2], + const int32_t *ubuf[2], const int32_t *vbuf[2], + const int32_t *abuf[2], uint16_t *dest, int dstW, + int yalpha, int uvalpha, int y, + enum PixelFormat target) +{ + int yalpha1 = 4095 - yalpha; + int i; + const int32_t *buf0 = buf[0], *buf1 = buf[1]; + + for (i = 0; i < (dstW >> 1); i++) { + int Y1 = (buf0[i * 2 ] * yalpha1 + buf1[i * 2 ] * yalpha) >> 15; + int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 15; + + output_pixel(&dest[i * 2 + 0], Y1); + output_pixel(&dest[i * 2 + 1], Y2); + } +} + +static av_always_inline void +yuv2gray16_1_c_template(SwsContext *c, const int32_t *buf0, + const int32_t *ubuf[2], const int32_t *vbuf[2], + const int32_t *abuf0, uint16_t *dest, int dstW, + int uvalpha, int y, enum PixelFormat target) +{ + int i; + + for (i = 0; i < (dstW >> 1); i++) { + int Y1 = (buf0[i * 2 ]+4)>>3; + int Y2 = (buf0[i * 2 + 1]+4)>>3; + + output_pixel(&dest[i * 2 + 0], Y1); + output_pixel(&dest[i * 2 + 1], Y2); + } +} + +#undef output_pixel + +#define YUV2PACKED16WRAPPER(name, base, ext, fmt) \ +static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \ + const int16_t **_lumSrc, int lumFilterSize, \ + const int16_t *chrFilter, const int16_t **_chrUSrc, \ + const int16_t **_chrVSrc, int chrFilterSize, \ + const int16_t **_alpSrc, uint8_t *_dest, int dstW, \ + int y) \ +{ \ + const int32_t **lumSrc = (const int32_t **) _lumSrc, \ + **chrUSrc = (const int32_t **) _chrUSrc, \ + **chrVSrc = (const int32_t **) _chrVSrc, \ + **alpSrc = (const int32_t **) _alpSrc; \ + uint16_t *dest = (uint16_t *) _dest; \ + name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \ + chrFilter, chrUSrc, chrVSrc, chrFilterSize, \ + alpSrc, dest, dstW, y, fmt); \ +} \ + \ +static void name ## ext ## _2_c(SwsContext *c, const int16_t *_buf[2], \ + const int16_t *_ubuf[2], const int16_t *_vbuf[2], \ + const int16_t *_abuf[2], uint8_t *_dest, int dstW, \ + int yalpha, int uvalpha, int y) \ +{ \ + const int32_t **buf = (const int32_t **) _buf, \ + **ubuf = (const int32_t **) _ubuf, \ + **vbuf = (const int32_t **) _vbuf, \ + **abuf = (const int32_t **) _abuf; \ + uint16_t *dest = (uint16_t *) _dest; \ + name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \ + dest, dstW, yalpha, uvalpha, y, fmt); \ +} \ + \ +static void name ## ext ## _1_c(SwsContext *c, const int16_t *_buf0, \ + const int16_t *_ubuf[2], const int16_t *_vbuf[2], \ + const int16_t *_abuf0, uint8_t *_dest, int dstW, \ + int uvalpha, int y) \ +{ \ + const int32_t *buf0 = (const int32_t *) _buf0, \ + **ubuf = (const int32_t **) _ubuf, \ + **vbuf = (const int32_t **) _vbuf, \ + *abuf0 = (const int32_t *) _abuf0; \ + uint16_t *dest = (uint16_t *) _dest; \ + name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \ + dstW, uvalpha, y, fmt); \ +} + +YUV2PACKED16WRAPPER(yuv2gray16,, LE, PIX_FMT_GRAY16LE) +YUV2PACKED16WRAPPER(yuv2gray16,, BE, PIX_FMT_GRAY16BE) + +#define output_pixel(pos, acc) \ + if (target == PIX_FMT_MONOBLACK) { \ + pos = acc; \ + } else { \ + pos = ~acc; \ + } + +static av_always_inline void +yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter, + const int16_t **lumSrc, int lumFilterSize, + const int16_t *chrFilter, const int16_t **chrUSrc, + const int16_t **chrVSrc, int chrFilterSize, + const int16_t **alpSrc, uint8_t *dest, int dstW, + int y, enum 
PixelFormat target) +{ + const uint8_t * const d128=dither_8x8_220[y&7]; + uint8_t *g = c->table_gU[128 + YUVRGB_TABLE_HEADROOM] + c->table_gV[128 + YUVRGB_TABLE_HEADROOM]; + int i; + unsigned acc = 0; + + for (i = 0; i < dstW - 1; i += 2) { + int j; + int Y1 = 1 << 18; + int Y2 = 1 << 18; + + for (j = 0; j < lumFilterSize; j++) { + Y1 += lumSrc[j][i] * lumFilter[j]; + Y2 += lumSrc[j][i+1] * lumFilter[j]; + } + Y1 >>= 19; + Y2 >>= 19; + if ((Y1 | Y2) & 0x100) { + Y1 = av_clip_uint8(Y1); + Y2 = av_clip_uint8(Y2); + } + acc += acc + g[Y1 + d128[(i + 0) & 7]]; + acc += acc + g[Y2 + d128[(i + 1) & 7]]; + if ((i & 7) == 6) { + output_pixel(*dest++, acc); + } + } +} + +static av_always_inline void +yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2], + const int16_t *ubuf[2], const int16_t *vbuf[2], + const int16_t *abuf[2], uint8_t *dest, int dstW, + int yalpha, int uvalpha, int y, + enum PixelFormat target) +{ + const int16_t *buf0 = buf[0], *buf1 = buf[1]; + const uint8_t * const d128 = dither_8x8_220[y & 7]; + uint8_t *g = c->table_gU[128 + YUVRGB_TABLE_HEADROOM] + c->table_gV[128 + YUVRGB_TABLE_HEADROOM]; + int yalpha1 = 4095 - yalpha; + int i; + + for (i = 0; i < dstW - 7; i += 8) { + int acc = g[((buf0[i ] * yalpha1 + buf1[i ] * yalpha) >> 19) + d128[0]]; + acc += acc + g[((buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19) + d128[1]]; + acc += acc + g[((buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19) + d128[2]]; + acc += acc + g[((buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19) + d128[3]]; + acc += acc + g[((buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19) + d128[4]]; + acc += acc + g[((buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19) + d128[5]]; + acc += acc + g[((buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19) + d128[6]]; + acc += acc + g[((buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19) + d128[7]]; + output_pixel(*dest++, acc); + } +} + +static av_always_inline void +yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0, + const int16_t *ubuf[2], const int16_t *vbuf[2], + const int16_t *abuf0, uint8_t *dest, int dstW, + int uvalpha, int y, enum PixelFormat target) +{ + const uint8_t * const d128 = dither_8x8_220[y & 7]; + uint8_t *g = c->table_gU[128 + YUVRGB_TABLE_HEADROOM] + c->table_gV[128 + YUVRGB_TABLE_HEADROOM]; + int i; + + for (i = 0; i < dstW - 7; i += 8) { + int acc = g[(buf0[i ] >> 7) + d128[0]]; + acc += acc + g[(buf0[i + 1] >> 7) + d128[1]]; + acc += acc + g[(buf0[i + 2] >> 7) + d128[2]]; + acc += acc + g[(buf0[i + 3] >> 7) + d128[3]]; + acc += acc + g[(buf0[i + 4] >> 7) + d128[4]]; + acc += acc + g[(buf0[i + 5] >> 7) + d128[5]]; + acc += acc + g[(buf0[i + 6] >> 7) + d128[6]]; + acc += acc + g[(buf0[i + 7] >> 7) + d128[7]]; + output_pixel(*dest++, acc); + } +} + +#undef output_pixel + +#define YUV2PACKEDWRAPPER(name, base, ext, fmt) \ +static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \ + const int16_t **lumSrc, int lumFilterSize, \ + const int16_t *chrFilter, const int16_t **chrUSrc, \ + const int16_t **chrVSrc, int chrFilterSize, \ + const int16_t **alpSrc, uint8_t *dest, int dstW, \ + int y) \ +{ \ + name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \ + chrFilter, chrUSrc, chrVSrc, chrFilterSize, \ + alpSrc, dest, dstW, y, fmt); \ +} \ + \ +static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \ + const int16_t *ubuf[2], const int16_t *vbuf[2], \ + const int16_t *abuf[2], uint8_t *dest, int dstW, \ + int yalpha, int uvalpha, int y) \ +{ \ + name ## base ## _2_c_template(c, 
buf, ubuf, vbuf, abuf, \ + dest, dstW, yalpha, uvalpha, y, fmt); \ +} \ + \ +static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \ + const int16_t *ubuf[2], const int16_t *vbuf[2], \ + const int16_t *abuf0, uint8_t *dest, int dstW, \ + int uvalpha, int y) \ +{ \ + name ## base ## _1_c_template(c, buf0, ubuf, vbuf, \ + abuf0, dest, dstW, uvalpha, \ + y, fmt); \ +} + +YUV2PACKEDWRAPPER(yuv2mono,, white, PIX_FMT_MONOWHITE) +YUV2PACKEDWRAPPER(yuv2mono,, black, PIX_FMT_MONOBLACK) + +#define output_pixels(pos, Y1, U, Y2, V) \ + if (target == PIX_FMT_YUYV422) { \ + dest[pos + 0] = Y1; \ + dest[pos + 1] = U; \ + dest[pos + 2] = Y2; \ + dest[pos + 3] = V; \ + } else { \ + dest[pos + 0] = U; \ + dest[pos + 1] = Y1; \ + dest[pos + 2] = V; \ + dest[pos + 3] = Y2; \ + } + +static av_always_inline void +yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter, + const int16_t **lumSrc, int lumFilterSize, + const int16_t *chrFilter, const int16_t **chrUSrc, + const int16_t **chrVSrc, int chrFilterSize, + const int16_t **alpSrc, uint8_t *dest, int dstW, + int y, enum PixelFormat target) +{ + int i; + + for (i = 0; i < (dstW >> 1); i++) { + int j; + int Y1 = 1 << 18; + int Y2 = 1 << 18; + int U = 1 << 18; + int V = 1 << 18; + + for (j = 0; j < lumFilterSize; j++) { + Y1 += lumSrc[j][i * 2] * lumFilter[j]; + Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j]; + } + for (j = 0; j < chrFilterSize; j++) { + U += chrUSrc[j][i] * chrFilter[j]; + V += chrVSrc[j][i] * chrFilter[j]; + } + Y1 >>= 19; + Y2 >>= 19; + U >>= 19; + V >>= 19; + if ((Y1 | Y2 | U | V) & 0x100) { + Y1 = av_clip_uint8(Y1); + Y2 = av_clip_uint8(Y2); + U = av_clip_uint8(U); + V = av_clip_uint8(V); + } + output_pixels(4*i, Y1, U, Y2, V); + } +} + +static av_always_inline void +yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2], + const int16_t *ubuf[2], const int16_t *vbuf[2], + const int16_t *abuf[2], uint8_t *dest, int dstW, + int yalpha, int uvalpha, int y, + enum PixelFormat target) +{ + const int16_t *buf0 = buf[0], *buf1 = buf[1], + *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], + *vbuf0 = vbuf[0], *vbuf1 = vbuf[1]; + int yalpha1 = 4095 - yalpha; + int uvalpha1 = 4095 - uvalpha; + int i; + + for (i = 0; i < (dstW >> 1); i++) { + int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19; + int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19; + int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19; + int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19; + + output_pixels(i * 4, Y1, U, Y2, V); + } +} + +static av_always_inline void +yuv2422_1_c_template(SwsContext *c, const int16_t *buf0, + const int16_t *ubuf[2], const int16_t *vbuf[2], + const int16_t *abuf0, uint8_t *dest, int dstW, + int uvalpha, int y, enum PixelFormat target) +{ + const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], + *vbuf0 = vbuf[0], *vbuf1 = vbuf[1]; + int i; + + if (uvalpha < 2048) { + for (i = 0; i < (dstW >> 1); i++) { + int Y1 = buf0[i * 2] >> 7; + int Y2 = buf0[i * 2 + 1] >> 7; + int U = ubuf1[i] >> 7; + int V = vbuf1[i] >> 7; + + output_pixels(i * 4, Y1, U, Y2, V); + } + } else { + for (i = 0; i < (dstW >> 1); i++) { + int Y1 = buf0[i * 2] >> 7; + int Y2 = buf0[i * 2 + 1] >> 7; + int U = (ubuf0[i] + ubuf1[i]) >> 8; + int V = (vbuf0[i] + vbuf1[i]) >> 8; + + output_pixels(i * 4, Y1, U, Y2, V); + } + } +} + +#undef output_pixels + +YUV2PACKEDWRAPPER(yuv2, 422, yuyv422, PIX_FMT_YUYV422) +YUV2PACKEDWRAPPER(yuv2, 422, uyvy422, PIX_FMT_UYVY422) + +#define R_B ((target == PIX_FMT_RGB48LE || target == PIX_FMT_RGB48BE) ? 
R : B) +#define B_R ((target == PIX_FMT_RGB48LE || target == PIX_FMT_RGB48BE) ? B : R) +#define output_pixel(pos, val) \ + if (isBE(target)) { \ + AV_WB16(pos, val); \ + } else { \ + AV_WL16(pos, val); \ + } + +static av_always_inline void +yuv2rgb48_X_c_template(SwsContext *c, const int16_t *lumFilter, + const int32_t **lumSrc, int lumFilterSize, + const int16_t *chrFilter, const int32_t **chrUSrc, + const int32_t **chrVSrc, int chrFilterSize, + const int32_t **alpSrc, uint16_t *dest, int dstW, + int y, enum PixelFormat target) +{ + int i; + + for (i = 0; i < (dstW >> 1); i++) { + int j; + int Y1 = -0x40000000; + int Y2 = -0x40000000; + int U = -128 << 23; // 19 + int V = -128 << 23; + int R, G, B; + + for (j = 0; j < lumFilterSize; j++) { + Y1 += lumSrc[j][i * 2] * lumFilter[j]; + Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j]; + } + for (j = 0; j < chrFilterSize; j++) { + U += chrUSrc[j][i] * chrFilter[j]; + V += chrVSrc[j][i] * chrFilter[j]; + } + + // 8bit: 12+15=27; 16-bit: 12+19=31 + Y1 >>= 14; // 10 + Y1 += 0x10000; + Y2 >>= 14; + Y2 += 0x10000; + U >>= 14; + V >>= 14; + + // 8bit: 27 -> 17bit, 16bit: 31 - 14 = 17bit + Y1 -= c->yuv2rgb_y_offset; + Y2 -= c->yuv2rgb_y_offset; + Y1 *= c->yuv2rgb_y_coeff; + Y2 *= c->yuv2rgb_y_coeff; + Y1 += 1 << 13; // 21 + Y2 += 1 << 13; + // 8bit: 17 + 13bit = 30bit, 16bit: 17 + 13bit = 30bit + + R = V * c->yuv2rgb_v2r_coeff; + G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; + B = U * c->yuv2rgb_u2b_coeff; + + // 8bit: 30 - 22 = 8bit, 16bit: 30bit - 14 = 16bit + output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); + output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); + output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); + output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); + output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); + output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); + dest += 6; + } +} + +static av_always_inline void +yuv2rgb48_2_c_template(SwsContext *c, const int32_t *buf[2], + const int32_t *ubuf[2], const int32_t *vbuf[2], + const int32_t *abuf[2], uint16_t *dest, int dstW, + int yalpha, int uvalpha, int y, + enum PixelFormat target) +{ + const int32_t *buf0 = buf[0], *buf1 = buf[1], + *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], + *vbuf0 = vbuf[0], *vbuf1 = vbuf[1]; + int yalpha1 = 4095 - yalpha; + int uvalpha1 = 4095 - uvalpha; + int i; + + for (i = 0; i < (dstW >> 1); i++) { + int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14; + int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14; + int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha + (-128 << 23)) >> 14; + int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha + (-128 << 23)) >> 14; + int R, G, B; + + Y1 -= c->yuv2rgb_y_offset; + Y2 -= c->yuv2rgb_y_offset; + Y1 *= c->yuv2rgb_y_coeff; + Y2 *= c->yuv2rgb_y_coeff; + Y1 += 1 << 13; + Y2 += 1 << 13; + + R = V * c->yuv2rgb_v2r_coeff; + G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; + B = U * c->yuv2rgb_u2b_coeff; + + output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); + output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); + output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); + output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); + output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); + output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); + dest += 6; + } +} + +static av_always_inline void +yuv2rgb48_1_c_template(SwsContext *c, const int32_t *buf0, + const int32_t *ubuf[2], const int32_t *vbuf[2], + const int32_t *abuf0, 
uint16_t *dest, int dstW, + int uvalpha, int y, enum PixelFormat target) +{ + const int32_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], + *vbuf0 = vbuf[0], *vbuf1 = vbuf[1]; + int i; + + if (uvalpha < 2048) { + for (i = 0; i < (dstW >> 1); i++) { + int Y1 = (buf0[i * 2] ) >> 2; + int Y2 = (buf0[i * 2 + 1]) >> 2; + int U = (ubuf0[i] + (-128 << 11)) >> 2; + int V = (vbuf0[i] + (-128 << 11)) >> 2; + int R, G, B; + + Y1 -= c->yuv2rgb_y_offset; + Y2 -= c->yuv2rgb_y_offset; + Y1 *= c->yuv2rgb_y_coeff; + Y2 *= c->yuv2rgb_y_coeff; + Y1 += 1 << 13; + Y2 += 1 << 13; + + R = V * c->yuv2rgb_v2r_coeff; + G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; + B = U * c->yuv2rgb_u2b_coeff; + + output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); + output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); + output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); + output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); + output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); + output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); + dest += 6; + } + } else { + for (i = 0; i < (dstW >> 1); i++) { + int Y1 = (buf0[i * 2] ) >> 2; + int Y2 = (buf0[i * 2 + 1]) >> 2; + int U = (ubuf0[i] + ubuf1[i] + (-128 << 12)) >> 3; + int V = (vbuf0[i] + vbuf1[i] + (-128 << 12)) >> 3; + int R, G, B; + + Y1 -= c->yuv2rgb_y_offset; + Y2 -= c->yuv2rgb_y_offset; + Y1 *= c->yuv2rgb_y_coeff; + Y2 *= c->yuv2rgb_y_coeff; + Y1 += 1 << 13; + Y2 += 1 << 13; + + R = V * c->yuv2rgb_v2r_coeff; + G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; + B = U * c->yuv2rgb_u2b_coeff; + + output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); + output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); + output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); + output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); + output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); + output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); + dest += 6; + } + } +} + +#undef output_pixel +#undef r_b +#undef b_r + +YUV2PACKED16WRAPPER(yuv2, rgb48, rgb48be, PIX_FMT_RGB48BE) +YUV2PACKED16WRAPPER(yuv2, rgb48, rgb48le, PIX_FMT_RGB48LE) +YUV2PACKED16WRAPPER(yuv2, rgb48, bgr48be, PIX_FMT_BGR48BE) +YUV2PACKED16WRAPPER(yuv2, rgb48, bgr48le, PIX_FMT_BGR48LE) + +/* + * Write out 2 RGB pixels in the target pixel format. This function takes a + * R/G/B LUT as generated by ff_yuv2rgb_c_init_tables(), which takes care of + * things like endianness conversion and shifting. The caller takes care of + * setting the correct offset in these tables from the chroma (U/V) values. + * This function then uses the luminance (Y1/Y2) values to write out the + * correct RGB values into the destination buffer. + */ +static av_always_inline void +yuv2rgb_write(uint8_t *_dest, int i, int Y1, int Y2, + unsigned A1, unsigned A2, + const void *_r, const void *_g, const void *_b, int y, + enum PixelFormat target, int hasAlpha) +{ + if (target == PIX_FMT_ARGB || target == PIX_FMT_RGBA || + target == PIX_FMT_ABGR || target == PIX_FMT_BGRA) { + uint32_t *dest = (uint32_t *) _dest; + const uint32_t *r = (const uint32_t *) _r; + const uint32_t *g = (const uint32_t *) _g; + const uint32_t *b = (const uint32_t *) _b; + +#if CONFIG_SMALL + int sh = hasAlpha ? ((target == PIX_FMT_RGB32_1 || target == PIX_FMT_BGR32_1) ? 0 : 24) : 0; + + dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (hasAlpha ? A1 << sh : 0); + dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (hasAlpha ? 
A2 << sh : 0); +#else + if (hasAlpha) { + int sh = (target == PIX_FMT_RGB32_1 || target == PIX_FMT_BGR32_1) ? 0 : 24; + + dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (A1 << sh); + dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (A2 << sh); + } else { + dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1]; + dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2]; + } +#endif + } else if (target == PIX_FMT_RGB24 || target == PIX_FMT_BGR24) { + uint8_t *dest = (uint8_t *) _dest; + const uint8_t *r = (const uint8_t *) _r; + const uint8_t *g = (const uint8_t *) _g; + const uint8_t *b = (const uint8_t *) _b; + +#define r_b ((target == PIX_FMT_RGB24) ? r : b) +#define b_r ((target == PIX_FMT_RGB24) ? b : r) + + dest[i * 6 + 0] = r_b[Y1]; + dest[i * 6 + 1] = g[Y1]; + dest[i * 6 + 2] = b_r[Y1]; + dest[i * 6 + 3] = r_b[Y2]; + dest[i * 6 + 4] = g[Y2]; + dest[i * 6 + 5] = b_r[Y2]; +#undef r_b +#undef b_r + } else if (target == PIX_FMT_RGB565 || target == PIX_FMT_BGR565 || + target == PIX_FMT_RGB555 || target == PIX_FMT_BGR555 || + target == PIX_FMT_RGB444 || target == PIX_FMT_BGR444) { + uint16_t *dest = (uint16_t *) _dest; + const uint16_t *r = (const uint16_t *) _r; + const uint16_t *g = (const uint16_t *) _g; + const uint16_t *b = (const uint16_t *) _b; + int dr1, dg1, db1, dr2, dg2, db2; + + if (target == PIX_FMT_RGB565 || target == PIX_FMT_BGR565) { + dr1 = dither_2x2_8[ y & 1 ][0]; + dg1 = dither_2x2_4[ y & 1 ][0]; + db1 = dither_2x2_8[(y & 1) ^ 1][0]; + dr2 = dither_2x2_8[ y & 1 ][1]; + dg2 = dither_2x2_4[ y & 1 ][1]; + db2 = dither_2x2_8[(y & 1) ^ 1][1]; + } else if (target == PIX_FMT_RGB555 || target == PIX_FMT_BGR555) { + dr1 = dither_2x2_8[ y & 1 ][0]; + dg1 = dither_2x2_8[ y & 1 ][1]; + db1 = dither_2x2_8[(y & 1) ^ 1][0]; + dr2 = dither_2x2_8[ y & 1 ][1]; + dg2 = dither_2x2_8[ y & 1 ][0]; + db2 = dither_2x2_8[(y & 1) ^ 1][1]; + } else { + dr1 = dither_4x4_16[ y & 3 ][0]; + dg1 = dither_4x4_16[ y & 3 ][1]; + db1 = dither_4x4_16[(y & 3) ^ 3][0]; + dr2 = dither_4x4_16[ y & 3 ][1]; + dg2 = dither_4x4_16[ y & 3 ][0]; + db2 = dither_4x4_16[(y & 3) ^ 3][1]; + } + + dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1]; + dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]; + } else /* 8/4-bit */ { + uint8_t *dest = (uint8_t *) _dest; + const uint8_t *r = (const uint8_t *) _r; + const uint8_t *g = (const uint8_t *) _g; + const uint8_t *b = (const uint8_t *) _b; + int dr1, dg1, db1, dr2, dg2, db2; + + if (target == PIX_FMT_RGB8 || target == PIX_FMT_BGR8) { + const uint8_t * const d64 = dither_8x8_73[y & 7]; + const uint8_t * const d32 = dither_8x8_32[y & 7]; + dr1 = dg1 = d32[(i * 2 + 0) & 7]; + db1 = d64[(i * 2 + 0) & 7]; + dr2 = dg2 = d32[(i * 2 + 1) & 7]; + db2 = d64[(i * 2 + 1) & 7]; + } else { + const uint8_t * const d64 = dither_8x8_73 [y & 7]; + const uint8_t * const d128 = dither_8x8_220[y & 7]; + dr1 = db1 = d128[(i * 2 + 0) & 7]; + dg1 = d64[(i * 2 + 0) & 7]; + dr2 = db2 = d128[(i * 2 + 1) & 7]; + dg2 = d64[(i * 2 + 1) & 7]; + } + + if (target == PIX_FMT_RGB4 || target == PIX_FMT_BGR4) { + dest[i] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1] + + ((r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]) << 4); + } else { + dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1]; + dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]; + } + } +} + +static av_always_inline void +yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter, + const int16_t **lumSrc, int lumFilterSize, + const int16_t *chrFilter, const int16_t **chrUSrc, + const int16_t **chrVSrc, int chrFilterSize, + const int16_t **alpSrc, uint8_t *dest, int 
dstW, + int y, enum PixelFormat target, int hasAlpha) +{ + int i; + + for (i = 0; i < (dstW >> 1); i++) { + int j; + int Y1 = 1 << 18; + int Y2 = 1 << 18; + int U = 1 << 18; + int V = 1 << 18; + int av_unused A1, A2; + const void *r, *g, *b; + + for (j = 0; j < lumFilterSize; j++) { + Y1 += lumSrc[j][i * 2] * lumFilter[j]; + Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j]; + } + for (j = 0; j < chrFilterSize; j++) { + U += chrUSrc[j][i] * chrFilter[j]; + V += chrVSrc[j][i] * chrFilter[j]; + } + Y1 >>= 19; + Y2 >>= 19; + U >>= 19; + V >>= 19; + if (hasAlpha) { + A1 = 1 << 18; + A2 = 1 << 18; + for (j = 0; j < lumFilterSize; j++) { + A1 += alpSrc[j][i * 2 ] * lumFilter[j]; + A2 += alpSrc[j][i * 2 + 1] * lumFilter[j]; + } + A1 >>= 19; + A2 >>= 19; + if ((A1 | A2) & 0x100) { + A1 = av_clip_uint8(A1); + A2 = av_clip_uint8(A2); + } + } + + r = c->table_rV[V + YUVRGB_TABLE_HEADROOM]; + g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]); + b = c->table_bU[U + YUVRGB_TABLE_HEADROOM]; + + yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0, + r, g, b, y, target, hasAlpha); + } +} + +static av_always_inline void +yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2], + const int16_t *ubuf[2], const int16_t *vbuf[2], + const int16_t *abuf[2], uint8_t *dest, int dstW, + int yalpha, int uvalpha, int y, + enum PixelFormat target, int hasAlpha) +{ + const int16_t *buf0 = buf[0], *buf1 = buf[1], + *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], + *vbuf0 = vbuf[0], *vbuf1 = vbuf[1], + *abuf0 = hasAlpha ? abuf[0] : NULL, + *abuf1 = hasAlpha ? abuf[1] : NULL; + int yalpha1 = 4095 - yalpha; + int uvalpha1 = 4095 - uvalpha; + int i; + + for (i = 0; i < (dstW >> 1); i++) { + int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19; + int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19; + int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19; + int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19; + int A1, A2; + const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM], + *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]), + *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM]; + + if (hasAlpha) { + A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 19; + A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19; + } + + yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0, + r, g, b, y, target, hasAlpha); + } +} + +static av_always_inline void +yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0, + const int16_t *ubuf[2], const int16_t *vbuf[2], + const int16_t *abuf0, uint8_t *dest, int dstW, + int uvalpha, int y, enum PixelFormat target, + int hasAlpha) +{ + const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], + *vbuf0 = vbuf[0], *vbuf1 = vbuf[1]; + int i; + + if (uvalpha < 2048) { + for (i = 0; i < (dstW >> 1); i++) { + int Y1 = buf0[i * 2] >> 7; + int Y2 = buf0[i * 2 + 1] >> 7; + int U = ubuf1[i] >> 7; + int V = vbuf1[i] >> 7; + int A1, A2; + const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM], + *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]), + *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM]; + + if (hasAlpha) { + A1 = abuf0[i * 2 ] >> 7; + A2 = abuf0[i * 2 + 1] >> 7; + } + + yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? 
A2 : 0, + r, g, b, y, target, hasAlpha); + } + } else { + for (i = 0; i < (dstW >> 1); i++) { + int Y1 = buf0[i * 2] >> 7; + int Y2 = buf0[i * 2 + 1] >> 7; + int U = (ubuf0[i] + ubuf1[i]) >> 8; + int V = (vbuf0[i] + vbuf1[i]) >> 8; + int A1, A2; + const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM], + *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]), + *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM]; + + if (hasAlpha) { + A1 = abuf0[i * 2 ] >> 7; + A2 = abuf0[i * 2 + 1] >> 7; + } + + yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0, + r, g, b, y, target, hasAlpha); + } + } +} + +#define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \ +static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \ + const int16_t **lumSrc, int lumFilterSize, \ + const int16_t *chrFilter, const int16_t **chrUSrc, \ + const int16_t **chrVSrc, int chrFilterSize, \ + const int16_t **alpSrc, uint8_t *dest, int dstW, \ + int y) \ +{ \ + name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \ + chrFilter, chrUSrc, chrVSrc, chrFilterSize, \ + alpSrc, dest, dstW, y, fmt, hasAlpha); \ +} +#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \ +YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \ +static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \ + const int16_t *ubuf[2], const int16_t *vbuf[2], \ + const int16_t *abuf[2], uint8_t *dest, int dstW, \ + int yalpha, int uvalpha, int y) \ +{ \ + name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \ + dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \ +} \ + \ +static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \ + const int16_t *ubuf[2], const int16_t *vbuf[2], \ + const int16_t *abuf0, uint8_t *dest, int dstW, \ + int uvalpha, int y) \ +{ \ + name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \ + dstW, uvalpha, y, fmt, hasAlpha); \ +} + +#if CONFIG_SMALL +YUV2RGBWRAPPER(yuv2rgb,, 32_1, PIX_FMT_RGB32_1, CONFIG_SWSCALE_ALPHA && c->alpPixBuf) +YUV2RGBWRAPPER(yuv2rgb,, 32, PIX_FMT_RGB32, CONFIG_SWSCALE_ALPHA && c->alpPixBuf) +#else +#if CONFIG_SWSCALE_ALPHA +YUV2RGBWRAPPER(yuv2rgb,, a32_1, PIX_FMT_RGB32_1, 1) +YUV2RGBWRAPPER(yuv2rgb,, a32, PIX_FMT_RGB32, 1) +#endif +YUV2RGBWRAPPER(yuv2rgb,, x32_1, PIX_FMT_RGB32_1, 0) +YUV2RGBWRAPPER(yuv2rgb,, x32, PIX_FMT_RGB32, 0) +#endif +YUV2RGBWRAPPER(yuv2, rgb, rgb24, PIX_FMT_RGB24, 0) +YUV2RGBWRAPPER(yuv2, rgb, bgr24, PIX_FMT_BGR24, 0) +YUV2RGBWRAPPER(yuv2rgb,, 16, PIX_FMT_RGB565, 0) +YUV2RGBWRAPPER(yuv2rgb,, 15, PIX_FMT_RGB555, 0) +YUV2RGBWRAPPER(yuv2rgb,, 12, PIX_FMT_RGB444, 0) +YUV2RGBWRAPPER(yuv2rgb,, 8, PIX_FMT_RGB8, 0) +YUV2RGBWRAPPER(yuv2rgb,, 4, PIX_FMT_RGB4, 0) +YUV2RGBWRAPPER(yuv2rgb,, 4b, PIX_FMT_RGB4_BYTE, 0) + +static av_always_inline void +yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter, + const int16_t **lumSrc, int lumFilterSize, + const int16_t *chrFilter, const int16_t **chrUSrc, + const int16_t **chrVSrc, int chrFilterSize, + const int16_t **alpSrc, uint8_t *dest, + int dstW, int y, enum PixelFormat target, int hasAlpha) +{ + int i; + int step = (target == PIX_FMT_RGB24 || target == PIX_FMT_BGR24) ? 
3 : 4; + + for (i = 0; i < dstW; i++) { + int j; + int Y = 1<<9; + int U = (1<<9)-(128 << 19); + int V = (1<<9)-(128 << 19); + int av_unused A; + int R, G, B; + + for (j = 0; j < lumFilterSize; j++) { + Y += lumSrc[j][i] * lumFilter[j]; + } + for (j = 0; j < chrFilterSize; j++) { + U += chrUSrc[j][i] * chrFilter[j]; + V += chrVSrc[j][i] * chrFilter[j]; + } + Y >>= 10; + U >>= 10; + V >>= 10; + if (hasAlpha) { + A = 1 << 18; + for (j = 0; j < lumFilterSize; j++) { + A += alpSrc[j][i] * lumFilter[j]; + } + A >>= 19; + if (A & 0x100) + A = av_clip_uint8(A); + } + Y -= c->yuv2rgb_y_offset; + Y *= c->yuv2rgb_y_coeff; + Y += 1 << 21; + R = Y + V*c->yuv2rgb_v2r_coeff; + G = Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff; + B = Y + U*c->yuv2rgb_u2b_coeff; + if ((R | G | B) & 0xC0000000) { + R = av_clip_uintp2(R, 30); + G = av_clip_uintp2(G, 30); + B = av_clip_uintp2(B, 30); + } + + switch(target) { + case PIX_FMT_ARGB: + dest[0] = hasAlpha ? A : 255; + dest[1] = R >> 22; + dest[2] = G >> 22; + dest[3] = B >> 22; + break; + case PIX_FMT_RGB24: + dest[0] = R >> 22; + dest[1] = G >> 22; + dest[2] = B >> 22; + break; + case PIX_FMT_RGBA: + dest[0] = R >> 22; + dest[1] = G >> 22; + dest[2] = B >> 22; + dest[3] = hasAlpha ? A : 255; + break; + case PIX_FMT_ABGR: + dest[0] = hasAlpha ? A : 255; + dest[1] = B >> 22; + dest[2] = G >> 22; + dest[3] = R >> 22; + break; + case PIX_FMT_BGR24: + dest[0] = B >> 22; + dest[1] = G >> 22; + dest[2] = R >> 22; + break; + case PIX_FMT_BGRA: + dest[0] = B >> 22; + dest[1] = G >> 22; + dest[2] = R >> 22; + dest[3] = hasAlpha ? A : 255; + break; + } + dest += step; + } +} + +#if CONFIG_SMALL +YUV2RGBWRAPPERX(yuv2, rgb_full, bgra32_full, PIX_FMT_BGRA, CONFIG_SWSCALE_ALPHA && c->alpPixBuf) +YUV2RGBWRAPPERX(yuv2, rgb_full, abgr32_full, PIX_FMT_ABGR, CONFIG_SWSCALE_ALPHA && c->alpPixBuf) +YUV2RGBWRAPPERX(yuv2, rgb_full, rgba32_full, PIX_FMT_RGBA, CONFIG_SWSCALE_ALPHA && c->alpPixBuf) +YUV2RGBWRAPPERX(yuv2, rgb_full, argb32_full, PIX_FMT_ARGB, CONFIG_SWSCALE_ALPHA && c->alpPixBuf) +#else +#if CONFIG_SWSCALE_ALPHA +YUV2RGBWRAPPERX(yuv2, rgb_full, bgra32_full, PIX_FMT_BGRA, 1) +YUV2RGBWRAPPERX(yuv2, rgb_full, abgr32_full, PIX_FMT_ABGR, 1) +YUV2RGBWRAPPERX(yuv2, rgb_full, rgba32_full, PIX_FMT_RGBA, 1) +YUV2RGBWRAPPERX(yuv2, rgb_full, argb32_full, PIX_FMT_ARGB, 1) +#endif +YUV2RGBWRAPPERX(yuv2, rgb_full, bgrx32_full, PIX_FMT_BGRA, 0) +YUV2RGBWRAPPERX(yuv2, rgb_full, xbgr32_full, PIX_FMT_ABGR, 0) +YUV2RGBWRAPPERX(yuv2, rgb_full, rgbx32_full, PIX_FMT_RGBA, 0) +YUV2RGBWRAPPERX(yuv2, rgb_full, xrgb32_full, PIX_FMT_ARGB, 0) +#endif +YUV2RGBWRAPPERX(yuv2, rgb_full, bgr24_full, PIX_FMT_BGR24, 0) +YUV2RGBWRAPPERX(yuv2, rgb_full, rgb24_full, PIX_FMT_RGB24, 0) + +void ff_sws_init_output_funcs(SwsContext *c, + yuv2planar1_fn *yuv2plane1, + yuv2planarX_fn *yuv2planeX, + yuv2interleavedX_fn *yuv2nv12cX, + yuv2packed1_fn *yuv2packed1, + yuv2packed2_fn *yuv2packed2, + yuv2packedX_fn *yuv2packedX) +{ + enum PixelFormat dstFormat = c->dstFormat; + + if (is16BPS(dstFormat)) { + *yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_c : yuv2planeX_16LE_c; + *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_c : yuv2plane1_16LE_c; + } else if (is9_OR_10BPS(dstFormat)) { + if (av_pix_fmt_descriptors[dstFormat].comp[0].depth_minus1 == 8) { + *yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_c : yuv2planeX_9LE_c; + *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_c : yuv2plane1_9LE_c; + } else { + *yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_c : yuv2planeX_10LE_c; + *yuv2plane1 = isBE(dstFormat) ? 
yuv2plane1_10BE_c : yuv2plane1_10LE_c; + } + } else { + *yuv2plane1 = yuv2plane1_8_c; + *yuv2planeX = yuv2planeX_8_c; + if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21) + *yuv2nv12cX = yuv2nv12cX_c; + } + + if(c->flags & SWS_FULL_CHR_H_INT) { + switch (dstFormat) { + case PIX_FMT_RGBA: +#if CONFIG_SMALL + *yuv2packedX = yuv2rgba32_full_X_c; +#else +#if CONFIG_SWSCALE_ALPHA + if (c->alpPixBuf) { + *yuv2packedX = yuv2rgba32_full_X_c; + } else +#endif /* CONFIG_SWSCALE_ALPHA */ + { + *yuv2packedX = yuv2rgbx32_full_X_c; + } +#endif /* !CONFIG_SMALL */ + break; + case PIX_FMT_ARGB: +#if CONFIG_SMALL + *yuv2packedX = yuv2argb32_full_X_c; +#else +#if CONFIG_SWSCALE_ALPHA + if (c->alpPixBuf) { + *yuv2packedX = yuv2argb32_full_X_c; + } else +#endif /* CONFIG_SWSCALE_ALPHA */ + { + *yuv2packedX = yuv2xrgb32_full_X_c; + } +#endif /* !CONFIG_SMALL */ + break; + case PIX_FMT_BGRA: +#if CONFIG_SMALL + *yuv2packedX = yuv2bgra32_full_X_c; +#else +#if CONFIG_SWSCALE_ALPHA + if (c->alpPixBuf) { + *yuv2packedX = yuv2bgra32_full_X_c; + } else +#endif /* CONFIG_SWSCALE_ALPHA */ + { + *yuv2packedX = yuv2bgrx32_full_X_c; + } +#endif /* !CONFIG_SMALL */ + break; + case PIX_FMT_ABGR: +#if CONFIG_SMALL + *yuv2packedX = yuv2abgr32_full_X_c; +#else +#if CONFIG_SWSCALE_ALPHA + if (c->alpPixBuf) { + *yuv2packedX = yuv2abgr32_full_X_c; + } else +#endif /* CONFIG_SWSCALE_ALPHA */ + { + *yuv2packedX = yuv2xbgr32_full_X_c; + } +#endif /* !CONFIG_SMALL */ + break; + case PIX_FMT_RGB24: + *yuv2packedX = yuv2rgb24_full_X_c; + break; + case PIX_FMT_BGR24: + *yuv2packedX = yuv2bgr24_full_X_c; + break; + } + if(!*yuv2packedX) + goto YUV_PACKED; + } else { + YUV_PACKED: + switch (dstFormat) { + case PIX_FMT_RGB48LE: + *yuv2packed1 = yuv2rgb48le_1_c; + *yuv2packed2 = yuv2rgb48le_2_c; + *yuv2packedX = yuv2rgb48le_X_c; + break; + case PIX_FMT_RGB48BE: + *yuv2packed1 = yuv2rgb48be_1_c; + *yuv2packed2 = yuv2rgb48be_2_c; + *yuv2packedX = yuv2rgb48be_X_c; + break; + case PIX_FMT_BGR48LE: + *yuv2packed1 = yuv2bgr48le_1_c; + *yuv2packed2 = yuv2bgr48le_2_c; + *yuv2packedX = yuv2bgr48le_X_c; + break; + case PIX_FMT_BGR48BE: + *yuv2packed1 = yuv2bgr48be_1_c; + *yuv2packed2 = yuv2bgr48be_2_c; + *yuv2packedX = yuv2bgr48be_X_c; + break; + case PIX_FMT_RGB32: + case PIX_FMT_BGR32: +#if CONFIG_SMALL + *yuv2packed1 = yuv2rgb32_1_c; + *yuv2packed2 = yuv2rgb32_2_c; + *yuv2packedX = yuv2rgb32_X_c; +#else +#if CONFIG_SWSCALE_ALPHA + if (c->alpPixBuf) { + *yuv2packed1 = yuv2rgba32_1_c; + *yuv2packed2 = yuv2rgba32_2_c; + *yuv2packedX = yuv2rgba32_X_c; + } else +#endif /* CONFIG_SWSCALE_ALPHA */ + { + *yuv2packed1 = yuv2rgbx32_1_c; + *yuv2packed2 = yuv2rgbx32_2_c; + *yuv2packedX = yuv2rgbx32_X_c; + } +#endif /* !CONFIG_SMALL */ + break; + case PIX_FMT_RGB32_1: + case PIX_FMT_BGR32_1: +#if CONFIG_SMALL + *yuv2packed1 = yuv2rgb32_1_1_c; + *yuv2packed2 = yuv2rgb32_1_2_c; + *yuv2packedX = yuv2rgb32_1_X_c; +#else +#if CONFIG_SWSCALE_ALPHA + if (c->alpPixBuf) { + *yuv2packed1 = yuv2rgba32_1_1_c; + *yuv2packed2 = yuv2rgba32_1_2_c; + *yuv2packedX = yuv2rgba32_1_X_c; + } else +#endif /* CONFIG_SWSCALE_ALPHA */ + { + *yuv2packed1 = yuv2rgbx32_1_1_c; + *yuv2packed2 = yuv2rgbx32_1_2_c; + *yuv2packedX = yuv2rgbx32_1_X_c; + } +#endif /* !CONFIG_SMALL */ + break; + case PIX_FMT_RGB24: + *yuv2packed1 = yuv2rgb24_1_c; + *yuv2packed2 = yuv2rgb24_2_c; + *yuv2packedX = yuv2rgb24_X_c; + break; + case PIX_FMT_BGR24: + *yuv2packed1 = yuv2bgr24_1_c; + *yuv2packed2 = yuv2bgr24_2_c; + *yuv2packedX = yuv2bgr24_X_c; + break; + case PIX_FMT_RGB565LE: + case 
PIX_FMT_RGB565BE: + case PIX_FMT_BGR565LE: + case PIX_FMT_BGR565BE: + *yuv2packed1 = yuv2rgb16_1_c; + *yuv2packed2 = yuv2rgb16_2_c; + *yuv2packedX = yuv2rgb16_X_c; + break; + case PIX_FMT_RGB555LE: + case PIX_FMT_RGB555BE: + case PIX_FMT_BGR555LE: + case PIX_FMT_BGR555BE: + *yuv2packed1 = yuv2rgb15_1_c; + *yuv2packed2 = yuv2rgb15_2_c; + *yuv2packedX = yuv2rgb15_X_c; + break; + case PIX_FMT_RGB444LE: + case PIX_FMT_RGB444BE: + case PIX_FMT_BGR444LE: + case PIX_FMT_BGR444BE: + *yuv2packed1 = yuv2rgb12_1_c; + *yuv2packed2 = yuv2rgb12_2_c; + *yuv2packedX = yuv2rgb12_X_c; + break; + case PIX_FMT_RGB8: + case PIX_FMT_BGR8: + *yuv2packed1 = yuv2rgb8_1_c; + *yuv2packed2 = yuv2rgb8_2_c; + *yuv2packedX = yuv2rgb8_X_c; + break; + case PIX_FMT_RGB4: + case PIX_FMT_BGR4: + *yuv2packed1 = yuv2rgb4_1_c; + *yuv2packed2 = yuv2rgb4_2_c; + *yuv2packedX = yuv2rgb4_X_c; + break; + case PIX_FMT_RGB4_BYTE: + case PIX_FMT_BGR4_BYTE: + *yuv2packed1 = yuv2rgb4b_1_c; + *yuv2packed2 = yuv2rgb4b_2_c; + *yuv2packedX = yuv2rgb4b_X_c; + break; + } + } + switch (dstFormat) { + case PIX_FMT_GRAY16BE: + *yuv2packed1 = yuv2gray16BE_1_c; + *yuv2packed2 = yuv2gray16BE_2_c; + *yuv2packedX = yuv2gray16BE_X_c; + break; + case PIX_FMT_GRAY16LE: + *yuv2packed1 = yuv2gray16LE_1_c; + *yuv2packed2 = yuv2gray16LE_2_c; + *yuv2packedX = yuv2gray16LE_X_c; + break; + case PIX_FMT_MONOWHITE: + *yuv2packed1 = yuv2monowhite_1_c; + *yuv2packed2 = yuv2monowhite_2_c; + *yuv2packedX = yuv2monowhite_X_c; + break; + case PIX_FMT_MONOBLACK: + *yuv2packed1 = yuv2monoblack_1_c; + *yuv2packed2 = yuv2monoblack_2_c; + *yuv2packedX = yuv2monoblack_X_c; + break; + case PIX_FMT_YUYV422: + *yuv2packed1 = yuv2yuyv422_1_c; + *yuv2packed2 = yuv2yuyv422_2_c; + *yuv2packedX = yuv2yuyv422_X_c; + break; + case PIX_FMT_UYVY422: + *yuv2packed1 = yuv2uyvy422_1_c; + *yuv2packed2 = yuv2uyvy422_2_c; + *yuv2packedX = yuv2uyvy422_X_c; + break; + } +} diff --git a/libswscale/sparc/yuv2rgb_vis.c b/libswscale/sparc/yuv2rgb_vis.c index cc98f04053..62f502619c 100644 --- a/libswscale/sparc/yuv2rgb_vis.c +++ b/libswscale/sparc/yuv2rgb_vis.c @@ -25,149 +25,148 @@ #include "libswscale/swscale.h" #include "libswscale/swscale_internal.h" -#define YUV2RGB_INIT \ - "wr %%g0, 0x10, %%gsr \n\t" \ - "ldd [%5], %%f32 \n\t" \ - "ldd [%5+8], %%f34 \n\t" \ - "ldd [%5+16], %%f36 \n\t" \ - "ldd [%5+24], %%f38 \n\t" \ - "ldd [%5+32], %%f40 \n\t" \ - "ldd [%5+40], %%f42 \n\t" \ - "ldd [%5+48], %%f44 \n\t" \ - "ldd [%5+56], %%f46 \n\t" \ - "ldd [%5+64], %%f48 \n\t" \ - "ldd [%5+72], %%f50 \n\t" - -#define YUV2RGB_KERNEL \ - /* ^^^^ f0=Y f3=u f5=v */ \ - "fmul8x16 %%f3, %%f48, %%f6 \n\t" \ - "fmul8x16 %%f19, %%f48, %%f22 \n\t" \ - "fmul8x16 %%f5, %%f44, %%f8 \n\t" \ - "fmul8x16 %%f21, %%f44, %%f24 \n\t" \ - "fmul8x16 %%f0, %%f42, %%f0 \n\t" \ - "fmul8x16 %%f16, %%f42, %%f16 \n\t" \ - "fmul8x16 %%f3, %%f50, %%f2 \n\t" \ - "fmul8x16 %%f19, %%f50, %%f18 \n\t" \ - "fmul8x16 %%f5, %%f46, %%f4 \n\t" \ - "fmul8x16 %%f21, %%f46, %%f20 \n\t" \ - \ - "fpsub16 %%f6, %%f34, %%f6 \n\t" /* 1 */ \ - "fpsub16 %%f22, %%f34, %%f22 \n\t" /* 1 */ \ - "fpsub16 %%f8, %%f38, %%f8 \n\t" /* 3 */ \ - "fpsub16 %%f24, %%f38, %%f24 \n\t" /* 3 */ \ - "fpsub16 %%f0, %%f32, %%f0 \n\t" /* 0 */ \ - "fpsub16 %%f16, %%f32, %%f16 \n\t" /* 0 */ \ - "fpsub16 %%f2, %%f36, %%f2 \n\t" /* 2 */ \ - "fpsub16 %%f18, %%f36, %%f18 \n\t" /* 2 */ \ - "fpsub16 %%f4, %%f40, %%f4 \n\t" /* 4 */ \ - "fpsub16 %%f20, %%f40, %%f20 \n\t" /* 4 */ \ - \ - "fpadd16 %%f0, %%f8, %%f8 \n\t" /* Gt */ \ - "fpadd16 %%f16, %%f24, %%f24 \n\t" /* Gt */ \ - "fpadd16 
%%f0, %%f4, %%f4 \n\t" /* R */ \ - "fpadd16 %%f16, %%f20, %%f20 \n\t" /* R */ \ - "fpadd16 %%f0, %%f6, %%f6 \n\t" /* B */ \ - "fpadd16 %%f16, %%f22, %%f22 \n\t" /* B */ \ - "fpadd16 %%f8, %%f2, %%f2 \n\t" /* G */ \ - "fpadd16 %%f24, %%f18, %%f18 \n\t" /* G */ \ - \ - "fpack16 %%f4, %%f4 \n\t" \ - "fpack16 %%f20, %%f20 \n\t" \ - "fpack16 %%f6, %%f6 \n\t" \ - "fpack16 %%f22, %%f22 \n\t" \ - "fpack16 %%f2, %%f2 \n\t" \ - "fpack16 %%f18, %%f18 \n\t" - +#define YUV2RGB_INIT \ + "wr %%g0, 0x10, %%gsr \n\t" \ + "ldd [%5], %%f32 \n\t" \ + "ldd [%5 + 8], %%f34 \n\t" \ + "ldd [%5 + 16], %%f36 \n\t" \ + "ldd [%5 + 24], %%f38 \n\t" \ + "ldd [%5 + 32], %%f40 \n\t" \ + "ldd [%5 + 40], %%f42 \n\t" \ + "ldd [%5 + 48], %%f44 \n\t" \ + "ldd [%5 + 56], %%f46 \n\t" \ + "ldd [%5 + 64], %%f48 \n\t" \ + "ldd [%5 + 72], %%f50 \n\t" +#define YUV2RGB_KERNEL \ + /* ^^^^ f0=Y f3=u f5=v */ \ + "fmul8x16 %%f3, %%f48, %%f6 \n\t" \ + "fmul8x16 %%f19, %%f48, %%f22 \n\t" \ + "fmul8x16 %%f5, %%f44, %%f8 \n\t" \ + "fmul8x16 %%f21, %%f44, %%f24 \n\t" \ + "fmul8x16 %%f0, %%f42, %%f0 \n\t" \ + "fmul8x16 %%f16, %%f42, %%f16 \n\t" \ + "fmul8x16 %%f3, %%f50, %%f2 \n\t" \ + "fmul8x16 %%f19, %%f50, %%f18 \n\t" \ + "fmul8x16 %%f5, %%f46, %%f4 \n\t" \ + "fmul8x16 %%f21, %%f46, %%f20 \n\t" \ + \ + "fpsub16 %%f6, %%f34, %%f6 \n\t" /* 1 */ \ + "fpsub16 %%f22, %%f34, %%f22 \n\t" /* 1 */ \ + "fpsub16 %%f8, %%f38, %%f8 \n\t" /* 3 */ \ + "fpsub16 %%f24, %%f38, %%f24 \n\t" /* 3 */ \ + "fpsub16 %%f0, %%f32, %%f0 \n\t" /* 0 */ \ + "fpsub16 %%f16, %%f32, %%f16 \n\t" /* 0 */ \ + "fpsub16 %%f2, %%f36, %%f2 \n\t" /* 2 */ \ + "fpsub16 %%f18, %%f36, %%f18 \n\t" /* 2 */ \ + "fpsub16 %%f4, %%f40, %%f4 \n\t" /* 4 */ \ + "fpsub16 %%f20, %%f40, %%f20 \n\t" /* 4 */ \ + \ + "fpadd16 %%f0, %%f8, %%f8 \n\t" /* Gt */ \ + "fpadd16 %%f16, %%f24, %%f24 \n\t" /* Gt */ \ + "fpadd16 %%f0, %%f4, %%f4 \n\t" /* R */ \ + "fpadd16 %%f16, %%f20, %%f20 \n\t" /* R */ \ + "fpadd16 %%f0, %%f6, %%f6 \n\t" /* B */ \ + "fpadd16 %%f16, %%f22, %%f22 \n\t" /* B */ \ + "fpadd16 %%f8, %%f2, %%f2 \n\t" /* G */ \ + "fpadd16 %%f24, %%f18, %%f18 \n\t" /* G */ \ + \ + "fpack16 %%f4, %%f4 \n\t" \ + "fpack16 %%f20, %%f20 \n\t" \ + "fpack16 %%f6, %%f6 \n\t" \ + "fpack16 %%f22, %%f22 \n\t" \ + "fpack16 %%f2, %%f2 \n\t" \ + "fpack16 %%f18, %%f18 \n\t" // FIXME: must be changed to set alpha to 255 instead of 0 -static int vis_420P_ARGB32(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, - int srcSliceH, uint8_t* dst[], int dstStride[]) +static int vis_420P_ARGB32(SwsContext *c, uint8_t *src[], int srcStride[], + int srcSliceY, int srcSliceH, + uint8_t *dst[], int dstStride[]) { int y, out1, out2, out3, out4, out5, out6; - for(y=0;y < srcSliceH;++y) { + for (y = 0; y < srcSliceH; ++y) __asm__ volatile ( YUV2RGB_INIT - "wr %%g0, 0xd2, %%asi \n\t" /* ASI_FL16_P */ + "wr %%g0, 0xd2, %%asi \n\t" /* ASI_FL16_P */ "1: \n\t" - "ldda [%1] %%asi, %%f2 \n\t" - "ldda [%1+2] %%asi, %%f18 \n\t" - "ldda [%2] %%asi, %%f4 \n\t" - "ldda [%2+2] %%asi, %%f20 \n\t" + "ldda [%1] %%asi, %%f2 \n\t" + "ldda [%1 + 2] %%asi, %%f18 \n\t" + "ldda [%2] %%asi, %%f4 \n\t" + "ldda [%2 + 2] %%asi, %%f20 \n\t" "ld [%0], %%f0 \n\t" "ld [%0+4], %%f16 \n\t" - "fpmerge %%f3, %%f3, %%f2 \n\t" + "fpmerge %%f3, %%f3, %%f2 \n\t" "fpmerge %%f19, %%f19, %%f18 \n\t" - "fpmerge %%f5, %%f5, %%f4 \n\t" + "fpmerge %%f5, %%f5, %%f4 \n\t" "fpmerge %%f21, %%f21, %%f20 \n\t" YUV2RGB_KERNEL "fzero %%f0 \n\t" - "fpmerge %%f4, %%f6, %%f8 \n\t" // r,b,t1 - "fpmerge %%f20, %%f22, %%f24 \n\t" // r,b,t1 - "fpmerge %%f0, %%f2, %%f10 \n\t" // 
0,g,t2 - "fpmerge %%f0, %%f18, %%f26 \n\t" // 0,g,t2 - "fpmerge %%f10, %%f8, %%f4 \n\t" // t2,t1,msb - "fpmerge %%f26, %%f24, %%f20 \n\t" // t2,t1,msb - "fpmerge %%f11, %%f9, %%f6 \n\t" // t2,t1,lsb - "fpmerge %%f27, %%f25, %%f22 \n\t" // t2,t1,lsb - "std %%f4, [%3] \n\t" - "std %%f20, [%3+16] \n\t" - "std %%f6, [%3+8] \n\t" - "std %%f22, [%3+24] \n\t" + "fpmerge %%f4, %%f6, %%f8 \n\t" // r, b, t1 + "fpmerge %%f20, %%f22, %%f24 \n\t" // r, b, t1 + "fpmerge %%f0, %%f2, %%f10 \n\t" // 0, g, t2 + "fpmerge %%f0, %%f18, %%f26 \n\t" // 0, g, t2 + "fpmerge %%f10, %%f8, %%f4 \n\t" // t2, t1, msb + "fpmerge %%f26, %%f24, %%f20 \n\t" // t2, t1, msb + "fpmerge %%f11, %%f9, %%f6 \n\t" // t2, t1, lsb + "fpmerge %%f27, %%f25, %%f22 \n\t" // t2, t1, lsb + "std %%f4, [%3] \n\t" + "std %%f20, [%3 + 16] \n\t" + "std %%f6, [%3 + 8] \n\t" + "std %%f22, [%3 + 24] \n\t" "add %0, 8, %0 \n\t" "add %1, 4, %1 \n\t" "add %2, 4, %2 \n\t" "subcc %4, 8, %4 \n\t" "bne 1b \n\t" - "add %3, 32, %3 \n\t" //delay slot + "add %3, 32, %3 \n\t" // delay slot : "=r" (out1), "=r" (out2), "=r" (out3), "=r" (out4), "=r" (out5), "=r" (out6) - : "0" (src[0]+(y+srcSliceY)*srcStride[0]), "1" (src[1]+((y+srcSliceY)>>1)*srcStride[1]), - "2" (src[2]+((y+srcSliceY)>>1)*srcStride[2]), "3" (dst[0]+(y+srcSliceY)*dstStride[0]), - "4" (c->dstW), - "5" (c->sparc_coeffs) - ); - } + : "0" (src[0] + (y + srcSliceY) * srcStride[0]), "1" (src[1] + ((y + srcSliceY) >> 1) * srcStride[1]), + "2" (src[2] + ((y + srcSliceY) >> 1) * srcStride[2]), "3" (dst[0] + (y + srcSliceY) * dstStride[0]), + "4" (c->dstW), + "5" (c->sparc_coeffs) + ); return srcSliceH; } // FIXME: must be changed to set alpha to 255 instead of 0 -static int vis_422P_ARGB32(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, - int srcSliceH, uint8_t* dst[], int dstStride[]) +static int vis_422P_ARGB32(SwsContext *c, uint8_t *src[], int srcStride[], + int srcSliceY, int srcSliceH, + uint8_t *dst[], int dstStride[]) { int y, out1, out2, out3, out4, out5, out6; - for(y=0;y < srcSliceH;++y) { + for (y = 0; y < srcSliceH; ++y) __asm__ volatile ( YUV2RGB_INIT "wr %%g0, 0xd2, %%asi \n\t" /* ASI_FL16_P */ "1: \n\t" - "ldda [%1] %%asi, %%f2 \n\t" - "ldda [%1+2] %%asi, %%f18 \n\t" - "ldda [%2] %%asi, %%f4 \n\t" - "ldda [%2+2] %%asi, %%f20 \n\t" - "ld [%0], %%f0 \n\t" - "ld [%0+4], %%f16 \n\t" - "fpmerge %%f3, %%f3, %%f2 \n\t" + "ldda [%1] %%asi, %%f2 \n\t" + "ldda [%1 + 2] %%asi, %%f18 \n\t" + "ldda [%2] %%asi, %%f4 \n\t" + "ldda [%2 + 2] %%asi, %%f20 \n\t" + "ld [%0], %%f0 \n\t" + "ld [%0 + 4], %%f16 \n\t" + "fpmerge %%f3, %%f3, %%f2 \n\t" "fpmerge %%f19, %%f19, %%f18 \n\t" - "fpmerge %%f5, %%f5, %%f4 \n\t" + "fpmerge %%f5, %%f5, %%f4 \n\t" "fpmerge %%f21, %%f21, %%f20 \n\t" YUV2RGB_KERNEL "fzero %%f0 \n\t" - "fpmerge %%f4, %%f6, %%f8 \n\t" // r,b,t1 + "fpmerge %%f4, %%f6, %%f8 \n\t" // r,b,t1 "fpmerge %%f20, %%f22, %%f24 \n\t" // r,b,t1 - "fpmerge %%f0, %%f2, %%f10 \n\t" // 0,g,t2 - "fpmerge %%f0, %%f18, %%f26 \n\t" // 0,g,t2 - "fpmerge %%f10, %%f8, %%f4 \n\t" // t2,t1,msb + "fpmerge %%f0, %%f2, %%f10 \n\t" // 0,g,t2 + "fpmerge %%f0, %%f18, %%f26 \n\t" // 0,g,t2 + "fpmerge %%f10, %%f8, %%f4 \n\t" // t2,t1,msb "fpmerge %%f26, %%f24, %%f20 \n\t" // t2,t1,msb - "fpmerge %%f11, %%f9, %%f6 \n\t" // t2,t1,lsb + "fpmerge %%f11, %%f9, %%f6 \n\t" // t2,t1,lsb "fpmerge %%f27, %%f25, %%f22 \n\t" // t2,t1,lsb - "std %%f4, [%3] \n\t" - "std %%f20, [%3+16] \n\t" - "std %%f6, [%3+8] \n\t" - "std %%f22, [%3+24] \n\t" + "std %%f4, [%3] \n\t" + "std %%f20, [%3 + 16] \n\t" + "std %%f6, [%3 + 8] \n\t" + 
"std %%f22, [%3 + 24] \n\t" "add %0, 8, %0 \n\t" "add %1, 4, %1 \n\t" @@ -176,36 +175,36 @@ static int vis_422P_ARGB32(SwsContext *c, uint8_t* src[], int srcStride[], int s "bne 1b \n\t" "add %3, 32, %3 \n\t" //delay slot : "=r" (out1), "=r" (out2), "=r" (out3), "=r" (out4), "=r" (out5), "=r" (out6) - : "0" (src[0]+(y+srcSliceY)*srcStride[0]), "1" (src[1]+(y+srcSliceY)*srcStride[1]), - "2" (src[2]+(y+srcSliceY)*srcStride[2]), "3" (dst[0]+(y+srcSliceY)*dstStride[0]), - "4" (c->dstW), - "5" (c->sparc_coeffs) - ); - } + : "0" (src[0] + (y + srcSliceY) * srcStride[0]), "1" (src[1] + (y + srcSliceY) * srcStride[1]), + "2" (src[2] + (y + srcSliceY) * srcStride[2]), "3" (dst[0] + (y + srcSliceY) * dstStride[0]), + "4" (c->dstW), + "5" (c->sparc_coeffs) + ); return srcSliceH; } SwsFunc ff_yuv2rgb_init_vis(SwsContext *c) { - c->sparc_coeffs[5]=c->yCoeff; - c->sparc_coeffs[6]=c->vgCoeff; - c->sparc_coeffs[7]=c->vrCoeff; - c->sparc_coeffs[8]=c->ubCoeff; - c->sparc_coeffs[9]=c->ugCoeff; + c->sparc_coeffs[5] = c->yCoeff; + c->sparc_coeffs[6] = c->vgCoeff; + c->sparc_coeffs[7] = c->vrCoeff; + c->sparc_coeffs[8] = c->ubCoeff; + c->sparc_coeffs[9] = c->ugCoeff; - c->sparc_coeffs[0]=(((int16_t)c->yOffset*(int16_t)c->yCoeff >>11) & 0xffff) * 0x0001000100010001ULL; - c->sparc_coeffs[1]=(((int16_t)c->uOffset*(int16_t)c->ubCoeff>>11) & 0xffff) * 0x0001000100010001ULL; - c->sparc_coeffs[2]=(((int16_t)c->uOffset*(int16_t)c->ugCoeff>>11) & 0xffff) * 0x0001000100010001ULL; - c->sparc_coeffs[3]=(((int16_t)c->vOffset*(int16_t)c->vgCoeff>>11) & 0xffff) * 0x0001000100010001ULL; - c->sparc_coeffs[4]=(((int16_t)c->vOffset*(int16_t)c->vrCoeff>>11) & 0xffff) * 0x0001000100010001ULL; + c->sparc_coeffs[0] = (((int16_t)c->yOffset * (int16_t)c->yCoeff >> 11) & 0xffff) * 0x0001000100010001ULL; + c->sparc_coeffs[1] = (((int16_t)c->uOffset * (int16_t)c->ubCoeff >> 11) & 0xffff) * 0x0001000100010001ULL; + c->sparc_coeffs[2] = (((int16_t)c->uOffset * (int16_t)c->ugCoeff >> 11) & 0xffff) * 0x0001000100010001ULL; + c->sparc_coeffs[3] = (((int16_t)c->vOffset * (int16_t)c->vgCoeff >> 11) & 0xffff) * 0x0001000100010001ULL; + c->sparc_coeffs[4] = (((int16_t)c->vOffset * (int16_t)c->vrCoeff >> 11) & 0xffff) * 0x0001000100010001ULL; - if (c->dstFormat == PIX_FMT_RGB32 && c->srcFormat == PIX_FMT_YUV422P && (c->dstW & 7)==0) { - av_log(c, AV_LOG_INFO, "SPARC VIS accelerated YUV422P -> RGB32 (WARNING: alpha value is wrong)\n"); + if (c->dstFormat == PIX_FMT_RGB32 && c->srcFormat == PIX_FMT_YUV422P && (c->dstW & 7) == 0) { + av_log(c, AV_LOG_INFO, + "SPARC VIS accelerated YUV422P -> RGB32 (WARNING: alpha value is wrong)\n"); return vis_422P_ARGB32; - } - else if (c->dstFormat == PIX_FMT_RGB32 && c->srcFormat == PIX_FMT_YUV420P && (c->dstW & 7)==0) { - av_log(c, AV_LOG_INFO, "SPARC VIS accelerated YUV420P -> RGB32 (WARNING: alpha value is wrong)\n"); + } else if (c->dstFormat == PIX_FMT_RGB32 && c->srcFormat == PIX_FMT_YUV420P && (c->dstW & 7) == 0) { + av_log(c, AV_LOG_INFO, + "SPARC VIS accelerated YUV420P -> RGB32 (WARNING: alpha value is wrong)\n"); return vis_420P_ARGB32; } return NULL; diff --git a/libswscale/swscale.c b/libswscale/swscale.c index 51dfb86d4c..12ec19bea0 100644 --- a/libswscale/swscale.c +++ b/libswscale/swscale.c @@ -35,120 +35,6 @@ #include "libavutil/bswap.h" #include "libavutil/pixdesc.h" - -#define RGB2YUV_SHIFT 15 -#define BY ( (int)(0.114*219/255*(1<BGR scaler -*/ - -DECLARE_ALIGNED(8, static const uint8_t, dither_2x2_4)[2][8]={ -{ 1, 3, 1, 3, 1, 3, 1, 3, }, -{ 2, 0, 2, 0, 2, 0, 2, 0, }, -}; - -DECLARE_ALIGNED(8, 
static const uint8_t, dither_2x2_8)[2][8]={ -{ 6, 2, 6, 2, 6, 2, 6, 2, }, -{ 0, 4, 0, 4, 0, 4, 0, 4, }, -}; - -DECLARE_ALIGNED(8, const uint8_t, dither_4x4_16)[4][8]={ -{ 8, 4, 11, 7, 8, 4, 11, 7, }, -{ 2, 14, 1, 13, 2, 14, 1, 13, }, -{ 10, 6, 9, 5, 10, 6, 9, 5, }, -{ 0, 12, 3, 15, 0, 12, 3, 15, }, -}; - -DECLARE_ALIGNED(8, const uint8_t, dither_8x8_32)[8][8]={ -{ 17, 9, 23, 15, 16, 8, 22, 14, }, -{ 5, 29, 3, 27, 4, 28, 2, 26, }, -{ 21, 13, 19, 11, 20, 12, 18, 10, }, -{ 0, 24, 6, 30, 1, 25, 7, 31, }, -{ 16, 8, 22, 14, 17, 9, 23, 15, }, -{ 4, 28, 2, 26, 5, 29, 3, 27, }, -{ 20, 12, 18, 10, 21, 13, 19, 11, }, -{ 1, 25, 7, 31, 0, 24, 6, 30, }, -}; - -DECLARE_ALIGNED(8, const uint8_t, dither_8x8_73)[8][8]={ -{ 0, 55, 14, 68, 3, 58, 17, 72, }, -{ 37, 18, 50, 32, 40, 22, 54, 35, }, -{ 9, 64, 5, 59, 13, 67, 8, 63, }, -{ 46, 27, 41, 23, 49, 31, 44, 26, }, -{ 2, 57, 16, 71, 1, 56, 15, 70, }, -{ 39, 21, 52, 34, 38, 19, 51, 33, }, -{ 11, 66, 7, 62, 10, 65, 6, 60, }, -{ 48, 30, 43, 25, 47, 29, 42, 24, }, -}; - -#if 1 -DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={ -{117, 62, 158, 103, 113, 58, 155, 100, }, -{ 34, 199, 21, 186, 31, 196, 17, 182, }, -{144, 89, 131, 76, 141, 86, 127, 72, }, -{ 0, 165, 41, 206, 10, 175, 52, 217, }, -{110, 55, 151, 96, 120, 65, 162, 107, }, -{ 28, 193, 14, 179, 38, 203, 24, 189, }, -{138, 83, 124, 69, 148, 93, 134, 79, }, -{ 7, 172, 48, 213, 3, 168, 45, 210, }, -}; -#elif 1 -// tries to correct a gamma of 1.5 -DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={ -{ 0, 143, 18, 200, 2, 156, 25, 215, }, -{ 78, 28, 125, 64, 89, 36, 138, 74, }, -{ 10, 180, 3, 161, 16, 195, 8, 175, }, -{109, 51, 93, 38, 121, 60, 105, 47, }, -{ 1, 152, 23, 210, 0, 147, 20, 205, }, -{ 85, 33, 134, 71, 81, 30, 130, 67, }, -{ 14, 190, 6, 171, 12, 185, 5, 166, }, -{117, 57, 101, 44, 113, 54, 97, 41, }, -}; -#elif 1 -// tries to correct a gamma of 2.0 -DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={ -{ 0, 124, 8, 193, 0, 140, 12, 213, }, -{ 55, 14, 104, 42, 66, 19, 119, 52, }, -{ 3, 168, 1, 145, 6, 187, 3, 162, }, -{ 86, 31, 70, 21, 99, 39, 82, 28, }, -{ 0, 134, 11, 206, 0, 129, 9, 200, }, -{ 62, 17, 114, 48, 58, 16, 109, 45, }, -{ 5, 181, 2, 157, 4, 175, 1, 151, }, -{ 95, 36, 78, 26, 90, 34, 74, 24, }, -}; -#else -// tries to correct a gamma of 2.5 -DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={ -{ 0, 107, 3, 187, 0, 125, 6, 212, }, -{ 39, 7, 86, 28, 49, 11, 102, 36, }, -{ 1, 158, 0, 131, 3, 180, 1, 151, }, -{ 68, 19, 52, 12, 81, 25, 64, 17, }, -{ 0, 119, 5, 203, 0, 113, 4, 195, }, -{ 45, 9, 96, 33, 42, 8, 91, 30, }, -{ 2, 172, 1, 144, 2, 165, 0, 137, }, -{ 77, 23, 60, 15, 72, 21, 56, 14, }, -}; -#endif DECLARE_ALIGNED(8, const uint8_t, dither_8x8_128)[8][8] = { { 36, 68, 60, 92, 34, 66, 58, 90,}, { 100, 4,124, 28, 98, 2,122, 26,}, @@ -162,1250 +48,6 @@ DECLARE_ALIGNED(8, const uint8_t, dither_8x8_128)[8][8] = { DECLARE_ALIGNED(8, const uint8_t, ff_sws_pb_64)[8] = { 64, 64, 64, 64, 64, 64, 64, 64 }; -DECLARE_ALIGNED(8, const uint8_t, dithers)[8][8][8]={ -{ - { 0, 1, 0, 1, 0, 1, 0, 1,}, - { 1, 0, 1, 0, 1, 0, 1, 0,}, - { 0, 1, 0, 1, 0, 1, 0, 1,}, - { 1, 0, 1, 0, 1, 0, 1, 0,}, - { 0, 1, 0, 1, 0, 1, 0, 1,}, - { 1, 0, 1, 0, 1, 0, 1, 0,}, - { 0, 1, 0, 1, 0, 1, 0, 1,}, - { 1, 0, 1, 0, 1, 0, 1, 0,}, -},{ - { 1, 2, 1, 2, 1, 2, 1, 2,}, - { 3, 0, 3, 0, 3, 0, 3, 0,}, - { 1, 2, 1, 2, 1, 2, 1, 2,}, - { 3, 0, 3, 0, 3, 0, 3, 0,}, - { 1, 2, 1, 2, 1, 2, 1, 2,}, - { 3, 0, 3, 0, 3, 0, 3, 0,}, - { 1, 2, 1, 2, 1, 2, 1, 2,}, - { 3, 0, 3, 0, 3, 0, 3, 0,}, -},{ - { 2, 4, 3, 5, 2, 4, 3, 
5,}, - { 6, 0, 7, 1, 6, 0, 7, 1,}, - { 3, 5, 2, 4, 3, 5, 2, 4,}, - { 7, 1, 6, 0, 7, 1, 6, 0,}, - { 2, 4, 3, 5, 2, 4, 3, 5,}, - { 6, 0, 7, 1, 6, 0, 7, 1,}, - { 3, 5, 2, 4, 3, 5, 2, 4,}, - { 7, 1, 6, 0, 7, 1, 6, 0,}, -},{ - { 4, 8, 7, 11, 4, 8, 7, 11,}, - { 12, 0, 15, 3, 12, 0, 15, 3,}, - { 6, 10, 5, 9, 6, 10, 5, 9,}, - { 14, 2, 13, 1, 14, 2, 13, 1,}, - { 4, 8, 7, 11, 4, 8, 7, 11,}, - { 12, 0, 15, 3, 12, 0, 15, 3,}, - { 6, 10, 5, 9, 6, 10, 5, 9,}, - { 14, 2, 13, 1, 14, 2, 13, 1,}, -},{ - { 9, 17, 15, 23, 8, 16, 14, 22,}, - { 25, 1, 31, 7, 24, 0, 30, 6,}, - { 13, 21, 11, 19, 12, 20, 10, 18,}, - { 29, 5, 27, 3, 28, 4, 26, 2,}, - { 8, 16, 14, 22, 9, 17, 15, 23,}, - { 24, 0, 30, 6, 25, 1, 31, 7,}, - { 12, 20, 10, 18, 13, 21, 11, 19,}, - { 28, 4, 26, 2, 29, 5, 27, 3,}, -},{ - { 18, 34, 30, 46, 17, 33, 29, 45,}, - { 50, 2, 62, 14, 49, 1, 61, 13,}, - { 26, 42, 22, 38, 25, 41, 21, 37,}, - { 58, 10, 54, 6, 57, 9, 53, 5,}, - { 16, 32, 28, 44, 19, 35, 31, 47,}, - { 48, 0, 60, 12, 51, 3, 63, 15,}, - { 24, 40, 20, 36, 27, 43, 23, 39,}, - { 56, 8, 52, 4, 59, 11, 55, 7,}, -},{ - { 18, 34, 30, 46, 17, 33, 29, 45,}, - { 50, 2, 62, 14, 49, 1, 61, 13,}, - { 26, 42, 22, 38, 25, 41, 21, 37,}, - { 58, 10, 54, 6, 57, 9, 53, 5,}, - { 16, 32, 28, 44, 19, 35, 31, 47,}, - { 48, 0, 60, 12, 51, 3, 63, 15,}, - { 24, 40, 20, 36, 27, 43, 23, 39,}, - { 56, 8, 52, 4, 59, 11, 55, 7,}, -},{ - { 36, 68, 60, 92, 34, 66, 58, 90,}, - { 100, 4,124, 28, 98, 2,122, 26,}, - { 52, 84, 44, 76, 50, 82, 42, 74,}, - { 116, 20,108, 12,114, 18,106, 10,}, - { 32, 64, 56, 88, 38, 70, 62, 94,}, - { 96, 0,120, 24,102, 6,126, 30,}, - { 48, 80, 40, 72, 54, 86, 46, 78,}, - { 112, 16,104, 8,118, 22,110, 14,}, -}}; - -static const uint8_t flat64[8]={64,64,64,64,64,64,64,64}; - -const uint16_t dither_scale[15][16]={ -{ 2, 3, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,}, -{ 2, 3, 7, 7, 13, 13, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,}, -{ 3, 3, 4, 15, 15, 29, 57, 57, 57, 113, 113, 113, 113, 113, 113, 113,}, -{ 3, 4, 4, 5, 31, 31, 61, 121, 241, 241, 241, 241, 481, 481, 481, 481,}, -{ 3, 4, 5, 5, 6, 63, 63, 125, 249, 497, 993, 993, 993, 993, 993, 1985,}, -{ 3, 5, 6, 6, 6, 7, 127, 127, 253, 505, 1009, 2017, 4033, 4033, 4033, 4033,}, -{ 3, 5, 6, 7, 7, 7, 8, 255, 255, 509, 1017, 2033, 4065, 8129,16257,16257,}, -{ 3, 5, 6, 8, 8, 8, 8, 9, 511, 511, 1021, 2041, 4081, 8161,16321,32641,}, -{ 3, 5, 7, 8, 9, 9, 9, 9, 10, 1023, 1023, 2045, 4089, 8177,16353,32705,}, -{ 3, 5, 7, 8, 10, 10, 10, 10, 10, 11, 2047, 2047, 4093, 8185,16369,32737,}, -{ 3, 5, 7, 8, 10, 11, 11, 11, 11, 11, 12, 4095, 4095, 8189,16377,32753,}, -{ 3, 5, 7, 9, 10, 12, 12, 12, 12, 12, 12, 13, 8191, 8191,16381,32761,}, -{ 3, 5, 7, 9, 10, 12, 13, 13, 13, 13, 13, 13, 14,16383,16383,32765,}, -{ 3, 5, 7, 9, 10, 12, 14, 14, 14, 14, 14, 14, 14, 15,32767,32767,}, -{ 3, 5, 7, 9, 11, 12, 14, 15, 15, 15, 15, 15, 15, 15, 16,65535,}, -}; - -#define output_pixel(pos, val, bias, signedness) \ - if (big_endian) { \ - AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \ - } else { \ - AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \ - } - -static av_always_inline void -yuv2plane1_16_c_template(const int32_t *src, uint16_t *dest, int dstW, - int big_endian, int output_bits) -{ - int i; - int shift = 3; - av_assert0(output_bits == 16); - - for (i = 0; i < dstW; i++) { - int val = src[i] + (1 << (shift - 1)); - output_pixel(&dest[i], val, 0, uint); - } -} - -static av_always_inline void -yuv2planeX_16_c_template(const int16_t *filter, int filterSize, - const int32_t **src, uint16_t *dest, 
int dstW, - int big_endian, int output_bits) -{ - int i; - int shift = 15; - av_assert0(output_bits == 16); - - for (i = 0; i < dstW; i++) { - int val = 1 << (shift - 1); - int j; - - /* range of val is [0,0x7FFFFFFF], so 31 bits, but with lanczos/spline - * filters (or anything with negative coeffs, the range can be slightly - * wider in both directions. To account for this overflow, we subtract - * a constant so it always fits in the signed range (assuming a - * reasonable filterSize), and re-add that at the end. */ - val -= 0x40000000; - for (j = 0; j < filterSize; j++) - val += src[j][i] * filter[j]; - - output_pixel(&dest[i], val, 0x8000, int); - } -} - -#undef output_pixel - -#define output_pixel(pos, val) \ - if (big_endian) { \ - AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \ - } else { \ - AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \ - } - -static av_always_inline void -yuv2plane1_10_c_template(const int16_t *src, uint16_t *dest, int dstW, - int big_endian, int output_bits) -{ - int i; - int shift = 15 - output_bits; - - for (i = 0; i < dstW; i++) { - int val = src[i] + (1 << (shift - 1)); - output_pixel(&dest[i], val); - } -} - -static av_always_inline void -yuv2planeX_10_c_template(const int16_t *filter, int filterSize, - const int16_t **src, uint16_t *dest, int dstW, - int big_endian, int output_bits) -{ - int i; - int shift = 11 + 16 - output_bits; - - for (i = 0; i < dstW; i++) { - int val = 1 << (shift - 1); - int j; - - for (j = 0; j < filterSize; j++) - val += src[j][i] * filter[j]; - - output_pixel(&dest[i], val); - } -} - -#undef output_pixel - -#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \ -static void yuv2plane1_ ## bits ## BE_LE ## _c(const int16_t *src, \ - uint8_t *dest, int dstW, \ - const uint8_t *dither, int offset)\ -{ \ - yuv2plane1_ ## template_size ## _c_template((const typeX_t *) src, \ - (uint16_t *) dest, dstW, is_be, bits); \ -}\ -static void yuv2planeX_ ## bits ## BE_LE ## _c(const int16_t *filter, int filterSize, \ - const int16_t **src, uint8_t *dest, int dstW, \ - const uint8_t *dither, int offset)\ -{ \ - yuv2planeX_## template_size ## _c_template(filter, \ - filterSize, (const typeX_t **) src, \ - (uint16_t *) dest, dstW, is_be, bits); \ -} -yuv2NBPS( 9, BE, 1, 10, int16_t) -yuv2NBPS( 9, LE, 0, 10, int16_t) -yuv2NBPS(10, BE, 1, 10, int16_t) -yuv2NBPS(10, LE, 0, 10, int16_t) -yuv2NBPS(16, BE, 1, 16, int32_t) -yuv2NBPS(16, LE, 0, 16, int32_t) - -static void yuv2planeX_8_c(const int16_t *filter, int filterSize, - const int16_t **src, uint8_t *dest, int dstW, - const uint8_t *dither, int offset) -{ - int i; - for (i=0; i<dstW; i++) { - int val = dither[(i + offset) & 7] << 12; - int j; - for (j=0; j<filterSize; j++) - val += src[j][i] * filter[j]; - - dest[i]= av_clip_uint8(val>>19); - } -} - -static void yuv2plane1_8_c(const int16_t *src, uint8_t *dest, int dstW, - const uint8_t *dither, int offset) -{ - int i; - for (i=0; i<dstW; i++) { - int val = (src[i] + dither[(i + offset) & 7]) >> 7; - dest[i]= av_clip_uint8(val); - } -} - -static void yuv2nv12cX_c(SwsContext *c, const int16_t *chrFilter, int chrFilterSize, - const int16_t **chrUSrc, const int16_t **chrVSrc, - uint8_t *dest, int chrDstW) -{ - enum PixelFormat dstFormat = c->dstFormat; - const uint8_t *chrDither = c->chrDither8; - int i; - - if (dstFormat == PIX_FMT_NV12) - for (i=0; i<chrDstW; i++) { - int u = chrDither[i & 7] << 12; - int v = chrDither[(i + 3) & 7] << 12; - int j; - for (j=0; j<chrFilterSize; j++) { - u += chrUSrc[j][i] * chrFilter[j]; - v += chrVSrc[j][i] * chrFilter[j]; - } - - dest[2*i]= av_clip_uint8(u>>19); - dest[2*i+1]= av_clip_uint8(v>>19); - } - else - for (i=0; i<chrDstW; i++) { - int u = chrDither[i & 7] << 12; - int v = chrDither[(i + 3) & 7] << 12; - int j; - for (j=0; j<chrFilterSize; j++) { - u += chrUSrc[j][i] * chrFilter[j]; - v += chrVSrc[j][i] * chrFilter[j]; - } - - dest[2*i]= av_clip_uint8(v>>19); - dest[2*i+1]= av_clip_uint8(u>>19); - } -} - -#define output_pixel(pos, val) \ - if (target == PIX_FMT_GRAY16BE) { \ - AV_WB16(pos, val); \ - } else { \ - AV_WL16(pos, val); \ - } - -static av_always_inline void -yuv2gray16_X_c_template(SwsContext *c, const int16_t *lumFilter, - const int32_t **lumSrc, int lumFilterSize,
- const int16_t *chrFilter, const int32_t **chrUSrc, - const int32_t **chrVSrc, int chrFilterSize, - const int32_t **alpSrc, uint16_t *dest, int dstW, - int y, enum PixelFormat target) -{ - int i; - - for (i = 0; i < (dstW >> 1); i++) { - int j; - int Y1 = (1 << 14) - 0x40000000; - int Y2 = (1 << 14) - 0x40000000; - - for (j = 0; j < lumFilterSize; j++) { - Y1 += lumSrc[j][i * 2] * lumFilter[j]; - Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j]; - } - Y1 >>= 15; - Y2 >>= 15; - Y1 = av_clip_int16(Y1); - Y2 = av_clip_int16(Y2); - output_pixel(&dest[i * 2 + 0], 0x8000 + Y1); - output_pixel(&dest[i * 2 + 1], 0x8000 + Y2); - } -} - -static av_always_inline void -yuv2gray16_2_c_template(SwsContext *c, const int32_t *buf[2], - const int32_t *ubuf[2], const int32_t *vbuf[2], - const int32_t *abuf[2], uint16_t *dest, int dstW, - int yalpha, int uvalpha, int y, - enum PixelFormat target) -{ - int yalpha1 = 4095 - yalpha; - int i; - const int32_t *buf0 = buf[0], *buf1 = buf[1]; - - for (i = 0; i < (dstW >> 1); i++) { - int Y1 = (buf0[i * 2 ] * yalpha1 + buf1[i * 2 ] * yalpha) >> 15; - int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 15; - - output_pixel(&dest[i * 2 + 0], Y1); - output_pixel(&dest[i * 2 + 1], Y2); - } -} - -static av_always_inline void -yuv2gray16_1_c_template(SwsContext *c, const int32_t *buf0, - const int32_t *ubuf[2], const int32_t *vbuf[2], - const int32_t *abuf0, uint16_t *dest, int dstW, - int uvalpha, int y, enum PixelFormat target) -{ - int i; - - for (i = 0; i < (dstW >> 1); i++) { - int Y1 = (buf0[i * 2 ]+4)>>3; - int Y2 = (buf0[i * 2 + 1]+4)>>3; - - output_pixel(&dest[i * 2 + 0], Y1); - output_pixel(&dest[i * 2 + 1], Y2); - } -} - -#undef output_pixel - -#define YUV2PACKED16WRAPPER(name, base, ext, fmt) \ -static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \ - const int16_t **_lumSrc, int lumFilterSize, \ - const int16_t *chrFilter, const int16_t **_chrUSrc, \ - const int16_t **_chrVSrc, int chrFilterSize, \ - const int16_t **_alpSrc, uint8_t *_dest, int dstW, \ - int y) \ -{ \ - const int32_t **lumSrc = (const int32_t **) _lumSrc, \ - **chrUSrc = (const int32_t **) _chrUSrc, \ - **chrVSrc = (const int32_t **) _chrVSrc, \ - **alpSrc = (const int32_t **) _alpSrc; \ - uint16_t *dest = (uint16_t *) _dest; \ - name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \ - chrFilter, chrUSrc, chrVSrc, chrFilterSize, \ - alpSrc, dest, dstW, y, fmt); \ -} \ - \ -static void name ## ext ## _2_c(SwsContext *c, const int16_t *_buf[2], \ - const int16_t *_ubuf[2], const int16_t *_vbuf[2], \ - const int16_t *_abuf[2], uint8_t *_dest, int dstW, \ - int yalpha, int uvalpha, int y) \ -{ \ - const int32_t **buf = (const int32_t **) _buf, \ - **ubuf = (const int32_t **) _ubuf, \ - **vbuf = (const int32_t **) _vbuf, \ - **abuf = (const int32_t **) _abuf; \ - uint16_t *dest = (uint16_t *) _dest; \ - name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \ - dest, dstW, yalpha, uvalpha, y, fmt); \ -} \ - \ -static void name ## ext ## _1_c(SwsContext *c, const int16_t *_buf0, \ - const int16_t *_ubuf[2], const int16_t *_vbuf[2], \ - const int16_t *_abuf0, uint8_t *_dest, int dstW, \ - int uvalpha, int y) \ -{ \ - const int32_t *buf0 = (const int32_t *) _buf0, \ - **ubuf = (const int32_t **) _ubuf, \ - **vbuf = (const int32_t **) _vbuf, \ - *abuf0 = (const int32_t *) _abuf0; \ - uint16_t *dest = (uint16_t *) _dest; \ - name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \ - dstW, uvalpha, y, fmt); \ -} - -YUV2PACKED16WRAPPER(yuv2gray16,, LE, 
PIX_FMT_GRAY16LE) -YUV2PACKED16WRAPPER(yuv2gray16,, BE, PIX_FMT_GRAY16BE) - -#define output_pixel(pos, acc) \ - if (target == PIX_FMT_MONOBLACK) { \ - pos = acc; \ - } else { \ - pos = ~acc; \ - } - -static av_always_inline void -yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter, - const int16_t **lumSrc, int lumFilterSize, - const int16_t *chrFilter, const int16_t **chrUSrc, - const int16_t **chrVSrc, int chrFilterSize, - const int16_t **alpSrc, uint8_t *dest, int dstW, - int y, enum PixelFormat target) -{ - const uint8_t * const d128=dither_8x8_220[y&7]; - uint8_t *g = c->table_gU[128 + YUVRGB_TABLE_HEADROOM] + c->table_gV[128 + YUVRGB_TABLE_HEADROOM]; - int i; - unsigned acc = 0; - - for (i = 0; i < dstW - 1; i += 2) { - int j; - int Y1 = 1 << 18; - int Y2 = 1 << 18; - - for (j = 0; j < lumFilterSize; j++) { - Y1 += lumSrc[j][i] * lumFilter[j]; - Y2 += lumSrc[j][i+1] * lumFilter[j]; - } - Y1 >>= 19; - Y2 >>= 19; - if ((Y1 | Y2) & 0x100) { - Y1 = av_clip_uint8(Y1); - Y2 = av_clip_uint8(Y2); - } - acc += acc + g[Y1 + d128[(i + 0) & 7]]; - acc += acc + g[Y2 + d128[(i + 1) & 7]]; - if ((i & 7) == 6) { - output_pixel(*dest++, acc); - } - } -} - -static av_always_inline void -yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2], - const int16_t *ubuf[2], const int16_t *vbuf[2], - const int16_t *abuf[2], uint8_t *dest, int dstW, - int yalpha, int uvalpha, int y, - enum PixelFormat target) -{ - const int16_t *buf0 = buf[0], *buf1 = buf[1]; - const uint8_t * const d128 = dither_8x8_220[y & 7]; - uint8_t *g = c->table_gU[128 + YUVRGB_TABLE_HEADROOM] + c->table_gV[128 + YUVRGB_TABLE_HEADROOM]; - int yalpha1 = 4095 - yalpha; - int i; - - for (i = 0; i < dstW - 7; i += 8) { - int acc = g[((buf0[i ] * yalpha1 + buf1[i ] * yalpha) >> 19) + d128[0]]; - acc += acc + g[((buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19) + d128[1]]; - acc += acc + g[((buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19) + d128[2]]; - acc += acc + g[((buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19) + d128[3]]; - acc += acc + g[((buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19) + d128[4]]; - acc += acc + g[((buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19) + d128[5]]; - acc += acc + g[((buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19) + d128[6]]; - acc += acc + g[((buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19) + d128[7]]; - output_pixel(*dest++, acc); - } -} - -static av_always_inline void -yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0, - const int16_t *ubuf[2], const int16_t *vbuf[2], - const int16_t *abuf0, uint8_t *dest, int dstW, - int uvalpha, int y, enum PixelFormat target) -{ - const uint8_t * const d128 = dither_8x8_220[y & 7]; - uint8_t *g = c->table_gU[128 + YUVRGB_TABLE_HEADROOM] + c->table_gV[128 + YUVRGB_TABLE_HEADROOM]; - int i; - - for (i = 0; i < dstW - 7; i += 8) { - int acc = g[(buf0[i ] >> 7) + d128[0]]; - acc += acc + g[(buf0[i + 1] >> 7) + d128[1]]; - acc += acc + g[(buf0[i + 2] >> 7) + d128[2]]; - acc += acc + g[(buf0[i + 3] >> 7) + d128[3]]; - acc += acc + g[(buf0[i + 4] >> 7) + d128[4]]; - acc += acc + g[(buf0[i + 5] >> 7) + d128[5]]; - acc += acc + g[(buf0[i + 6] >> 7) + d128[6]]; - acc += acc + g[(buf0[i + 7] >> 7) + d128[7]]; - output_pixel(*dest++, acc); - } -} - -#undef output_pixel - -#define YUV2PACKEDWRAPPER(name, base, ext, fmt) \ -static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \ - const int16_t **lumSrc, int lumFilterSize, \ - const int16_t *chrFilter, const int16_t **chrUSrc, \ - const int16_t **chrVSrc, int 
chrFilterSize, \ - const int16_t **alpSrc, uint8_t *dest, int dstW, \ - int y) \ -{ \ - name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \ - chrFilter, chrUSrc, chrVSrc, chrFilterSize, \ - alpSrc, dest, dstW, y, fmt); \ -} \ - \ -static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \ - const int16_t *ubuf[2], const int16_t *vbuf[2], \ - const int16_t *abuf[2], uint8_t *dest, int dstW, \ - int yalpha, int uvalpha, int y) \ -{ \ - name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \ - dest, dstW, yalpha, uvalpha, y, fmt); \ -} \ - \ -static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \ - const int16_t *ubuf[2], const int16_t *vbuf[2], \ - const int16_t *abuf0, uint8_t *dest, int dstW, \ - int uvalpha, int y) \ -{ \ - name ## base ## _1_c_template(c, buf0, ubuf, vbuf, \ - abuf0, dest, dstW, uvalpha, \ - y, fmt); \ -} - -YUV2PACKEDWRAPPER(yuv2mono,, white, PIX_FMT_MONOWHITE) -YUV2PACKEDWRAPPER(yuv2mono,, black, PIX_FMT_MONOBLACK) - -#define output_pixels(pos, Y1, U, Y2, V) \ - if (target == PIX_FMT_YUYV422) { \ - dest[pos + 0] = Y1; \ - dest[pos + 1] = U; \ - dest[pos + 2] = Y2; \ - dest[pos + 3] = V; \ - } else { \ - dest[pos + 0] = U; \ - dest[pos + 1] = Y1; \ - dest[pos + 2] = V; \ - dest[pos + 3] = Y2; \ - } - -static av_always_inline void -yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter, - const int16_t **lumSrc, int lumFilterSize, - const int16_t *chrFilter, const int16_t **chrUSrc, - const int16_t **chrVSrc, int chrFilterSize, - const int16_t **alpSrc, uint8_t *dest, int dstW, - int y, enum PixelFormat target) -{ - int i; - - for (i = 0; i < (dstW >> 1); i++) { - int j; - int Y1 = 1 << 18; - int Y2 = 1 << 18; - int U = 1 << 18; - int V = 1 << 18; - - for (j = 0; j < lumFilterSize; j++) { - Y1 += lumSrc[j][i * 2] * lumFilter[j]; - Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j]; - } - for (j = 0; j < chrFilterSize; j++) { - U += chrUSrc[j][i] * chrFilter[j]; - V += chrVSrc[j][i] * chrFilter[j]; - } - Y1 >>= 19; - Y2 >>= 19; - U >>= 19; - V >>= 19; - if ((Y1 | Y2 | U | V) & 0x100) { - Y1 = av_clip_uint8(Y1); - Y2 = av_clip_uint8(Y2); - U = av_clip_uint8(U); - V = av_clip_uint8(V); - } - output_pixels(4*i, Y1, U, Y2, V); - } -} - -static av_always_inline void -yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2], - const int16_t *ubuf[2], const int16_t *vbuf[2], - const int16_t *abuf[2], uint8_t *dest, int dstW, - int yalpha, int uvalpha, int y, - enum PixelFormat target) -{ - const int16_t *buf0 = buf[0], *buf1 = buf[1], - *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], - *vbuf0 = vbuf[0], *vbuf1 = vbuf[1]; - int yalpha1 = 4095 - yalpha; - int uvalpha1 = 4095 - uvalpha; - int i; - - for (i = 0; i < (dstW >> 1); i++) { - int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19; - int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19; - int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19; - int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19; - - output_pixels(i * 4, Y1, U, Y2, V); - } -} - -static av_always_inline void -yuv2422_1_c_template(SwsContext *c, const int16_t *buf0, - const int16_t *ubuf[2], const int16_t *vbuf[2], - const int16_t *abuf0, uint8_t *dest, int dstW, - int uvalpha, int y, enum PixelFormat target) -{ - const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], - *vbuf0 = vbuf[0], *vbuf1 = vbuf[1]; - int i; - - if (uvalpha < 2048) { - for (i = 0; i < (dstW >> 1); i++) { - int Y1 = buf0[i * 2] >> 7; - int Y2 = buf0[i * 2 + 1] >> 7; - int U = ubuf1[i] >> 7; - int V = vbuf1[i] >> 7; - - 
output_pixels(i * 4, Y1, U, Y2, V); - } - } else { - for (i = 0; i < (dstW >> 1); i++) { - int Y1 = buf0[i * 2] >> 7; - int Y2 = buf0[i * 2 + 1] >> 7; - int U = (ubuf0[i] + ubuf1[i]) >> 8; - int V = (vbuf0[i] + vbuf1[i]) >> 8; - - output_pixels(i * 4, Y1, U, Y2, V); - } - } -} - -#undef output_pixels - -YUV2PACKEDWRAPPER(yuv2, 422, yuyv422, PIX_FMT_YUYV422) -YUV2PACKEDWRAPPER(yuv2, 422, uyvy422, PIX_FMT_UYVY422) - -#define R_B ((target == PIX_FMT_RGB48LE || target == PIX_FMT_RGB48BE) ? R : B) -#define B_R ((target == PIX_FMT_RGB48LE || target == PIX_FMT_RGB48BE) ? B : R) -#define output_pixel(pos, val) \ - if (isBE(target)) { \ - AV_WB16(pos, val); \ - } else { \ - AV_WL16(pos, val); \ - } - -static av_always_inline void -yuv2rgb48_X_c_template(SwsContext *c, const int16_t *lumFilter, - const int32_t **lumSrc, int lumFilterSize, - const int16_t *chrFilter, const int32_t **chrUSrc, - const int32_t **chrVSrc, int chrFilterSize, - const int32_t **alpSrc, uint16_t *dest, int dstW, - int y, enum PixelFormat target) -{ - int i; - - for (i = 0; i < (dstW >> 1); i++) { - int j; - int Y1 = -0x40000000; - int Y2 = -0x40000000; - int U = -128 << 23; // 19 - int V = -128 << 23; - int R, G, B; - - for (j = 0; j < lumFilterSize; j++) { - Y1 += lumSrc[j][i * 2] * lumFilter[j]; - Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j]; - } - for (j = 0; j < chrFilterSize; j++) { - U += chrUSrc[j][i] * chrFilter[j]; - V += chrVSrc[j][i] * chrFilter[j]; - } - - // 8bit: 12+15=27; 16-bit: 12+19=31 - Y1 >>= 14; // 10 - Y1 += 0x10000; - Y2 >>= 14; - Y2 += 0x10000; - U >>= 14; - V >>= 14; - - // 8bit: 27 -> 17bit, 16bit: 31 - 14 = 17bit - Y1 -= c->yuv2rgb_y_offset; - Y2 -= c->yuv2rgb_y_offset; - Y1 *= c->yuv2rgb_y_coeff; - Y2 *= c->yuv2rgb_y_coeff; - Y1 += 1 << 13; // 21 - Y2 += 1 << 13; - // 8bit: 17 + 13bit = 30bit, 16bit: 17 + 13bit = 30bit - - R = V * c->yuv2rgb_v2r_coeff; - G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; - B = U * c->yuv2rgb_u2b_coeff; - - // 8bit: 30 - 22 = 8bit, 16bit: 30bit - 14 = 16bit - output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); - output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); - output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); - output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); - output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); - output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); - dest += 6; - } -} - -static av_always_inline void -yuv2rgb48_2_c_template(SwsContext *c, const int32_t *buf[2], - const int32_t *ubuf[2], const int32_t *vbuf[2], - const int32_t *abuf[2], uint16_t *dest, int dstW, - int yalpha, int uvalpha, int y, - enum PixelFormat target) -{ - const int32_t *buf0 = buf[0], *buf1 = buf[1], - *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], - *vbuf0 = vbuf[0], *vbuf1 = vbuf[1]; - int yalpha1 = 4095 - yalpha; - int uvalpha1 = 4095 - uvalpha; - int i; - - for (i = 0; i < (dstW >> 1); i++) { - int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14; - int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14; - int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha + (-128 << 23)) >> 14; - int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha + (-128 << 23)) >> 14; - int R, G, B; - - Y1 -= c->yuv2rgb_y_offset; - Y2 -= c->yuv2rgb_y_offset; - Y1 *= c->yuv2rgb_y_coeff; - Y2 *= c->yuv2rgb_y_coeff; - Y1 += 1 << 13; - Y2 += 1 << 13; - - R = V * c->yuv2rgb_v2r_coeff; - G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; - B = U * c->yuv2rgb_u2b_coeff; - - output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); - 
output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); - output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); - output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); - output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); - output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); - dest += 6; - } -} - -static av_always_inline void -yuv2rgb48_1_c_template(SwsContext *c, const int32_t *buf0, - const int32_t *ubuf[2], const int32_t *vbuf[2], - const int32_t *abuf0, uint16_t *dest, int dstW, - int uvalpha, int y, enum PixelFormat target) -{ - const int32_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], - *vbuf0 = vbuf[0], *vbuf1 = vbuf[1]; - int i; - - if (uvalpha < 2048) { - for (i = 0; i < (dstW >> 1); i++) { - int Y1 = (buf0[i * 2] ) >> 2; - int Y2 = (buf0[i * 2 + 1]) >> 2; - int U = (ubuf0[i] + (-128 << 11)) >> 2; - int V = (vbuf0[i] + (-128 << 11)) >> 2; - int R, G, B; - - Y1 -= c->yuv2rgb_y_offset; - Y2 -= c->yuv2rgb_y_offset; - Y1 *= c->yuv2rgb_y_coeff; - Y2 *= c->yuv2rgb_y_coeff; - Y1 += 1 << 13; - Y2 += 1 << 13; - - R = V * c->yuv2rgb_v2r_coeff; - G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; - B = U * c->yuv2rgb_u2b_coeff; - - output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); - output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); - output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); - output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); - output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); - output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); - dest += 6; - } - } else { - for (i = 0; i < (dstW >> 1); i++) { - int Y1 = (buf0[i * 2] ) >> 2; - int Y2 = (buf0[i * 2 + 1]) >> 2; - int U = (ubuf0[i] + ubuf1[i] + (-128 << 12)) >> 3; - int V = (vbuf0[i] + vbuf1[i] + (-128 << 12)) >> 3; - int R, G, B; - - Y1 -= c->yuv2rgb_y_offset; - Y2 -= c->yuv2rgb_y_offset; - Y1 *= c->yuv2rgb_y_coeff; - Y2 *= c->yuv2rgb_y_coeff; - Y1 += 1 << 13; - Y2 += 1 << 13; - - R = V * c->yuv2rgb_v2r_coeff; - G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; - B = U * c->yuv2rgb_u2b_coeff; - - output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); - output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); - output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); - output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); - output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); - output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); - dest += 6; - } - } -} - -#undef output_pixel -#undef r_b -#undef b_r - -YUV2PACKED16WRAPPER(yuv2, rgb48, rgb48be, PIX_FMT_RGB48BE) -YUV2PACKED16WRAPPER(yuv2, rgb48, rgb48le, PIX_FMT_RGB48LE) -YUV2PACKED16WRAPPER(yuv2, rgb48, bgr48be, PIX_FMT_BGR48BE) -YUV2PACKED16WRAPPER(yuv2, rgb48, bgr48le, PIX_FMT_BGR48LE) - -/* - * Write out 2 RGB pixels in the target pixel format. This function takes a - * R/G/B LUT as generated by ff_yuv2rgb_c_init_tables(), which takes care of - * things like endianness conversion and shifting. The caller takes care of - * setting the correct offset in these tables from the chroma (U/V) values. - * This function then uses the luminance (Y1/Y2) values to write out the - * correct RGB values into the destination buffer. 
- */ -static av_always_inline void -yuv2rgb_write(uint8_t *_dest, int i, int Y1, int Y2, - unsigned A1, unsigned A2, - const void *_r, const void *_g, const void *_b, int y, - enum PixelFormat target, int hasAlpha) -{ - if (target == PIX_FMT_ARGB || target == PIX_FMT_RGBA || - target == PIX_FMT_ABGR || target == PIX_FMT_BGRA) { - uint32_t *dest = (uint32_t *) _dest; - const uint32_t *r = (const uint32_t *) _r; - const uint32_t *g = (const uint32_t *) _g; - const uint32_t *b = (const uint32_t *) _b; - -#if CONFIG_SMALL - int sh = hasAlpha ? ((target == PIX_FMT_RGB32_1 || target == PIX_FMT_BGR32_1) ? 0 : 24) : 0; - - dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (hasAlpha ? A1 << sh : 0); - dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (hasAlpha ? A2 << sh : 0); -#else - if (hasAlpha) { - int sh = (target == PIX_FMT_RGB32_1 || target == PIX_FMT_BGR32_1) ? 0 : 24; - - dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (A1 << sh); - dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (A2 << sh); - } else { - dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1]; - dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2]; - } -#endif - } else if (target == PIX_FMT_RGB24 || target == PIX_FMT_BGR24) { - uint8_t *dest = (uint8_t *) _dest; - const uint8_t *r = (const uint8_t *) _r; - const uint8_t *g = (const uint8_t *) _g; - const uint8_t *b = (const uint8_t *) _b; - -#define r_b ((target == PIX_FMT_RGB24) ? r : b) -#define b_r ((target == PIX_FMT_RGB24) ? b : r) - - dest[i * 6 + 0] = r_b[Y1]; - dest[i * 6 + 1] = g[Y1]; - dest[i * 6 + 2] = b_r[Y1]; - dest[i * 6 + 3] = r_b[Y2]; - dest[i * 6 + 4] = g[Y2]; - dest[i * 6 + 5] = b_r[Y2]; -#undef r_b -#undef b_r - } else if (target == PIX_FMT_RGB565 || target == PIX_FMT_BGR565 || - target == PIX_FMT_RGB555 || target == PIX_FMT_BGR555 || - target == PIX_FMT_RGB444 || target == PIX_FMT_BGR444) { - uint16_t *dest = (uint16_t *) _dest; - const uint16_t *r = (const uint16_t *) _r; - const uint16_t *g = (const uint16_t *) _g; - const uint16_t *b = (const uint16_t *) _b; - int dr1, dg1, db1, dr2, dg2, db2; - - if (target == PIX_FMT_RGB565 || target == PIX_FMT_BGR565) { - dr1 = dither_2x2_8[ y & 1 ][0]; - dg1 = dither_2x2_4[ y & 1 ][0]; - db1 = dither_2x2_8[(y & 1) ^ 1][0]; - dr2 = dither_2x2_8[ y & 1 ][1]; - dg2 = dither_2x2_4[ y & 1 ][1]; - db2 = dither_2x2_8[(y & 1) ^ 1][1]; - } else if (target == PIX_FMT_RGB555 || target == PIX_FMT_BGR555) { - dr1 = dither_2x2_8[ y & 1 ][0]; - dg1 = dither_2x2_8[ y & 1 ][1]; - db1 = dither_2x2_8[(y & 1) ^ 1][0]; - dr2 = dither_2x2_8[ y & 1 ][1]; - dg2 = dither_2x2_8[ y & 1 ][0]; - db2 = dither_2x2_8[(y & 1) ^ 1][1]; - } else { - dr1 = dither_4x4_16[ y & 3 ][0]; - dg1 = dither_4x4_16[ y & 3 ][1]; - db1 = dither_4x4_16[(y & 3) ^ 3][0]; - dr2 = dither_4x4_16[ y & 3 ][1]; - dg2 = dither_4x4_16[ y & 3 ][0]; - db2 = dither_4x4_16[(y & 3) ^ 3][1]; - } - - dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1]; - dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]; - } else /* 8/4-bit */ { - uint8_t *dest = (uint8_t *) _dest; - const uint8_t *r = (const uint8_t *) _r; - const uint8_t *g = (const uint8_t *) _g; - const uint8_t *b = (const uint8_t *) _b; - int dr1, dg1, db1, dr2, dg2, db2; - - if (target == PIX_FMT_RGB8 || target == PIX_FMT_BGR8) { - const uint8_t * const d64 = dither_8x8_73[y & 7]; - const uint8_t * const d32 = dither_8x8_32[y & 7]; - dr1 = dg1 = d32[(i * 2 + 0) & 7]; - db1 = d64[(i * 2 + 0) & 7]; - dr2 = dg2 = d32[(i * 2 + 1) & 7]; - db2 = d64[(i * 2 + 1) & 7]; - } else { - const uint8_t * const d64 = dither_8x8_73 [y & 7]; - const uint8_t * const d128 = 
dither_8x8_220[y & 7]; - dr1 = db1 = d128[(i * 2 + 0) & 7]; - dg1 = d64[(i * 2 + 0) & 7]; - dr2 = db2 = d128[(i * 2 + 1) & 7]; - dg2 = d64[(i * 2 + 1) & 7]; - } - - if (target == PIX_FMT_RGB4 || target == PIX_FMT_BGR4) { - dest[i] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1] + - ((r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]) << 4); - } else { - dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1]; - dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]; - } - } -} - -static av_always_inline void -yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter, - const int16_t **lumSrc, int lumFilterSize, - const int16_t *chrFilter, const int16_t **chrUSrc, - const int16_t **chrVSrc, int chrFilterSize, - const int16_t **alpSrc, uint8_t *dest, int dstW, - int y, enum PixelFormat target, int hasAlpha) -{ - int i; - - for (i = 0; i < (dstW >> 1); i++) { - int j; - int Y1 = 1 << 18; - int Y2 = 1 << 18; - int U = 1 << 18; - int V = 1 << 18; - int av_unused A1, A2; - const void *r, *g, *b; - - for (j = 0; j < lumFilterSize; j++) { - Y1 += lumSrc[j][i * 2] * lumFilter[j]; - Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j]; - } - for (j = 0; j < chrFilterSize; j++) { - U += chrUSrc[j][i] * chrFilter[j]; - V += chrVSrc[j][i] * chrFilter[j]; - } - Y1 >>= 19; - Y2 >>= 19; - U >>= 19; - V >>= 19; - if (hasAlpha) { - A1 = 1 << 18; - A2 = 1 << 18; - for (j = 0; j < lumFilterSize; j++) { - A1 += alpSrc[j][i * 2 ] * lumFilter[j]; - A2 += alpSrc[j][i * 2 + 1] * lumFilter[j]; - } - A1 >>= 19; - A2 >>= 19; - if ((A1 | A2) & 0x100) { - A1 = av_clip_uint8(A1); - A2 = av_clip_uint8(A2); - } - } - - r = c->table_rV[V + YUVRGB_TABLE_HEADROOM]; - g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]); - b = c->table_bU[U + YUVRGB_TABLE_HEADROOM]; - - yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0, - r, g, b, y, target, hasAlpha); - } -} - -static av_always_inline void -yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2], - const int16_t *ubuf[2], const int16_t *vbuf[2], - const int16_t *abuf[2], uint8_t *dest, int dstW, - int yalpha, int uvalpha, int y, - enum PixelFormat target, int hasAlpha) -{ - const int16_t *buf0 = buf[0], *buf1 = buf[1], - *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], - *vbuf0 = vbuf[0], *vbuf1 = vbuf[1], - *abuf0 = hasAlpha ? abuf[0] : NULL, - *abuf1 = hasAlpha ? abuf[1] : NULL; - int yalpha1 = 4095 - yalpha; - int uvalpha1 = 4095 - uvalpha; - int i; - - for (i = 0; i < (dstW >> 1); i++) { - int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19; - int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19; - int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19; - int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19; - int A1, A2; - const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM], - *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]), - *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM]; - - if (hasAlpha) { - A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 19; - A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19; - } - - yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? 
A2 : 0, - r, g, b, y, target, hasAlpha); - } -} - -static av_always_inline void -yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0, - const int16_t *ubuf[2], const int16_t *vbuf[2], - const int16_t *abuf0, uint8_t *dest, int dstW, - int uvalpha, int y, enum PixelFormat target, - int hasAlpha) -{ - const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], - *vbuf0 = vbuf[0], *vbuf1 = vbuf[1]; - int i; - - if (uvalpha < 2048) { - for (i = 0; i < (dstW >> 1); i++) { - int Y1 = buf0[i * 2] >> 7; - int Y2 = buf0[i * 2 + 1] >> 7; - int U = ubuf1[i] >> 7; - int V = vbuf1[i] >> 7; - int A1, A2; - const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM], - *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]), - *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM]; - - if (hasAlpha) { - A1 = abuf0[i * 2 ] >> 7; - A2 = abuf0[i * 2 + 1] >> 7; - } - - yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0, - r, g, b, y, target, hasAlpha); - } - } else { - for (i = 0; i < (dstW >> 1); i++) { - int Y1 = buf0[i * 2] >> 7; - int Y2 = buf0[i * 2 + 1] >> 7; - int U = (ubuf0[i] + ubuf1[i]) >> 8; - int V = (vbuf0[i] + vbuf1[i]) >> 8; - int A1, A2; - const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM], - *g = (c->table_gU[U + YUVRGB_TABLE_HEADROOM] + c->table_gV[V + YUVRGB_TABLE_HEADROOM]), - *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM]; - - if (hasAlpha) { - A1 = abuf0[i * 2 ] >> 7; - A2 = abuf0[i * 2 + 1] >> 7; - } - - yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0, - r, g, b, y, target, hasAlpha); - } - } -} - -#define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \ -static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \ - const int16_t **lumSrc, int lumFilterSize, \ - const int16_t *chrFilter, const int16_t **chrUSrc, \ - const int16_t **chrVSrc, int chrFilterSize, \ - const int16_t **alpSrc, uint8_t *dest, int dstW, \ - int y) \ -{ \ - name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \ - chrFilter, chrUSrc, chrVSrc, chrFilterSize, \ - alpSrc, dest, dstW, y, fmt, hasAlpha); \ -} -#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \ -YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \ -static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \ - const int16_t *ubuf[2], const int16_t *vbuf[2], \ - const int16_t *abuf[2], uint8_t *dest, int dstW, \ - int yalpha, int uvalpha, int y) \ -{ \ - name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \ - dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \ -} \ - \ -static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \ - const int16_t *ubuf[2], const int16_t *vbuf[2], \ - const int16_t *abuf0, uint8_t *dest, int dstW, \ - int uvalpha, int y) \ -{ \ - name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \ - dstW, uvalpha, y, fmt, hasAlpha); \ -} - -#if CONFIG_SMALL -YUV2RGBWRAPPER(yuv2rgb,, 32_1, PIX_FMT_RGB32_1, CONFIG_SWSCALE_ALPHA && c->alpPixBuf) -YUV2RGBWRAPPER(yuv2rgb,, 32, PIX_FMT_RGB32, CONFIG_SWSCALE_ALPHA && c->alpPixBuf) -#else -#if CONFIG_SWSCALE_ALPHA -YUV2RGBWRAPPER(yuv2rgb,, a32_1, PIX_FMT_RGB32_1, 1) -YUV2RGBWRAPPER(yuv2rgb,, a32, PIX_FMT_RGB32, 1) -#endif -YUV2RGBWRAPPER(yuv2rgb,, x32_1, PIX_FMT_RGB32_1, 0) -YUV2RGBWRAPPER(yuv2rgb,, x32, PIX_FMT_RGB32, 0) -#endif -YUV2RGBWRAPPER(yuv2, rgb, rgb24, PIX_FMT_RGB24, 0) -YUV2RGBWRAPPER(yuv2, rgb, bgr24, PIX_FMT_BGR24, 0) -YUV2RGBWRAPPER(yuv2rgb,, 16, PIX_FMT_RGB565, 0) -YUV2RGBWRAPPER(yuv2rgb,, 15, PIX_FMT_RGB555, 0) -YUV2RGBWRAPPER(yuv2rgb,, 12, PIX_FMT_RGB444, 0) 
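For readers skimming the removal: yuv2rgb_write() above is the single writer shared by all of these YUV2RGBWRAPPER instantiations. As its own comment notes, it never computes R/G/B from coefficients itself; it indexes the per-component lookup tables built by ff_yuv2rgb_c_init_tables() (already offset by the caller according to U/V) and sums the three lookups, so endianness, bit packing and dithering are baked into the tables. A minimal, self-contained sketch of that table-driven write for an RGB565-like layout; the table contents and function names are illustrative only and ignore the chroma contribution and dither offsets that the real tables carry:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy component LUTs: map an 8-bit luma value to component bits already
     * shifted into their RGB565 positions. The real swscale tables also fold
     * in the chroma term selected by the caller; this sketch does not. */
    static uint16_t tab_r[256], tab_g[256], tab_b[256];

    static void init_toy_tables(void)
    {
        int y;
        for (y = 0; y < 256; y++) {
            tab_r[y] = (y >> 3) << 11;   /* 5 bits of red   */
            tab_g[y] = (y >> 2) <<  5;   /* 6 bits of green */
            tab_b[y] =  y >> 3;          /* 5 bits of blue  */
        }
    }

    /* Write two adjacent pixels the way yuv2rgb_write() handles the 16-bit
     * packed formats: one lookup per component, then a plain sum. */
    static void write_pair_rgb565(uint16_t *dest, int i, int y1, int y2)
    {
        dest[i * 2 + 0] = tab_r[y1] + tab_g[y1] + tab_b[y1];
        dest[i * 2 + 1] = tab_r[y2] + tab_g[y2] + tab_b[y2];
    }

    int main(void)
    {
        uint16_t row[2];
        init_toy_tables();
        write_pair_rgb565(row, 0, 16, 235);
        printf("%04x %04x\n", row[0], row[1]);
        return 0;
    }

The dither offsets added to Y1/Y2 in the 565/555/444 branches come from the dither_2x2_* and dither_4x4_16 indexing visible above; they are what keep banding down at these low bit depths and are omitted from the sketch.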
-YUV2RGBWRAPPER(yuv2rgb,, 8, PIX_FMT_RGB8, 0) -YUV2RGBWRAPPER(yuv2rgb,, 4, PIX_FMT_RGB4, 0) -YUV2RGBWRAPPER(yuv2rgb,, 4b, PIX_FMT_RGB4_BYTE, 0) - -static av_always_inline void -yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter, - const int16_t **lumSrc, int lumFilterSize, - const int16_t *chrFilter, const int16_t **chrUSrc, - const int16_t **chrVSrc, int chrFilterSize, - const int16_t **alpSrc, uint8_t *dest, - int dstW, int y, enum PixelFormat target, int hasAlpha) -{ - int i; - int step = (target == PIX_FMT_RGB24 || target == PIX_FMT_BGR24) ? 3 : 4; - - for (i = 0; i < dstW; i++) { - int j; - int Y = 1<<9; - int U = (1<<9)-(128 << 19); - int V = (1<<9)-(128 << 19); - int av_unused A; - int R, G, B; - - for (j = 0; j < lumFilterSize; j++) { - Y += lumSrc[j][i] * lumFilter[j]; - } - for (j = 0; j < chrFilterSize; j++) { - U += chrUSrc[j][i] * chrFilter[j]; - V += chrVSrc[j][i] * chrFilter[j]; - } - Y >>= 10; - U >>= 10; - V >>= 10; - if (hasAlpha) { - A = 1 << 18; - for (j = 0; j < lumFilterSize; j++) { - A += alpSrc[j][i] * lumFilter[j]; - } - A >>= 19; - if (A & 0x100) - A = av_clip_uint8(A); - } - Y -= c->yuv2rgb_y_offset; - Y *= c->yuv2rgb_y_coeff; - Y += 1 << 21; - R = Y + V*c->yuv2rgb_v2r_coeff; - G = Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff; - B = Y + U*c->yuv2rgb_u2b_coeff; - if ((R | G | B) & 0xC0000000) { - R = av_clip_uintp2(R, 30); - G = av_clip_uintp2(G, 30); - B = av_clip_uintp2(B, 30); - } - - switch(target) { - case PIX_FMT_ARGB: - dest[0] = hasAlpha ? A : 255; - dest[1] = R >> 22; - dest[2] = G >> 22; - dest[3] = B >> 22; - break; - case PIX_FMT_RGB24: - dest[0] = R >> 22; - dest[1] = G >> 22; - dest[2] = B >> 22; - break; - case PIX_FMT_RGBA: - dest[0] = R >> 22; - dest[1] = G >> 22; - dest[2] = B >> 22; - dest[3] = hasAlpha ? A : 255; - break; - case PIX_FMT_ABGR: - dest[0] = hasAlpha ? A : 255; - dest[1] = B >> 22; - dest[2] = G >> 22; - dest[3] = R >> 22; - break; - case PIX_FMT_BGR24: - dest[0] = B >> 22; - dest[1] = G >> 22; - dest[2] = R >> 22; - break; - case PIX_FMT_BGRA: - dest[0] = B >> 22; - dest[1] = G >> 22; - dest[2] = R >> 22; - dest[3] = hasAlpha ? A : 255; - break; - } - dest += step; - } -} - -#if CONFIG_SMALL -YUV2RGBWRAPPERX(yuv2, rgb_full, bgra32_full, PIX_FMT_BGRA, CONFIG_SWSCALE_ALPHA && c->alpPixBuf) -YUV2RGBWRAPPERX(yuv2, rgb_full, abgr32_full, PIX_FMT_ABGR, CONFIG_SWSCALE_ALPHA && c->alpPixBuf) -YUV2RGBWRAPPERX(yuv2, rgb_full, rgba32_full, PIX_FMT_RGBA, CONFIG_SWSCALE_ALPHA && c->alpPixBuf) -YUV2RGBWRAPPERX(yuv2, rgb_full, argb32_full, PIX_FMT_ARGB, CONFIG_SWSCALE_ALPHA && c->alpPixBuf) -#else -#if CONFIG_SWSCALE_ALPHA -YUV2RGBWRAPPERX(yuv2, rgb_full, bgra32_full, PIX_FMT_BGRA, 1) -YUV2RGBWRAPPERX(yuv2, rgb_full, abgr32_full, PIX_FMT_ABGR, 1) -YUV2RGBWRAPPERX(yuv2, rgb_full, rgba32_full, PIX_FMT_RGBA, 1) -YUV2RGBWRAPPERX(yuv2, rgb_full, argb32_full, PIX_FMT_ARGB, 1) -#endif -YUV2RGBWRAPPERX(yuv2, rgb_full, bgrx32_full, PIX_FMT_BGRA, 0) -YUV2RGBWRAPPERX(yuv2, rgb_full, xbgr32_full, PIX_FMT_ABGR, 0) -YUV2RGBWRAPPERX(yuv2, rgb_full, rgbx32_full, PIX_FMT_RGBA, 0) -YUV2RGBWRAPPERX(yuv2, rgb_full, xrgb32_full, PIX_FMT_ARGB, 0) -#endif -YUV2RGBWRAPPERX(yuv2, rgb_full, bgr24_full, PIX_FMT_BGR24, 0) -YUV2RGBWRAPPERX(yuv2, rgb_full, rgb24_full, PIX_FMT_RGB24, 0) static av_always_inline void fillPlane(uint8_t* plane, int stride, int width, int height, @@ -1419,583 +61,6 @@ static av_always_inline void fillPlane(uint8_t* plane, int stride, } } -#define input_pixel(pos) (isBE(origin) ? 
AV_RB16(pos) : AV_RL16(pos)) - -#define r ((origin == PIX_FMT_BGR48BE || origin == PIX_FMT_BGR48LE) ? b_r : r_b) -#define b ((origin == PIX_FMT_BGR48BE || origin == PIX_FMT_BGR48LE) ? r_b : b_r) - -static av_always_inline void -rgb48ToY_c_template(uint16_t *dst, const uint16_t *src, int width, - enum PixelFormat origin) -{ - int i; - for (i = 0; i < width; i++) { - unsigned int r_b = input_pixel(&src[i*3+0]); - unsigned int g = input_pixel(&src[i*3+1]); - unsigned int b_r = input_pixel(&src[i*3+2]); - - dst[i] = (RY*r + GY*g + BY*b + (0x2001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT; - } -} - -static av_always_inline void -rgb48ToUV_c_template(uint16_t *dstU, uint16_t *dstV, - const uint16_t *src1, const uint16_t *src2, - int width, enum PixelFormat origin) -{ - int i; - assert(src1==src2); - for (i = 0; i < width; i++) { - int r_b = input_pixel(&src1[i*3+0]); - int g = input_pixel(&src1[i*3+1]); - int b_r = input_pixel(&src1[i*3+2]); - - dstU[i] = (RU*r + GU*g + BU*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT; - dstV[i] = (RV*r + GV*g + BV*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT; - } -} - -static av_always_inline void -rgb48ToUV_half_c_template(uint16_t *dstU, uint16_t *dstV, - const uint16_t *src1, const uint16_t *src2, - int width, enum PixelFormat origin) -{ - int i; - assert(src1==src2); - for (i = 0; i < width; i++) { - int r_b = (input_pixel(&src1[6 * i + 0]) + input_pixel(&src1[6 * i + 3]) + 1) >> 1; - int g = (input_pixel(&src1[6 * i + 1]) + input_pixel(&src1[6 * i + 4]) + 1) >> 1; - int b_r = (input_pixel(&src1[6 * i + 2]) + input_pixel(&src1[6 * i + 5]) + 1) >> 1; - - dstU[i]= (RU*r + GU*g + BU*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT; - dstV[i]= (RV*r + GV*g + BV*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT; - } -} - -#undef r -#undef b -#undef input_pixel - -#define rgb48funcs(pattern, BE_LE, origin) \ -static void pattern ## 48 ## BE_LE ## ToY_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused0, const uint8_t *unused1,\ - int width, uint32_t *unused) \ -{ \ - const uint16_t *src = (const uint16_t *) _src; \ - uint16_t *dst = (uint16_t *) _dst; \ - rgb48ToY_c_template(dst, src, width, origin); \ -} \ - \ -static void pattern ## 48 ## BE_LE ## ToUV_c(uint8_t *_dstU, uint8_t *_dstV, \ - const uint8_t *unused0, const uint8_t *_src1, const uint8_t *_src2, \ - int width, uint32_t *unused) \ -{ \ - const uint16_t *src1 = (const uint16_t *) _src1, \ - *src2 = (const uint16_t *) _src2; \ - uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; \ - rgb48ToUV_c_template(dstU, dstV, src1, src2, width, origin); \ -} \ - \ -static void pattern ## 48 ## BE_LE ## ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, \ - const uint8_t *unused0, const uint8_t *_src1, const uint8_t *_src2, \ - int width, uint32_t *unused) \ -{ \ - const uint16_t *src1 = (const uint16_t *) _src1, \ - *src2 = (const uint16_t *) _src2; \ - uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; \ - rgb48ToUV_half_c_template(dstU, dstV, src1, src2, width, origin); \ -} - -rgb48funcs(rgb, LE, PIX_FMT_RGB48LE) -rgb48funcs(rgb, BE, PIX_FMT_RGB48BE) -rgb48funcs(bgr, LE, PIX_FMT_BGR48LE) -rgb48funcs(bgr, BE, PIX_FMT_BGR48BE) - -#define input_pixel(i) ((origin == PIX_FMT_RGBA || origin == PIX_FMT_BGRA || \ - origin == PIX_FMT_ARGB || origin == PIX_FMT_ABGR) ? AV_RN32A(&src[(i)*4]) : \ - (isBE(origin) ? 
AV_RB16(&src[(i)*2]) : AV_RL16(&src[(i)*2]))) - -static av_always_inline void -rgb16_32ToY_c_template(int16_t *dst, const uint8_t *src, - int width, enum PixelFormat origin, - int shr, int shg, int shb, int shp, - int maskr, int maskg, int maskb, - int rsh, int gsh, int bsh, int S) -{ - const int ry = RY << rsh, gy = GY << gsh, by = BY << bsh; - const unsigned rnd = (32<<((S)-1)) + (1<<(S-7)); - int i; - - for (i = 0; i < width; i++) { - int px = input_pixel(i) >> shp; - int b = (px & maskb) >> shb; - int g = (px & maskg) >> shg; - int r = (px & maskr) >> shr; - - dst[i] = (ry * r + gy * g + by * b + rnd) >> ((S)-6); - } -} - -static av_always_inline void -rgb16_32ToUV_c_template(int16_t *dstU, int16_t *dstV, - const uint8_t *src, int width, - enum PixelFormat origin, - int shr, int shg, int shb, int shp, - int maskr, int maskg, int maskb, - int rsh, int gsh, int bsh, int S) -{ - const int ru = RU << rsh, gu = GU << gsh, bu = BU << bsh, - rv = RV << rsh, gv = GV << gsh, bv = BV << bsh; - const unsigned rnd = (256u<<((S)-1)) + (1<<(S-7)); - int i; - - for (i = 0; i < width; i++) { - int px = input_pixel(i) >> shp; - int b = (px & maskb) >> shb; - int g = (px & maskg) >> shg; - int r = (px & maskr) >> shr; - - dstU[i] = (ru * r + gu * g + bu * b + rnd) >> ((S)-6); - dstV[i] = (rv * r + gv * g + bv * b + rnd) >> ((S)-6); - } -} - -static av_always_inline void -rgb16_32ToUV_half_c_template(int16_t *dstU, int16_t *dstV, - const uint8_t *src, int width, - enum PixelFormat origin, - int shr, int shg, int shb, int shp, - int maskr, int maskg, int maskb, - int rsh, int gsh, int bsh, int S) -{ - const int ru = RU << rsh, gu = GU << gsh, bu = BU << bsh, - rv = RV << rsh, gv = GV << gsh, bv = BV << bsh, - maskgx = ~(maskr | maskb); - const unsigned rnd = (256U<<(S)) + (1<<(S-6)); - int i; - - maskr |= maskr << 1; maskb |= maskb << 1; maskg |= maskg << 1; - for (i = 0; i < width; i++) { - int px0 = input_pixel(2 * i + 0) >> shp; - int px1 = input_pixel(2 * i + 1) >> shp; - int b, r, g = (px0 & maskgx) + (px1 & maskgx); - int rb = px0 + px1 - g; - - b = (rb & maskb) >> shb; - if (shp || origin == PIX_FMT_BGR565LE || origin == PIX_FMT_BGR565BE || - origin == PIX_FMT_RGB565LE || origin == PIX_FMT_RGB565BE) { - g >>= shg; - } else { - g = (g & maskg) >> shg; - } - r = (rb & maskr) >> shr; - - dstU[i] = (ru * r + gu * g + bu * b + (unsigned)rnd) >> ((S)-6+1); - dstV[i] = (rv * r + gv * g + bv * b + (unsigned)rnd) >> ((S)-6+1); - } -} - -#undef input_pixel - -#define rgb16_32_wrapper(fmt, name, shr, shg, shb, shp, maskr, \ - maskg, maskb, rsh, gsh, bsh, S) \ -static void name ## ToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, \ - int width, uint32_t *unused) \ -{ \ - rgb16_32ToY_c_template((int16_t*)dst, src, width, fmt, \ - shr, shg, shb, shp, \ - maskr, maskg, maskb, rsh, gsh, bsh, S); \ -} \ - \ -static void name ## ToUV_c(uint8_t *dstU, uint8_t *dstV, \ - const uint8_t *unused0, const uint8_t *src, const uint8_t *dummy, \ - int width, uint32_t *unused) \ -{ \ - rgb16_32ToUV_c_template((int16_t*)dstU, (int16_t*)dstV, src, width, fmt, \ - shr, shg, shb, shp, \ - maskr, maskg, maskb, rsh, gsh, bsh, S); \ -} \ - \ -static void name ## ToUV_half_c(uint8_t *dstU, uint8_t *dstV, \ - const uint8_t *unused0, const uint8_t *src, const uint8_t *dummy, \ - int width, uint32_t *unused) \ -{ \ - rgb16_32ToUV_half_c_template((int16_t*)dstU, (int16_t*)dstV, src, width, fmt, \ - shr, shg, shb, shp, \ - maskr, maskg, maskb, rsh, gsh, bsh, S); \ -} - -rgb16_32_wrapper(PIX_FMT_BGR32, 
bgr32, 16, 0, 0, 0, 0xFF0000, 0xFF00, 0x00FF, 8, 0, 8, RGB2YUV_SHIFT+8) -rgb16_32_wrapper(PIX_FMT_BGR32_1, bgr321, 16, 0, 0, 8, 0xFF0000, 0xFF00, 0x00FF, 8, 0, 8, RGB2YUV_SHIFT+8) -rgb16_32_wrapper(PIX_FMT_RGB32, rgb32, 0, 0, 16, 0, 0x00FF, 0xFF00, 0xFF0000, 8, 0, 8, RGB2YUV_SHIFT+8) -rgb16_32_wrapper(PIX_FMT_RGB32_1, rgb321, 0, 0, 16, 8, 0x00FF, 0xFF00, 0xFF0000, 8, 0, 8, RGB2YUV_SHIFT+8) -rgb16_32_wrapper(PIX_FMT_BGR565LE, bgr16le, 0, 0, 0, 0, 0x001F, 0x07E0, 0xF800, 11, 5, 0, RGB2YUV_SHIFT+8) -rgb16_32_wrapper(PIX_FMT_BGR555LE, bgr15le, 0, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, 10, 5, 0, RGB2YUV_SHIFT+7) -rgb16_32_wrapper(PIX_FMT_BGR444LE, bgr12le, 0, 0, 0, 0, 0x000F, 0x00F0, 0x0F00, 8, 4, 0, RGB2YUV_SHIFT+4) -rgb16_32_wrapper(PIX_FMT_RGB565LE, rgb16le, 0, 0, 0, 0, 0xF800, 0x07E0, 0x001F, 0, 5, 11, RGB2YUV_SHIFT+8) -rgb16_32_wrapper(PIX_FMT_RGB555LE, rgb15le, 0, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, 0, 5, 10, RGB2YUV_SHIFT+7) -rgb16_32_wrapper(PIX_FMT_RGB444LE, rgb12le, 0, 0, 0, 0, 0x0F00, 0x00F0, 0x000F, 0, 4, 8, RGB2YUV_SHIFT+4) -rgb16_32_wrapper(PIX_FMT_BGR565BE, bgr16be, 0, 0, 0, 0, 0x001F, 0x07E0, 0xF800, 11, 5, 0, RGB2YUV_SHIFT+8) -rgb16_32_wrapper(PIX_FMT_BGR555BE, bgr15be, 0, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, 10, 5, 0, RGB2YUV_SHIFT+7) -rgb16_32_wrapper(PIX_FMT_BGR444BE, bgr12be, 0, 0, 0, 0, 0x000F, 0x00F0, 0x0F00, 8, 4, 0, RGB2YUV_SHIFT+4) -rgb16_32_wrapper(PIX_FMT_RGB565BE, rgb16be, 0, 0, 0, 0, 0xF800, 0x07E0, 0x001F, 0, 5, 11, RGB2YUV_SHIFT+8) -rgb16_32_wrapper(PIX_FMT_RGB555BE, rgb15be, 0, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, 0, 5, 10, RGB2YUV_SHIFT+7) -rgb16_32_wrapper(PIX_FMT_RGB444BE, rgb12be, 0, 0, 0, 0, 0x0F00, 0x00F0, 0x000F, 0, 4, 8, RGB2YUV_SHIFT+4) - -static void gbr24pToUV_half_c(uint16_t *dstU, uint16_t *dstV, - const uint8_t *gsrc, const uint8_t *bsrc, const uint8_t *rsrc, - int width, enum PixelFormat origin) -{ - int i; - for (i = 0; i < width; i++) { - unsigned int g = gsrc[2*i] + gsrc[2*i+1]; - unsigned int b = bsrc[2*i] + bsrc[2*i+1]; - unsigned int r = rsrc[2*i] + rsrc[2*i+1]; - - dstU[i] = (RU*r + GU*g + BU*b + (0x4001<<(RGB2YUV_SHIFT-6))) >> (RGB2YUV_SHIFT-6+1); - dstV[i] = (RV*r + GV*g + BV*b + (0x4001<<(RGB2YUV_SHIFT-6))) >> (RGB2YUV_SHIFT-6+1); - } -} - -static void abgrToA_c(int16_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused) -{ - int i; - for (i=0; i> 24)<<6; - } -} - -static void palToY_c(int16_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, long width, uint32_t *pal) -{ - int i; - for (i=0; i> 8)<<6; - dstV[i]= (uint8_t)(p>>16)<<6; - } -} - -static void monowhite2Y_c(int16_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused) -{ - int i, j; - for (i=0; i>(7-j))&1)*16383; - } - if(width&7){ - int d= ~src[i]; - for(j=0; j<(width&7); j++) - dst[8*i+j]= ((d>>(7-j))&1)*16383; - } -} - -static void monoblack2Y_c(int16_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused) -{ - int i, j; - for (i=0; i>(7-j))&1)*16383; - } - if(width&7){ - int d= src[i]; - for(j=0; j<(width&7); j++) - dst[8*i+j]= ((d>>(7-j))&1)*16383; - } -} - -static void yuy2ToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, - uint32_t *unused) -{ - int i; - for (i=0; i>(RGB2YUV_SHIFT-6)); - } -} - -static void bgr24ToUV_c(int16_t *dstU, int16_t *dstV, const uint8_t *unused0, const uint8_t *src1, - const uint8_t *src2, int width, uint32_t *unused) -{ - int i; - for (i=0; 
i>(RGB2YUV_SHIFT-6); - dstV[i]= (RV*r + GV*g + BV*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6); - } - assert(src1 == src2); -} - -static void bgr24ToUV_half_c(int16_t *dstU, int16_t *dstV, const uint8_t *unused0, const uint8_t *src1, - const uint8_t *src2, int width, uint32_t *unused) -{ - int i; - for (i=0; i>(RGB2YUV_SHIFT-5); - dstV[i]= (RV*r + GV*g + BV*b + (256<>(RGB2YUV_SHIFT-5); - } - assert(src1 == src2); -} - -static void rgb24ToY_c(int16_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, - uint32_t *unused) -{ - int i; - for (i=0; i>(RGB2YUV_SHIFT-6)); - } -} - -static void rgb24ToUV_c(int16_t *dstU, int16_t *dstV, const uint8_t *unused0, const uint8_t *src1, - const uint8_t *src2, int width, uint32_t *unused) -{ - int i; - assert(src1==src2); - for (i=0; i>(RGB2YUV_SHIFT-6); - dstV[i]= (RV*r + GV*g + BV*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6); - } -} - -static void rgb24ToUV_half_c(int16_t *dstU, int16_t *dstV, const uint8_t *unused0, const uint8_t *src1, - const uint8_t *src2, int width, uint32_t *unused) -{ - int i; - assert(src1==src2); - for (i=0; i>(RGB2YUV_SHIFT-5); - dstV[i]= (RV*r + GV*g + BV*b + (256<>(RGB2YUV_SHIFT-5); - } -} - -static void planar_rgb_to_y(uint16_t *dst, const uint8_t *src[4], int width) -{ - int i; - for (i = 0; i < width; i++) { - int g = src[0][i]; - int b = src[1][i]; - int r = src[2][i]; - - dst[i] = (RY*r + GY*g + BY*b + (0x801<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6); - } -} - -static void planar_rgb16le_to_y(uint8_t *_dst, const uint8_t *_src[4], int width) -{ - int i; - const uint16_t **src = (const uint16_t **) _src; - uint16_t *dst = (uint16_t *) _dst; - for (i = 0; i < width; i++) { - int g = AV_RL16(src[0] + i); - int b = AV_RL16(src[1] + i); - int r = AV_RL16(src[2] + i); - - dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT); - } -} - -static void planar_rgb16be_to_y(uint8_t *_dst, const uint8_t *_src[4], int width) -{ - int i; - const uint16_t **src = (const uint16_t **) _src; - uint16_t *dst = (uint16_t *) _dst; - for (i = 0; i < width; i++) { - int g = AV_RB16(src[0] + i); - int b = AV_RB16(src[1] + i); - int r = AV_RB16(src[2] + i); - - dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT); - } -} - -static void planar_rgb_to_uv(uint16_t *dstU, uint16_t *dstV, const uint8_t *src[4], int width) -{ - int i; - for (i = 0; i < width; i++) { - int g = src[0][i]; - int b = src[1][i]; - int r = src[2][i]; - - dstU[i] = (RU*r + GU*g + BU*b + (0x4001<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6); - dstV[i] = (RV*r + GV*g + BV*b + (0x4001<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6); - } -} - -static void planar_rgb16le_to_uv(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *_src[4], int width) -{ - int i; - const uint16_t **src = (const uint16_t **) _src; - uint16_t *dstU = (uint16_t *) _dstU; - uint16_t *dstV = (uint16_t *) _dstV; - for (i = 0; i < width; i++) { - int g = AV_RL16(src[0] + i); - int b = AV_RL16(src[1] + i); - int r = AV_RL16(src[2] + i); - - dstU[i] = (RU * r + GU * g + BU * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1); - dstV[i] = (RV * r + GV * g + BV * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1); - } -} - -static void planar_rgb16be_to_uv(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *_src[4], int width) -{ - int i; - const uint16_t **src = (const uint16_t **) _src; - uint16_t *dstU = (uint16_t *) _dstU; - uint16_t *dstV = (uint16_t *) _dstV; - for (i = 0; i 
< width; i++) { - int g = AV_RB16(src[0] + i); - int b = AV_RB16(src[1] + i); - int r = AV_RB16(src[2] + i); - - dstU[i] = (RU * r + GU * g + BU * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1); - dstV[i] = (RV * r + GV * g + BV * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1); - } -} - static void hScale16To19_c(SwsContext *c, int16_t *_dst, int dstW, const uint8_t *_src, const int16_t *filter, const int16_t *filterPos, int filterSize) @@ -2244,253 +309,6 @@ static av_always_inline void hcscale(SwsContext *c, int16_t *dst1, int16_t *dst2 c->chrConvertRange(dst1, dst2, dstWidth); } -static av_always_inline void -find_c_packed_planar_out_funcs(SwsContext *c, - yuv2planar1_fn *yuv2plane1, yuv2planarX_fn *yuv2planeX, - yuv2interleavedX_fn *yuv2nv12cX, - yuv2packed1_fn *yuv2packed1, yuv2packed2_fn *yuv2packed2, - yuv2packedX_fn *yuv2packedX) -{ - enum PixelFormat dstFormat = c->dstFormat; - - if (is16BPS(dstFormat)) { - *yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_c : yuv2planeX_16LE_c; - *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_c : yuv2plane1_16LE_c; - } else if (is9_OR_10BPS(dstFormat)) { - if (av_pix_fmt_descriptors[dstFormat].comp[0].depth_minus1 == 8) { - *yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_c : yuv2planeX_9LE_c; - *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_c : yuv2plane1_9LE_c; - } else { - *yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_c : yuv2planeX_10LE_c; - *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_c : yuv2plane1_10LE_c; - } - } else { - *yuv2plane1 = yuv2plane1_8_c; - *yuv2planeX = yuv2planeX_8_c; - if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21) - *yuv2nv12cX = yuv2nv12cX_c; - } - - if(c->flags & SWS_FULL_CHR_H_INT) { - switch (dstFormat) { - case PIX_FMT_RGBA: -#if CONFIG_SMALL - *yuv2packedX = yuv2rgba32_full_X_c; -#else -#if CONFIG_SWSCALE_ALPHA - if (c->alpPixBuf) { - *yuv2packedX = yuv2rgba32_full_X_c; - } else -#endif /* CONFIG_SWSCALE_ALPHA */ - { - *yuv2packedX = yuv2rgbx32_full_X_c; - } -#endif /* !CONFIG_SMALL */ - break; - case PIX_FMT_ARGB: -#if CONFIG_SMALL - *yuv2packedX = yuv2argb32_full_X_c; -#else -#if CONFIG_SWSCALE_ALPHA - if (c->alpPixBuf) { - *yuv2packedX = yuv2argb32_full_X_c; - } else -#endif /* CONFIG_SWSCALE_ALPHA */ - { - *yuv2packedX = yuv2xrgb32_full_X_c; - } -#endif /* !CONFIG_SMALL */ - break; - case PIX_FMT_BGRA: -#if CONFIG_SMALL - *yuv2packedX = yuv2bgra32_full_X_c; -#else -#if CONFIG_SWSCALE_ALPHA - if (c->alpPixBuf) { - *yuv2packedX = yuv2bgra32_full_X_c; - } else -#endif /* CONFIG_SWSCALE_ALPHA */ - { - *yuv2packedX = yuv2bgrx32_full_X_c; - } -#endif /* !CONFIG_SMALL */ - break; - case PIX_FMT_ABGR: -#if CONFIG_SMALL - *yuv2packedX = yuv2abgr32_full_X_c; -#else -#if CONFIG_SWSCALE_ALPHA - if (c->alpPixBuf) { - *yuv2packedX = yuv2abgr32_full_X_c; - } else -#endif /* CONFIG_SWSCALE_ALPHA */ - { - *yuv2packedX = yuv2xbgr32_full_X_c; - } -#endif /* !CONFIG_SMALL */ - break; - case PIX_FMT_RGB24: - *yuv2packedX = yuv2rgb24_full_X_c; - break; - case PIX_FMT_BGR24: - *yuv2packedX = yuv2bgr24_full_X_c; - break; - } - if(!*yuv2packedX) - goto YUV_PACKED; - } else { - YUV_PACKED: - switch (dstFormat) { - case PIX_FMT_RGB48LE: - *yuv2packed1 = yuv2rgb48le_1_c; - *yuv2packed2 = yuv2rgb48le_2_c; - *yuv2packedX = yuv2rgb48le_X_c; - break; - case PIX_FMT_RGB48BE: - *yuv2packed1 = yuv2rgb48be_1_c; - *yuv2packed2 = yuv2rgb48be_2_c; - *yuv2packedX = yuv2rgb48be_X_c; - break; - case PIX_FMT_BGR48LE: - *yuv2packed1 = yuv2bgr48le_1_c; - *yuv2packed2 = yuv2bgr48le_2_c; - *yuv2packedX = yuv2bgr48le_X_c; - break; - case 
PIX_FMT_BGR48BE: - *yuv2packed1 = yuv2bgr48be_1_c; - *yuv2packed2 = yuv2bgr48be_2_c; - *yuv2packedX = yuv2bgr48be_X_c; - break; - case PIX_FMT_RGB32: - case PIX_FMT_BGR32: -#if CONFIG_SMALL - *yuv2packed1 = yuv2rgb32_1_c; - *yuv2packed2 = yuv2rgb32_2_c; - *yuv2packedX = yuv2rgb32_X_c; -#else -#if CONFIG_SWSCALE_ALPHA - if (c->alpPixBuf) { - *yuv2packed1 = yuv2rgba32_1_c; - *yuv2packed2 = yuv2rgba32_2_c; - *yuv2packedX = yuv2rgba32_X_c; - } else -#endif /* CONFIG_SWSCALE_ALPHA */ - { - *yuv2packed1 = yuv2rgbx32_1_c; - *yuv2packed2 = yuv2rgbx32_2_c; - *yuv2packedX = yuv2rgbx32_X_c; - } -#endif /* !CONFIG_SMALL */ - break; - case PIX_FMT_RGB32_1: - case PIX_FMT_BGR32_1: -#if CONFIG_SMALL - *yuv2packed1 = yuv2rgb32_1_1_c; - *yuv2packed2 = yuv2rgb32_1_2_c; - *yuv2packedX = yuv2rgb32_1_X_c; -#else -#if CONFIG_SWSCALE_ALPHA - if (c->alpPixBuf) { - *yuv2packed1 = yuv2rgba32_1_1_c; - *yuv2packed2 = yuv2rgba32_1_2_c; - *yuv2packedX = yuv2rgba32_1_X_c; - } else -#endif /* CONFIG_SWSCALE_ALPHA */ - { - *yuv2packed1 = yuv2rgbx32_1_1_c; - *yuv2packed2 = yuv2rgbx32_1_2_c; - *yuv2packedX = yuv2rgbx32_1_X_c; - } -#endif /* !CONFIG_SMALL */ - break; - case PIX_FMT_RGB24: - *yuv2packed1 = yuv2rgb24_1_c; - *yuv2packed2 = yuv2rgb24_2_c; - *yuv2packedX = yuv2rgb24_X_c; - break; - case PIX_FMT_BGR24: - *yuv2packed1 = yuv2bgr24_1_c; - *yuv2packed2 = yuv2bgr24_2_c; - *yuv2packedX = yuv2bgr24_X_c; - break; - case PIX_FMT_RGB565LE: - case PIX_FMT_RGB565BE: - case PIX_FMT_BGR565LE: - case PIX_FMT_BGR565BE: - *yuv2packed1 = yuv2rgb16_1_c; - *yuv2packed2 = yuv2rgb16_2_c; - *yuv2packedX = yuv2rgb16_X_c; - break; - case PIX_FMT_RGB555LE: - case PIX_FMT_RGB555BE: - case PIX_FMT_BGR555LE: - case PIX_FMT_BGR555BE: - *yuv2packed1 = yuv2rgb15_1_c; - *yuv2packed2 = yuv2rgb15_2_c; - *yuv2packedX = yuv2rgb15_X_c; - break; - case PIX_FMT_RGB444LE: - case PIX_FMT_RGB444BE: - case PIX_FMT_BGR444LE: - case PIX_FMT_BGR444BE: - *yuv2packed1 = yuv2rgb12_1_c; - *yuv2packed2 = yuv2rgb12_2_c; - *yuv2packedX = yuv2rgb12_X_c; - break; - case PIX_FMT_RGB8: - case PIX_FMT_BGR8: - *yuv2packed1 = yuv2rgb8_1_c; - *yuv2packed2 = yuv2rgb8_2_c; - *yuv2packedX = yuv2rgb8_X_c; - break; - case PIX_FMT_RGB4: - case PIX_FMT_BGR4: - *yuv2packed1 = yuv2rgb4_1_c; - *yuv2packed2 = yuv2rgb4_2_c; - *yuv2packedX = yuv2rgb4_X_c; - break; - case PIX_FMT_RGB4_BYTE: - case PIX_FMT_BGR4_BYTE: - *yuv2packed1 = yuv2rgb4b_1_c; - *yuv2packed2 = yuv2rgb4b_2_c; - *yuv2packedX = yuv2rgb4b_X_c; - break; - } - } - switch (dstFormat) { - case PIX_FMT_GRAY16BE: - *yuv2packed1 = yuv2gray16BE_1_c; - *yuv2packed2 = yuv2gray16BE_2_c; - *yuv2packedX = yuv2gray16BE_X_c; - break; - case PIX_FMT_GRAY16LE: - *yuv2packed1 = yuv2gray16LE_1_c; - *yuv2packed2 = yuv2gray16LE_2_c; - *yuv2packedX = yuv2gray16LE_X_c; - break; - case PIX_FMT_MONOWHITE: - *yuv2packed1 = yuv2monowhite_1_c; - *yuv2packed2 = yuv2monowhite_2_c; - *yuv2packedX = yuv2monowhite_X_c; - break; - case PIX_FMT_MONOBLACK: - *yuv2packed1 = yuv2monoblack_1_c; - *yuv2packed2 = yuv2monoblack_2_c; - *yuv2packedX = yuv2monoblack_X_c; - break; - case PIX_FMT_YUYV422: - *yuv2packed1 = yuv2yuyv422_1_c; - *yuv2packed2 = yuv2yuyv422_2_c; - *yuv2packedX = yuv2yuyv422_X_c; - break; - case PIX_FMT_UYVY422: - *yuv2packed1 = yuv2uyvy422_1_c; - *yuv2packed2 = yuv2uyvy422_2_c; - *yuv2packedX = yuv2uyvy422_X_c; - break; - } -} - #define DEBUG_SWSCALE_BUFFERS 0 #define DEBUG_BUFFERS(...) 
if (DEBUG_SWSCALE_BUFFERS) av_log(c, AV_LOG_DEBUG, __VA_ARGS__) @@ -2708,8 +526,8 @@ static int swScale(SwsContext *c, const uint8_t* src[], } if (dstY >= dstH-2) { // hmm looks like we can't use MMX here without overwriting this array's tail - find_c_packed_planar_out_funcs(c, &yuv2plane1, &yuv2planeX, &yuv2nv12cX, - &yuv2packed1, &yuv2packed2, &yuv2packedX); + ff_sws_init_output_funcs(c, &yuv2plane1, &yuv2planeX, &yuv2nv12cX, + &yuv2packed1, &yuv2packed2, &yuv2packedX); use_mmx_vfilter= 0; } @@ -2727,14 +545,14 @@ static int swScale(SwsContext *c, const uint8_t* src[], vLumFilter += dstY * vLumFilterSize; vChrFilter += chrDstY * vChrFilterSize; - av_assert0(use_mmx_vfilter != ( - yuv2planeX == yuv2planeX_10BE_c - || yuv2planeX == yuv2planeX_10LE_c - || yuv2planeX == yuv2planeX_9BE_c - || yuv2planeX == yuv2planeX_9LE_c - || yuv2planeX == yuv2planeX_16BE_c - || yuv2planeX == yuv2planeX_16LE_c - || yuv2planeX == yuv2planeX_8_c) || !ARCH_X86); +// av_assert0(use_mmx_vfilter != ( +// yuv2planeX == yuv2planeX_10BE_c +// || yuv2planeX == yuv2planeX_10LE_c +// || yuv2planeX == yuv2planeX_9BE_c +// || yuv2planeX == yuv2planeX_9LE_c +// || yuv2planeX == yuv2planeX_16BE_c +// || yuv2planeX == yuv2planeX_16LE_c +// || yuv2planeX == yuv2planeX_8_c) || !ARCH_X86); if(use_mmx_vfilter){ vLumFilter= c->lumMmxFilter; @@ -2825,179 +643,11 @@ static av_cold void sws_init_swScale_c(SwsContext *c) { enum PixelFormat srcFormat = c->srcFormat; - find_c_packed_planar_out_funcs(c, &c->yuv2plane1, &c->yuv2planeX, - &c->yuv2nv12cX, &c->yuv2packed1, &c->yuv2packed2, - &c->yuv2packedX); + ff_sws_init_output_funcs(c, &c->yuv2plane1, &c->yuv2planeX, + &c->yuv2nv12cX, &c->yuv2packed1, + &c->yuv2packed2, &c->yuv2packedX); - c->chrToYV12 = NULL; - switch(srcFormat) { - case PIX_FMT_YUYV422 : c->chrToYV12 = yuy2ToUV_c; break; - case PIX_FMT_UYVY422 : c->chrToYV12 = uyvyToUV_c; break; - case PIX_FMT_NV12 : c->chrToYV12 = nv12ToUV_c; break; - case PIX_FMT_NV21 : c->chrToYV12 = nv21ToUV_c; break; - case PIX_FMT_RGB8 : - case PIX_FMT_BGR8 : - case PIX_FMT_PAL8 : - case PIX_FMT_BGR4_BYTE: - case PIX_FMT_RGB4_BYTE: c->chrToYV12 = palToUV_c; break; - case PIX_FMT_GBRP9LE: - case PIX_FMT_GBRP10LE: - case PIX_FMT_GBRP16LE: c->readChrPlanar = planar_rgb16le_to_uv; break; - case PIX_FMT_GBRP9BE: - case PIX_FMT_GBRP10BE: - case PIX_FMT_GBRP16BE: c->readChrPlanar = planar_rgb16be_to_uv; break; - case PIX_FMT_GBRP: c->readChrPlanar = planar_rgb_to_uv; break; -#if HAVE_BIGENDIAN - case PIX_FMT_YUV444P9LE: - case PIX_FMT_YUV422P9LE: - case PIX_FMT_YUV420P9LE: - case PIX_FMT_YUV422P10LE: - case PIX_FMT_YUV420P10LE: - case PIX_FMT_YUV444P10LE: - case PIX_FMT_YUV420P16LE: - case PIX_FMT_YUV422P16LE: - case PIX_FMT_YUV444P16LE: c->chrToYV12 = bswap16UV_c; break; -#else - case PIX_FMT_YUV444P9BE: - case PIX_FMT_YUV422P9BE: - case PIX_FMT_YUV420P9BE: - case PIX_FMT_YUV444P10BE: - case PIX_FMT_YUV422P10BE: - case PIX_FMT_YUV420P10BE: - case PIX_FMT_YUV420P16BE: - case PIX_FMT_YUV422P16BE: - case PIX_FMT_YUV444P16BE: c->chrToYV12 = bswap16UV_c; break; -#endif - } - if (c->chrSrcHSubSample) { - switch(srcFormat) { - case PIX_FMT_RGB48BE : c->chrToYV12 = rgb48BEToUV_half_c; break; - case PIX_FMT_RGB48LE : c->chrToYV12 = rgb48LEToUV_half_c; break; - case PIX_FMT_BGR48BE : c->chrToYV12 = bgr48BEToUV_half_c; break; - case PIX_FMT_BGR48LE : c->chrToYV12 = bgr48LEToUV_half_c; break; - case PIX_FMT_RGB32 : c->chrToYV12 = bgr32ToUV_half_c; break; - case PIX_FMT_RGB32_1 : c->chrToYV12 = bgr321ToUV_half_c; break; - case PIX_FMT_BGR24 : c->chrToYV12 = 
bgr24ToUV_half_c; break; - case PIX_FMT_BGR565LE: c->chrToYV12 = bgr16leToUV_half_c; break; - case PIX_FMT_BGR565BE: c->chrToYV12 = bgr16beToUV_half_c; break; - case PIX_FMT_BGR555LE: c->chrToYV12 = bgr15leToUV_half_c; break; - case PIX_FMT_BGR555BE: c->chrToYV12 = bgr15beToUV_half_c; break; - case PIX_FMT_BGR444LE: c->chrToYV12 = bgr12leToUV_half_c; break; - case PIX_FMT_BGR444BE: c->chrToYV12 = bgr12beToUV_half_c; break; - case PIX_FMT_BGR32 : c->chrToYV12 = rgb32ToUV_half_c; break; - case PIX_FMT_BGR32_1 : c->chrToYV12 = rgb321ToUV_half_c; break; - case PIX_FMT_RGB24 : c->chrToYV12 = rgb24ToUV_half_c; break; - case PIX_FMT_RGB565LE: c->chrToYV12 = rgb16leToUV_half_c; break; - case PIX_FMT_RGB565BE: c->chrToYV12 = rgb16beToUV_half_c; break; - case PIX_FMT_RGB555LE: c->chrToYV12 = rgb15leToUV_half_c; break; - case PIX_FMT_RGB555BE: c->chrToYV12 = rgb15beToUV_half_c; break; - case PIX_FMT_GBR24P : c->chrToYV12 = gbr24pToUV_half_c; break; - case PIX_FMT_RGB444LE: c->chrToYV12 = rgb12leToUV_half_c; break; - case PIX_FMT_RGB444BE: c->chrToYV12 = rgb12beToUV_half_c; break; - } - } else { - switch(srcFormat) { - case PIX_FMT_RGB48BE : c->chrToYV12 = rgb48BEToUV_c; break; - case PIX_FMT_RGB48LE : c->chrToYV12 = rgb48LEToUV_c; break; - case PIX_FMT_BGR48BE : c->chrToYV12 = bgr48BEToUV_c; break; - case PIX_FMT_BGR48LE : c->chrToYV12 = bgr48LEToUV_c; break; - case PIX_FMT_RGB32 : c->chrToYV12 = bgr32ToUV_c; break; - case PIX_FMT_RGB32_1 : c->chrToYV12 = bgr321ToUV_c; break; - case PIX_FMT_BGR24 : c->chrToYV12 = bgr24ToUV_c; break; - case PIX_FMT_BGR565LE: c->chrToYV12 = bgr16leToUV_c; break; - case PIX_FMT_BGR565BE: c->chrToYV12 = bgr16beToUV_c; break; - case PIX_FMT_BGR555LE: c->chrToYV12 = bgr15leToUV_c; break; - case PIX_FMT_BGR555BE: c->chrToYV12 = bgr15beToUV_c; break; - case PIX_FMT_BGR444LE: c->chrToYV12 = bgr12leToUV_c; break; - case PIX_FMT_BGR444BE: c->chrToYV12 = bgr12beToUV_c; break; - case PIX_FMT_BGR32 : c->chrToYV12 = rgb32ToUV_c; break; - case PIX_FMT_BGR32_1 : c->chrToYV12 = rgb321ToUV_c; break; - case PIX_FMT_RGB24 : c->chrToYV12 = rgb24ToUV_c; break; - case PIX_FMT_RGB565LE: c->chrToYV12 = rgb16leToUV_c; break; - case PIX_FMT_RGB565BE: c->chrToYV12 = rgb16beToUV_c; break; - case PIX_FMT_RGB555LE: c->chrToYV12 = rgb15leToUV_c; break; - case PIX_FMT_RGB555BE: c->chrToYV12 = rgb15beToUV_c; break; - case PIX_FMT_RGB444LE: c->chrToYV12 = rgb12leToUV_c; break; - case PIX_FMT_RGB444BE: c->chrToYV12 = rgb12beToUV_c; break; - } - } - - c->lumToYV12 = NULL; - c->alpToYV12 = NULL; - switch (srcFormat) { - case PIX_FMT_GBRP9LE: - case PIX_FMT_GBRP10LE: - case PIX_FMT_GBRP16LE: c->readLumPlanar = planar_rgb16le_to_y; break; - case PIX_FMT_GBRP9BE: - case PIX_FMT_GBRP10BE: - case PIX_FMT_GBRP16BE: c->readLumPlanar = planar_rgb16be_to_y; break; - case PIX_FMT_GBRP: c->readLumPlanar = planar_rgb_to_y; break; -#if HAVE_BIGENDIAN - case PIX_FMT_YUV444P9LE: - case PIX_FMT_YUV422P9LE: - case PIX_FMT_YUV420P9LE: - case PIX_FMT_YUV422P10LE: - case PIX_FMT_YUV420P10LE: - case PIX_FMT_YUV444P10LE: - case PIX_FMT_YUV420P16LE: - case PIX_FMT_YUV422P16LE: - case PIX_FMT_YUV444P16LE: - case PIX_FMT_GRAY16LE: c->lumToYV12 = bswap16Y_c; break; -#else - case PIX_FMT_YUV444P9BE: - case PIX_FMT_YUV422P9BE: - case PIX_FMT_YUV420P9BE: - case PIX_FMT_YUV444P10BE: - case PIX_FMT_YUV422P10BE: - case PIX_FMT_YUV420P10BE: - case PIX_FMT_YUV420P16BE: - case PIX_FMT_YUV422P16BE: - case PIX_FMT_YUV444P16BE: - case PIX_FMT_GRAY16BE: c->lumToYV12 = bswap16Y_c; break; -#endif - case PIX_FMT_YUYV422 : - case PIX_FMT_Y400A : 
c->lumToYV12 = yuy2ToY_c; break; - case PIX_FMT_UYVY422 : c->lumToYV12 = uyvyToY_c; break; - case PIX_FMT_BGR24 : c->lumToYV12 = bgr24ToY_c; break; - case PIX_FMT_BGR565LE : c->lumToYV12 = bgr16leToY_c; break; - case PIX_FMT_BGR565BE : c->lumToYV12 = bgr16beToY_c; break; - case PIX_FMT_BGR555LE : c->lumToYV12 = bgr15leToY_c; break; - case PIX_FMT_BGR555BE : c->lumToYV12 = bgr15beToY_c; break; - case PIX_FMT_BGR444LE : c->lumToYV12 = bgr12leToY_c; break; - case PIX_FMT_BGR444BE : c->lumToYV12 = bgr12beToY_c; break; - case PIX_FMT_RGB24 : c->lumToYV12 = rgb24ToY_c; break; - case PIX_FMT_RGB565LE : c->lumToYV12 = rgb16leToY_c; break; - case PIX_FMT_RGB565BE : c->lumToYV12 = rgb16beToY_c; break; - case PIX_FMT_RGB555LE : c->lumToYV12 = rgb15leToY_c; break; - case PIX_FMT_RGB555BE : c->lumToYV12 = rgb15beToY_c; break; - case PIX_FMT_RGB444LE : c->lumToYV12 = rgb12leToY_c; break; - case PIX_FMT_RGB444BE : c->lumToYV12 = rgb12beToY_c; break; - case PIX_FMT_RGB8 : - case PIX_FMT_BGR8 : - case PIX_FMT_PAL8 : - case PIX_FMT_BGR4_BYTE: - case PIX_FMT_RGB4_BYTE: c->lumToYV12 = palToY_c; break; - case PIX_FMT_MONOBLACK: c->lumToYV12 = monoblack2Y_c; break; - case PIX_FMT_MONOWHITE: c->lumToYV12 = monowhite2Y_c; break; - case PIX_FMT_RGB32 : c->lumToYV12 = bgr32ToY_c; break; - case PIX_FMT_RGB32_1: c->lumToYV12 = bgr321ToY_c; break; - case PIX_FMT_BGR32 : c->lumToYV12 = rgb32ToY_c; break; - case PIX_FMT_BGR32_1: c->lumToYV12 = rgb321ToY_c; break; - case PIX_FMT_RGB48BE: c->lumToYV12 = rgb48BEToY_c; break; - case PIX_FMT_RGB48LE: c->lumToYV12 = rgb48LEToY_c; break; - case PIX_FMT_BGR48BE: c->lumToYV12 = bgr48BEToY_c; break; - case PIX_FMT_BGR48LE: c->lumToYV12 = bgr48LEToY_c; break; - } - if (c->alpPixBuf) { - switch (srcFormat) { - case PIX_FMT_BGRA: - case PIX_FMT_RGBA: c->alpToYV12 = rgbaToA_c; break; - case PIX_FMT_ABGR: - case PIX_FMT_ARGB: c->alpToYV12 = abgrToA_c; break; - case PIX_FMT_Y400A: c->alpToYV12 = uyvyToY_c; break; - case PIX_FMT_PAL8 : c->alpToYV12 = palToA_c; break; - } - } + ff_sws_init_input_funcs(c); if (c->srcBpc == 8) { diff --git a/libswscale/swscale_internal.h b/libswscale/swscale_internal.h index 3c4f3953c1..80abf621f7 100644 --- a/libswscale/swscale_internal.h +++ b/libswscale/swscale_internal.h @@ -359,8 +359,8 @@ typedef struct SwsContext { #define V_TEMP "11*8+4*4*256*2+32" #define Y_TEMP "11*8+4*4*256*2+40" #define ALP_MMX_FILTER_OFFSET "11*8+4*4*256*2+48" -#define UV_OFF "11*8+4*4*256*3+48" -#define UV_OFFx2 "11*8+4*4*256*3+56" +#define UV_OFF_PX "11*8+4*4*256*3+48" +#define UV_OFF_BYTE "11*8+4*4*256*3+56" #define DITHER16 "11*8+4*4*256*3+64" #define DITHER32 "11*8+4*4*256*3+80" @@ -706,6 +706,14 @@ void ff_swscale_get_unscaled_altivec(SwsContext *c); */ SwsFunc ff_getSwsFunc(SwsContext *c); +void ff_sws_init_input_funcs(SwsContext *c); +void ff_sws_init_output_funcs(SwsContext *c, + yuv2planar1_fn *yuv2plane1, + yuv2planarX_fn *yuv2planeX, + yuv2interleavedX_fn *yuv2nv12cX, + yuv2packed1_fn *yuv2packed1, + yuv2packed2_fn *yuv2packed2, + yuv2packedX_fn *yuv2packedX); void ff_sws_init_swScale_altivec(SwsContext *c); void ff_sws_init_swScale_mmx(SwsContext *c); diff --git a/libswscale/swscale_unscaled.c b/libswscale/swscale_unscaled.c index e1ba79926d..dd7b04c1eb 100644 --- a/libswscale/swscale_unscaled.c +++ b/libswscale/swscale_unscaled.c @@ -45,6 +45,102 @@ #define RV ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5)) #define RU (-(int) (0.169 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5)) +DECLARE_ALIGNED(8, const uint8_t, dithers)[8][8][8]={ +{ + { 0, 1, 0, 1, 0, 1, 
0, 1,},
+ {  1,  0,  1,  0,  1,  0,  1,  0,},
+ {  0,  1,  0,  1,  0,  1,  0,  1,},
+ {  1,  0,  1,  0,  1,  0,  1,  0,},
+ {  0,  1,  0,  1,  0,  1,  0,  1,},
+ {  1,  0,  1,  0,  1,  0,  1,  0,},
+ {  0,  1,  0,  1,  0,  1,  0,  1,},
+ {  1,  0,  1,  0,  1,  0,  1,  0,},
+},{
+ {  1,  2,  1,  2,  1,  2,  1,  2,},
+ {  3,  0,  3,  0,  3,  0,  3,  0,},
+ {  1,  2,  1,  2,  1,  2,  1,  2,},
+ {  3,  0,  3,  0,  3,  0,  3,  0,},
+ {  1,  2,  1,  2,  1,  2,  1,  2,},
+ {  3,  0,  3,  0,  3,  0,  3,  0,},
+ {  1,  2,  1,  2,  1,  2,  1,  2,},
+ {  3,  0,  3,  0,  3,  0,  3,  0,},
+},{
+ {  2,  4,  3,  5,  2,  4,  3,  5,},
+ {  6,  0,  7,  1,  6,  0,  7,  1,},
+ {  3,  5,  2,  4,  3,  5,  2,  4,},
+ {  7,  1,  6,  0,  7,  1,  6,  0,},
+ {  2,  4,  3,  5,  2,  4,  3,  5,},
+ {  6,  0,  7,  1,  6,  0,  7,  1,},
+ {  3,  5,  2,  4,  3,  5,  2,  4,},
+ {  7,  1,  6,  0,  7,  1,  6,  0,},
+},{
+ {  4,  8,  7, 11,  4,  8,  7, 11,},
+ { 12,  0, 15,  3, 12,  0, 15,  3,},
+ {  6, 10,  5,  9,  6, 10,  5,  9,},
+ { 14,  2, 13,  1, 14,  2, 13,  1,},
+ {  4,  8,  7, 11,  4,  8,  7, 11,},
+ { 12,  0, 15,  3, 12,  0, 15,  3,},
+ {  6, 10,  5,  9,  6, 10,  5,  9,},
+ { 14,  2, 13,  1, 14,  2, 13,  1,},
+},{
+ {  9, 17, 15, 23,  8, 16, 14, 22,},
+ { 25,  1, 31,  7, 24,  0, 30,  6,},
+ { 13, 21, 11, 19, 12, 20, 10, 18,},
+ { 29,  5, 27,  3, 28,  4, 26,  2,},
+ {  8, 16, 14, 22,  9, 17, 15, 23,},
+ { 24,  0, 30,  6, 25,  1, 31,  7,},
+ { 12, 20, 10, 18, 13, 21, 11, 19,},
+ { 28,  4, 26,  2, 29,  5, 27,  3,},
+},{
+ { 18, 34, 30, 46, 17, 33, 29, 45,},
+ { 50,  2, 62, 14, 49,  1, 61, 13,},
+ { 26, 42, 22, 38, 25, 41, 21, 37,},
+ { 58, 10, 54,  6, 57,  9, 53,  5,},
+ { 16, 32, 28, 44, 19, 35, 31, 47,},
+ { 48,  0, 60, 12, 51,  3, 63, 15,},
+ { 24, 40, 20, 36, 27, 43, 23, 39,},
+ { 56,  8, 52,  4, 59, 11, 55,  7,},
+},{
+ { 18, 34, 30, 46, 17, 33, 29, 45,},
+ { 50,  2, 62, 14, 49,  1, 61, 13,},
+ { 26, 42, 22, 38, 25, 41, 21, 37,},
+ { 58, 10, 54,  6, 57,  9, 53,  5,},
+ { 16, 32, 28, 44, 19, 35, 31, 47,},
+ { 48,  0, 60, 12, 51,  3, 63, 15,},
+ { 24, 40, 20, 36, 27, 43, 23, 39,},
+ { 56,  8, 52,  4, 59, 11, 55,  7,},
+},{
+ {  36,  68,  60,  92,  34,  66,  58,  90,},
+ { 100,   4, 124,  28,  98,   2, 122,  26,},
+ {  52,  84,  44,  76,  50,  82,  42,  74,},
+ { 116,  20, 108,  12, 114,  18, 106,  10,},
+ {  32,  64,  56,  88,  38,  70,  62,  94,},
+ {  96,   0, 120,  24, 102,   6, 126,  30,},
+ {  48,  80,  40,  72,  54,  86,  46,  78,},
+ { 112,  16, 104,   8, 118,  22, 110,  14,},
+}};
+
+static const uint8_t flat64[8]={64,64,64,64,64,64,64,64};
+
+const uint16_t dither_scale[15][16]={
+{ 2, 3, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,},
+{ 2, 3, 7, 7, 13, 13, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,},
+{ 3, 3, 4, 15, 15, 29, 57, 57, 57, 113, 113, 113, 113, 113, 113, 113,},
+{ 3, 4, 4, 5, 31, 31, 61, 121, 241, 241, 241, 241, 481, 481, 481, 481,},
+{ 3, 4, 5, 5, 6, 63, 63, 125, 249, 497, 993, 993, 993, 993, 993, 1985,},
+{ 3, 5, 6, 6, 6, 7, 127, 127, 253, 505, 1009, 2017, 4033, 4033, 4033, 4033,},
+{ 3, 5, 6, 7, 7, 7, 8, 255, 255, 509, 1017, 2033, 4065, 8129,16257,16257,},
+{ 3, 5, 6, 8, 8, 8, 8, 9, 511, 511, 1021, 2041, 4081, 8161,16321,32641,},
+{ 3, 5, 7, 8, 9, 9, 9, 9, 10, 1023, 1023, 2045, 4089, 8177,16353,32705,},
+{ 3, 5, 7, 8, 10, 10, 10, 10, 10, 11, 2047, 2047, 4093, 8185,16369,32737,},
+{ 3, 5, 7, 8, 10, 11, 11, 11, 11, 11, 12, 4095, 4095, 8189,16377,32753,},
+{ 3, 5, 7, 9, 10, 12, 12, 12, 12, 12, 12, 13, 8191, 8191,16381,32761,},
+{ 3, 5, 7, 9, 10, 12, 13, 13, 13, 13, 13, 13, 14,16383,16383,32765,},
+{ 3, 5, 7, 9, 10, 12, 14, 14, 14, 14, 14, 14, 14, 15,32767,32767,},
+{ 3, 5, 7, 9, 11, 12, 14, 15, 15, 15, 15, 15, 15, 15, 16,65535,},
+};
+
+
 static void fillPlane(uint8_t *plane, int stride, int width, int height, int y,
                       uint8_t val)
 {
diff --git a/libswscale/x86/output.asm b/libswscale/x86/output.asm
index 70a2c16bcf..4b2f5c89eb 100644
--- a/libswscale/x86/output.asm
+++ b/libswscale/x86/output.asm
@@ -275,17 +275,17 @@ yuv2planeX_fn 10, 7, 5
 %macro yuv2plane1_mainloop 2
 .loop_%2:
 %if %1 == 8
-    paddsw m0, m2, [r0+r2*2+mmsize*0]
-    paddsw m1, m3, [r0+r2*2+mmsize*1]
+    paddsw m0, m2, [srcq+dstwq*2+mmsize*0]
+    paddsw m1, m3, [srcq+dstwq*2+mmsize*1]
     psraw m0, 7
     psraw m1, 7
     packuswb m0, m1
     mov%2 [r1+r2], m0
 %elif %1 == 16
-    paddd m0, m4, [r0+r2*4+mmsize*0]
-    paddd m1, m4, [r0+r2*4+mmsize*1]
-    paddd m2, m4, [r0+r2*4+mmsize*2]
-    paddd m3, m4, [r0+r2*4+mmsize*3]
+    paddd m0, m4, [srcq+dstwq*4+mmsize*0]
+    paddd m1, m4, [srcq+dstwq*4+mmsize*1]
+    paddd m2, m4, [srcq+dstwq*4+mmsize*2]
+    paddd m3, m4, [srcq+dstwq*4+mmsize*3]
     psrad m0, 3
     psrad m1, 3
     psrad m2, 3
@@ -299,46 +299,46 @@ yuv2planeX_fn 10, 7, 5
     paddw m0, m5
     paddw m2, m5
 %endif ; mmx/sse2/sse4/avx
-    mov%2 [r1+r2*2], m0
-    mov%2 [r1+r2*2+mmsize], m2
-%else
-    paddsw m0, m2, [r0+r2*2+mmsize*0]
-    paddsw m1, m2, [r0+r2*2+mmsize*1]
+    mov%2 [dstq+dstwq*2+mmsize*0], m0
+    mov%2 [dstq+dstwq*2+mmsize*1], m2
+%else ; %1 == 9/10
+    paddsw m0, m2, [srcq+dstwq*2+mmsize*0]
+    paddsw m1, m2, [srcq+dstwq*2+mmsize*1]
     psraw m0, 15 - %1
     psraw m1, 15 - %1
     pmaxsw m0, m4
     pmaxsw m1, m4
     pminsw m0, m3
     pminsw m1, m3
-    mov%2 [r1+r2*2], m0
-    mov%2 [r1+r2*2+mmsize], m1
+    mov%2 [dstq+dstwq*2+mmsize*0], m0
+    mov%2 [dstq+dstwq*2+mmsize*1], m1
 %endif
-    add r2, mmsize
+    add dstwq, mmsize
     jl .loop_%2
 %endmacro

 %macro yuv2plane1_fn 3
-cglobal yuv2plane1_%1, %3, %3, %2
-    add r2, mmsize - 1
-    and r2, ~(mmsize - 1)
+cglobal yuv2plane1_%1, %3, %3, %2, src, dst, dstw, dither, offset
+    add dstwq, mmsize - 1
+    and dstwq, ~(mmsize - 1)
 %if %1 == 8
-    add r1, r2
+    add dstq, dstwq
 %else ; %1 != 8
-    lea r1, [r1+r2*2]
+    lea dstq, [dstq+dstwq*2]
 %endif ; %1 == 8
 %if %1 == 16
-    lea r0, [r0+r2*4]
+    lea srcq, [srcq+dstwq*4]
 %else ; %1 != 16
-    lea r0, [r0+r2*2]
+    lea srcq, [srcq+dstwq*2]
 %endif ; %1 == 16
-    neg r2
+    neg dstwq
 %if %1 == 8
     pxor m4, m4 ; zero

     ; create registers holding dither
-    movq m3, [r3] ; dither
-    test r4d, r4d
+    movq m3, [ditherq] ; dither
+    test offsetd, offsetd
     jz .no_rot
 %if mmsize == 16
     punpcklqdq m3, m3
@@ -374,7 +374,7 @@ cglobal yuv2plane1_%1, %3, %3, %2
 %if mmsize == 8
     yuv2plane1_mainloop %1, a
 %else ; mmsize == 16
-    test r1, 15
+    test dstq, 15
     jnz .unaligned
     yuv2plane1_mainloop %1, a
     REP_RET
diff --git a/libswscale/x86/swscale_template.c b/libswscale/x86/swscale_template.c
index 8a98c7b924..d9e5cbbf44 100644
--- a/libswscale/x86/swscale_template.c
+++ b/libswscale/x86/swscale_template.c
@@ -762,10 +762,10 @@ static void RENAME(yuv2yuyv422_X)(SwsContext *c, const int16_t *lumFilter,
     "1: \n\t"\
     "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
     "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
-    "add "UV_OFFx2"("#c"), "#index" \n\t" \
+    "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \
     "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
     "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
-    "sub "UV_OFFx2"("#c"), "#index" \n\t" \
+    "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \
     "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
     "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
     "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
@@ -993,10 +993,10 @@ static void RENAME(yuv2rgb565_2)(SwsContext *c, const int16_t *buf[2],
     "1: \n\t"\
     "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
     "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
-    "add "UV_OFFx2"("#c"), "#index" \n\t" \
+    "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \
     "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
"movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\ - "sub "UV_OFFx2"("#c"), "#index" \n\t" \ + "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \ "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\ "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\ "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\ @@ -1048,9 +1048,9 @@ static void RENAME(yuv2yuyv422_2)(SwsContext *c, const int16_t *buf[2], ".p2align 4 \n\t"\ "1: \n\t"\ "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\ - "add "UV_OFFx2"("#c"), "#index" \n\t" \ + "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \ "movq (%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\ - "sub "UV_OFFx2"("#c"), "#index" \n\t" \ + "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \ "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\ "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\ "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\ @@ -1101,10 +1101,10 @@ static void RENAME(yuv2yuyv422_2)(SwsContext *c, const int16_t *buf[2], "1: \n\t"\ "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\ "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\ - "add "UV_OFFx2"("#c"), "#index" \n\t" \ + "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \ "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\ "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\ - "sub "UV_OFFx2"("#c"), "#index" \n\t" \ + "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\ "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\ "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\ @@ -1368,9 +1368,9 @@ static void RENAME(yuv2rgb565_1)(SwsContext *c, const int16_t *buf0, ".p2align 4 \n\t"\ "1: \n\t"\ "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\ - "add "UV_OFFx2"("#c"), "#index" \n\t" \ + "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \ "movq (%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\ - "sub "UV_OFFx2"("#c"), "#index" \n\t" \ + "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \ "psraw $7, %%mm3 \n\t" \ "psraw $7, %%mm4 \n\t" \ "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\ @@ -1386,10 +1386,10 @@ static void RENAME(yuv2rgb565_1)(SwsContext *c, const int16_t *buf0, "1: \n\t"\ "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\ "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\ - "add "UV_OFFx2"("#c"), "#index" \n\t" \ + "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \ "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\ "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\ - "sub "UV_OFFx2"("#c"), "#index" \n\t" \ + "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\ "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\ "psrlw $8, %%mm3 \n\t" \