/*
 * DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DSP utils
 */

#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "dsputil.h"
#include "simple_idct.h"
#include "faandct.h"
#include "faanidct.h"
#include "mpegvideo.h"
#include "config.h"
#include "high_bit_depth.h"

/* Clipping table biased by MAX_NEG_CROP; zero-initialized here and
   presumably filled by init code elsewhere -- not visible in this chunk. */
uint8_t ff_cropTbl[256 + 2 * MAX_NEG_CROP] = {0, };
/* Square lookup table (512 entries); zero-initialized here, presumably
   filled at init time elsewhere. */
uint32_t ff_squareTbl[512] = {0, };

#define BIT_DEPTH 8
//#include "dsputil_template.c"

// 0x7f7f7f7f or 0x7f7f7f7f7f7f7f7f or whatever, depending on the cpu's native arithmetic size
#define pb_7f (~0UL/255 * 0x7f)
#define pb_80 (~0UL/255 * 0x80)

/* Classic zigzag scan order: maps scan position -> raster index within an
   8x8 coefficient block. */
const uint8_t ff_zigzag_direct[64] =
{
    0,   1,  8, 16,  9,  2,  3, 10,
    17, 24, 32, 25, 18, 11,  4,  5,
    12, 19, 26, 33, 40, 48, 41, 34,
    27, 20, 13,  6,  7, 14, 21, 28,
    35, 42, 49, 56, 57, 50, 43, 36,
    29, 22, 15, 23, 30, 37, 44, 51,
    58, 59, 52, 45, 38, 31, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63
};

/* Specific zigzag scan for 248 idct. NOTE that unlike the
   specification, we interleave the fields */
/* 2-4-8 idct scan with interleaved fields: scan position -> raster index. */
const uint8_t ff_zigzag248_direct[64] =
{
    0,  8,  1,  9, 16, 24,  2, 10,
    17, 25, 32, 40, 48, 56, 33, 41,
    18, 26,  3, 11,  4, 12, 19, 27,
    34, 42, 49, 57, 50, 58, 35, 43,
    20, 28,  5, 13,  6, 14, 21, 29,
    36, 44, 51, 59, 52, 60, 37, 45,
    22, 30,  7, 15, 23, 31, 38, 46,
    53, 61, 54, 62, 39, 47, 55, 63,
};

/* not permutated inverse zigzag_direct + 1 for MMX quantizer */
/* 16-byte aligned for SIMD loads; presumably filled at init time -- not
   visible in this chunk. */
DECLARE_ALIGNED(16, uint16_t, inv_zigzag_direct16)[64];

/* Alternate horizontal scan: scan position -> raster index within an 8x8
   block (alternate-scan coding modes). */
const uint8_t ff_alternate_horizontal_scan[64] =
{
    0,  1,   2,  3,  8,  9, 16, 17,
    10, 11,  4,  5,  6,  7, 15, 14,
    13, 12, 19, 18, 24, 25, 32, 33,
    26, 27, 20, 21, 22, 23, 28, 29,
    30, 31, 34, 35, 40, 41, 48, 49,
    42, 43, 36, 37, 38, 39, 44, 45,
    46, 47, 50, 51, 56, 57, 58, 59,
    52, 53, 54, 55, 60, 61, 62, 63,
};

/* Alternate vertical scan: scan position -> raster index within an 8x8
   block (alternate-scan coding modes). */
const uint8_t ff_alternate_vertical_scan[64] =
{
    0,  8,  16, 24,  1,  9,  2, 10,
    17, 25, 32, 40, 48, 56, 57, 49,
    41, 33, 26, 18,  3, 11,  4, 12,
    19, 27, 34, 42, 50, 58, 35, 43,
    51, 59, 20, 28,  5, 13,  6, 14,
    21, 29, 36, 44, 52, 60, 37, 45,
    53, 61, 22, 30,  7, 15, 23, 31,
    38, 46, 54, 62, 39, 47, 55, 63,
};

/* Input permutation for the simple_idct_mmx */
/* Each entry is a 6-bit coefficient index (0x00..0x3F): the source position
   feeding slot i of the simple_idct_mmx input permutation. */
static const uint8_t simple_mmx_permutation[64] =
{
    0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D,
    0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D,
    0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D,
    0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F,
    0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F,
    0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D,
    0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F,
    0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F,
};

/* Copy an h-row block, 2 pixels per row, between byte-strided planes. */
static inline void FUNC(copy_block2)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
{
    int rows = h;
    while (rows-- > 0)
    {
        AV_WN2P(dst, AV_RN2P(src));
        dst += dstStride;
        src += srcStride;
    }
}

/* Copy an h-row block, 4 pixels per row, between byte-strided planes. */
static inline void FUNC(copy_block4)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
{
    int rows = h;
    while (rows-- > 0)
    {
        AV_WN4P(dst, AV_RN4P(src));
        dst += dstStride;
        src += srcStride;
    }
}

/* Copy an h-row block, 8 pixels per row, in two 4-pixel chunks. */
static inline void FUNC(copy_block8)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
{
    int row, part;
    for (row = 0; row < h; row++)
    {
        for (part = 0; part < 2; part++)
            AV_WN4P(dst + part * 4 * sizeof(pixel),
                    AV_RN4P(src + part * 4 * sizeof(pixel)));
        dst += dstStride;
        src += srcStride;
    }
}

/* Copy an h-row block, 16 pixels per row, in four 4-pixel chunks. */
static inline void FUNC(copy_block16)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
{
    int row, part;
    for (row = 0; row < h; row++)
    {
        for (part = 0; part < 4; part++)
            AV_WN4P(dst + part * 4 * sizeof(pixel),
                    AV_RN4P(src + part * 4 * sizeof(pixel)));
        dst += dstStride;
        src += srcStride;
    }
}

/* draw the edges of width 'w' of an image of size width, height */
//FIXME check that this is ok for mpeg4 interlaced
static void FUNCC(draw_edges)(uint8_t *p_buf, int p_wrap, int width, int height, int w, int h, int sides)
{
    pixel *buf = (pixel *)p_buf;
    int wrap = p_wrap / sizeof(pixel); /* byte stride -> stride in pixels */
    pixel *ptr, *last_line;
    int i;

    /* left and right: replicate each row's first/last sample outward.
       NOTE(review): memset writes w BYTES, not w pixels -- only correct at
       8-bit depth (BIT_DEPTH is 8 above); confirm before reusing for >8 bit. */
    ptr = buf;
    for(i = 0; i < height; i++)
    {
        memset(ptr - w, ptr[0], w);
        memset(ptr + width, ptr[width-1], w);
        ptr += wrap;
    }

    /* top and bottom + corners: replicate the full first/last rows,
       including the left/right margins just drawn */
    buf -= w;
    last_line = buf + (height - 1) * wrap;
    if (sides & EDGE_TOP)
        for(i = 0; i < h; i++)
            memcpy(buf - (i + 1) * wrap, buf, (width + w + w) * sizeof(pixel)); // top
    if (sides & EDGE_BOTTOM)
        for (i = 0; i < h; i++)
            memcpy(last_line + (i + 1) * wrap, last_line, (width + w + w) * sizeof(pixel)); // bottom
}

/**
 * Copy a rectangular area of samples to a temporary buffer and replicate the border samples.
 * @param buf destination buffer
 * @param src source buffer
 * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
 * @param block_w width of block
 * @param block_h height of block
 * @param src_x x coordinate of the top left sample of the block in the source buffer
 * @param src_y y coordinate of the top left sample of the block in the source buffer
 * @param w width of the source buffer
 * @param h height of the source buffer
 */
void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src, int linesize, int block_w, int block_h,
                               int src_x, int src_y, int w, int h)
{
    int x, y;
    int start_y, start_x, end_y, end_x;

    /* Clamp the requested position so at least one source row/column
       overlaps the picture, shifting src to match. */
    if(src_y >= h)
    {
        src += (h - 1 - src_y) * linesize;
        src_y = h - 1;
    }
    else if(src_y <= -block_h)
    {
        src += (1 - block_h - src_y) * linesize;
        src_y = 1 - block_h;
    }
    if(src_x >= w)
    {
        src += (w - 1 - src_x) * sizeof(pixel);
        src_x = w - 1;
    }
    else if(src_x <= -block_w)
    {
        src += (1 - block_w - src_x) * sizeof(pixel);
        src_x = 1 - block_w;
    }

    /* Sub-rectangle of the block covered by real picture samples. */
    start_y = FFMAX(0, -src_y);
    start_x = FFMAX(0, -src_x);
    end_y = FFMIN(block_h, h - src_y);
    end_x = FFMIN(block_w, w - src_x);
    assert(start_y < end_y && block_h);
    assert(start_x < end_x && block_w);

    w    = end_x - start_x; /* w is reused: width of the valid region */
    src += start_y * linesize + start_x * sizeof(pixel);
    buf += start_x * sizeof(pixel);

    //top: replicate the first valid row upward
    for(y = 0; y < start_y; y++)
    {
        memcpy(buf, src, w * sizeof(pixel));
        buf += linesize;
    }

    // copy existing part
    for(; y < end_y; y++)
    {
        memcpy(buf, src, w * sizeof(pixel));
        src += linesize;
        buf += linesize;
    }

    //bottom: replicate the last valid row downward
    src -= linesize;
    for(; y < block_h; y++)
    {
        memcpy(buf, src, w * sizeof(pixel));
        buf += linesize;
    }

    /* Second pass: extend every row sideways by replicating the
       leftmost/rightmost valid samples. */
    buf -= block_h * linesize + start_x * sizeof(pixel);
    while (block_h--)
    {
        pixel *bufp = (pixel *)buf;
        //left
        for(x = 0; x < start_x; x++)
        {
            bufp[x] = bufp[start_x];
        }

        //right
        for(x = end_x; x < block_w; x++)
        {
            bufp[x] = bufp[end_x - 1];
        }
        buf += linesize;
    }
}

/* Add an 8x8 block of DCT coefficients onto an 8x8 pixel block.
   line_size is given in bytes and converted to a pixel stride. */
static void FUNCC(add_pixels8)(uint8_t *restrict p_pixels, DCTELEM *p_block, int line_size)
{
    int row, col;
    pixel *restrict pixels = (pixel * restrict)p_pixels;
    dctcoef *block = (dctcoef *)p_block;
    line_size >>= sizeof(pixel) - 1;

    for (row = 0; row < 8; row++)
    {
        for (col = 0; col < 8; col++)
            pixels[col] += block[col];
        pixels += line_size;
        block += 8;
    }
}

/* Add a 4x4 block of DCT coefficients onto a 4x4 pixel block.
   line_size is given in bytes and converted to a pixel stride. */
static void FUNCC(add_pixels4)(uint8_t *restrict p_pixels, DCTELEM *p_block, int line_size)
{
    int row, col;
    pixel *restrict pixels = (pixel * restrict)p_pixels;
    dctcoef *block = (dctcoef *)p_block;
    line_size >>= sizeof(pixel) - 1;

    for (row = 0; row < 4; row++)
    {
        for (col = 0; col < 4; col++)
            pixels[col] += block[col];
        pixels += line_size;
        block += 4;
    }
}

#if 0 /* disabled 2-wide rounding average; reason not visible in this chunk */
static void avg_pixels2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    int i;
    for(i = 0; i < h; i++)
    {
        *((uint16_t *)(block )) = rnd_avg32(*((uint16_t *)(block )), (((const union unaligned_16 *) (pixels))->l));
        pixels += line_size;
        block += line_size;
    }
}
#endif
/* Rounding-average a 4-byte-wide, h-row source block into dst. */
static void avg_pixels4_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    int row;
    for (row = h; row > 0; row--)
    {
        uint32_t src = ((const union unaligned_32 *)pixels)->l;
        uint32_t *dst = (uint32_t *)block;
        *dst = rnd_avg32(*dst, src);
        pixels += line_size;
        block += line_size;
    }
}
/* Rounding-average an 8-byte-wide, h-row source block into dst,
   processed as two 32-bit halves per row. */
static void avg_pixels8_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    int row;
    for (row = 0; row < h; row++)
    {
        uint32_t lo = ((const union unaligned_32 *)pixels)->l;
        uint32_t hi = ((const union unaligned_32 *)(pixels + 4))->l;
        *(uint32_t *)block = rnd_avg32(*(uint32_t *)block, lo);
        *(uint32_t *)(block + 4) = rnd_avg32(*(uint32_t *)(block + 4), hi);
        pixels += line_size;
        block += line_size;
    }
}
/* NOTE(review): forwards to the rounding average -- confirm this is
   intended for the no-rnd entry point. */
static inline void avg_no_rnd_pixels8_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    avg_pixels8_8_c(block, pixels, line_size, h);
}
/* Blend two 8-byte-wide sources with a no-rounding average, then
   rounding-average that result into dst, row by row. */
static inline void avg_no_rnd_pixels8_l2_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h)
{
    int row, half;
    for (row = 0; row < h; row++)
    {
        const uint8_t *r1 = src1 + row * src_stride1;
        const uint8_t *r2 = src2 + row * src_stride2;
        uint8_t *d = dst + row * dst_stride;
        for (half = 0; half < 8; half += 4)
        {
            uint32_t a = ((const union unaligned_32 *)(r1 + half))->l;
            uint32_t b = ((const union unaligned_32 *)(r2 + half))->l;
            uint32_t *out = (uint32_t *)(d + half);
            *out = rnd_avg32(*out, no_rnd_avg32(a, b));
        }
    }
}
/* Blend two 8-byte-wide sources with a rounding average, then
   rounding-average that result into dst, row by row. */
static inline void avg_pixels8_l2_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h)
{
    int row, half;
    for (row = 0; row < h; row++)
    {
        const uint8_t *r1 = src1 + row * src_stride1;
        const uint8_t *r2 = src2 + row * src_stride2;
        uint8_t *d = dst + row * dst_stride;
        for (half = 0; half < 8; half += 4)
        {
            uint32_t a = ((const union unaligned_32 *)(r1 + half))->l;
            uint32_t b = ((const union unaligned_32 *)(r2 + half))->l;
            uint32_t *out = (uint32_t *)(d + half);
            *out = rnd_avg32(*out, rnd_avg32(a, b));
        }
    }
}
/* Blend two 4-byte-wide sources (rounding average) into dst (rounding). */
static inline void avg_pixels4_l2_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h)
{
    int row;
    for (row = 0; row < h; row++)
    {
        uint32_t a = ((const union unaligned_32 *)(src1 + row * src_stride1))->l;
        uint32_t b = ((const union unaligned_32 *)(src2 + row * src_stride2))->l;
        uint32_t *out = (uint32_t *)(dst + row * dst_stride);
        *out = rnd_avg32(*out, rnd_avg32(a, b));
    }
}
/* Blend two 2-byte-wide sources (rounding average) into dst (rounding). */
static inline void avg_pixels2_l2_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h)
{
    int row;
    for (row = 0; row < h; row++)
    {
        uint32_t a = ((const union unaligned_16 *)(src1 + row * src_stride1))->l;
        uint32_t b = ((const union unaligned_16 *)(src2 + row * src_stride2))->l;
        uint16_t *out = (uint16_t *)(dst + row * dst_stride);
        *out = rnd_avg32(*out, rnd_avg32(a, b));
    }
}
/* 16-wide two-source average: processed as two 8-wide halves. */
static inline void avg_pixels16_l2_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h)
{
    avg_pixels8_l2_8(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);
    avg_pixels8_l2_8(dst + 8 * sizeof(uint8_t), src1 + 8 * sizeof(uint8_t), src2 + 8 * sizeof(uint8_t), dst_stride, src_stride1, src_stride2, h);
}
/* 16-wide no-rounding two-source average: two 8-wide halves. */
static inline void avg_no_rnd_pixels16_l2_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h)
{
    avg_no_rnd_pixels8_l2_8(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);
    avg_no_rnd_pixels8_l2_8(dst + 8 * sizeof(uint8_t), src1 + 8 * sizeof(uint8_t), src2 + 8 * sizeof(uint8_t), dst_stride
                            , src_stride1, src_stride2, h);
}
/* x2: blend src with src+1 (horizontal half-sample), no-rounding. */
static inline void avg_no_rnd_pixels8_x2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    avg_no_rnd_pixels8_l2_8(block, pixels, pixels + sizeof(uint8_t), line_size, line_size, line_size, h);
}
/* x2: blend src with src+1 (horizontal half-sample), rounding. */
static inline void avg_pixels8_x2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    avg_pixels8_l2_8(block, pixels, pixels + sizeof(uint8_t), line_size, line_size, line_size, h);
}
/* y2: blend src with src+stride (vertical half-sample), no-rounding. */
static inline void avg_no_rnd_pixels8_y2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    avg_no_rnd_pixels8_l2_8(block, pixels, pixels + line_size, line_size, line_size, line_size, h);
}
/* y2: blend src with src+stride (vertical half-sample), rounding. */
static inline void avg_pixels8_y2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    avg_pixels8_l2_8(block, pixels, pixels + line_size, line_size, line_size, line_size, h);
}
/* Rounded per-byte mean of FOUR 8-wide sources, rounding-averaged into dst.
   SWAR over 32-bit words: l* accumulate the low 2 bits of each byte (plus
   the 0x02 rounding bias), h* the high 6 bits pre-shifted by 2; the final
   sum recombines them into the per-byte (a+b+c+d+2)>>2. */
static inline void avg_pixels8_l4_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4, int dst_stride, int src_stride1, int src_stride2, int src_stride3, int src_stride4, int h)
{
    int i;
    for(i = 0; i < h; i++)
    {
        uint32_t a, b, c, d, l0, l1, h0, h1;
        a = (((const union unaligned_32 *) (&src1[i*src_stride1]))->l);
        b = (((const union unaligned_32 *) (&src2[i*src_stride2]))->l);
        c = (((const union unaligned_32 *) (&src3[i*src_stride3]))->l);
        d = (((const union unaligned_32 *) (&src4[i*src_stride4]))->l);
        l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
        h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        l1 = (c & 0x03030303UL) + (d & 0x03030303UL);
        h1 = ((c & 0xFCFCFCFCUL) >> 2) + ((d & 0xFCFCFCFCUL) >> 2);
        *((uint32_t *)&dst[i*dst_stride]) = rnd_avg32(*((uint32_t *)&dst[i*dst_stride]), h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));
        /* second 4-byte half of the row */
        a = (((const union unaligned_32 *) (&src1[i*src_stride1+4]))->l);
        b = (((const union unaligned_32 *) (&src2[i*src_stride2+4]))->l);
        c = (((const union unaligned_32 *) (&src3[i*src_stride3+4]))->l);
        d = (((const union unaligned_32 *) (&src4[i*src_stride4+4]))->l);
        l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
        h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        l1 = (c & 0x03030303UL) + (d & 0x03030303UL);
        h1 = ((c & 0xFCFCFCFCUL) >> 2) + ((d & 0xFCFCFCFCUL) >> 2);
        *((uint32_t *)&dst[i*dst_stride+4]) = rnd_avg32(*((uint32_t *)&dst[i*dst_stride+4]), h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));
    }
}
/* 4-wide x2: blend src with src+1 (horizontal half-sample), rounding. */
static inline void avg_pixels4_x2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    avg_pixels4_l2_8(block, pixels, pixels + sizeof(uint8_t), line_size, line_size, line_size, h);
}
/* 4-wide y2: blend src with src+stride (vertical half-sample), rounding. */
static inline void avg_pixels4_y2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    avg_pixels4_l2_8(block, pixels, pixels + line_size, line_size, line_size, line_size, h);
}
/* 2-wide x2 variant. */
static inline void avg_pixels2_x2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    avg_pixels2_l2_8(block, pixels, pixels + sizeof(uint8_t), line_size, line_size, line_size, h);
}
/* 2-wide y2 variant. */
static inline void avg_pixels2_y2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    avg_pixels2_l2_8(block, pixels, pixels + line_size, line_size, line_size, line_size, h);
}
/* Like avg_pixels8_l4_8, but with the no-rounding bias 0x01 per byte
   (truncating mean of the four sources) before averaging into dst. */
static inline void avg_no_rnd_pixels8_l4_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4, int dst_stride, int src_stride1, int src_stride2, int src_stride3, int src_stride4, int h)
{
    int i;
    for(i = 0; i < h; i++)
    {
        uint32_t a, b, c, d, l0, l1, h0, h1;
        a = (((const union unaligned_32 *) (&src1[i*src_stride1]))->l);
        b = (((const union unaligned_32 *) (&src2[i*src_stride2]))->l);
        c = (((const union unaligned_32 *) (&src3[i*src_stride3]))->l);
        d = (((const union unaligned_32 *) (&src4[i*src_stride4]))->l);
        l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
        h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        l1 = (c & 0x03030303UL) + (d & 0x03030303UL);
        h1 = ((c & 0xFCFCFCFCUL) >> 2) + ((d & 0xFCFCFCFCUL) >> 2);
        *((uint32_t *)&dst[i*dst_stride]) = rnd_avg32(*((uint32_t *)&dst[i*dst_stride]), h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));
        /* second 4-byte half of the row */
        a = (((const union unaligned_32 *) (&src1[i*src_stride1+4]))->l);
        b = (((const union unaligned_32 *) (&src2[i*src_stride2+4]))->l);
        c = (((const union unaligned_32 *) (&src3[i*src_stride3+4]))->l);
        d = (((const union
               unaligned_32 *) (&src4[i*src_stride4+4]))->l);
        l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
        h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        l1 = (c & 0x03030303UL) + (d & 0x03030303UL);
        h1 = ((c & 0xFCFCFCFCUL) >> 2) + ((d & 0xFCFCFCFCUL) >> 2);
        *((uint32_t *)&dst[i*dst_stride+4]) = rnd_avg32(*((uint32_t *)&dst[i*dst_stride+4]), h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));
    }
}
/* 16-wide four-source average: two 8-wide halves. */
static inline void avg_pixels16_l4_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4, int dst_stride, int src_stride1, int src_stride2, int src_stride3, int src_stride4, int h)
{
    avg_pixels8_l4_8(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);
    avg_pixels8_l4_8(dst + 8 * sizeof(uint8_t), src1 + 8 * sizeof(uint8_t), src2 + 8 * sizeof(uint8_t), src3 + 8 * sizeof(uint8_t), src4 + 8 * sizeof(uint8_t), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);
}
/* 16-wide no-rounding four-source average: two 8-wide halves. */
static inline void avg_no_rnd_pixels16_l4_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4, int dst_stride, int src_stride1, int src_stride2, int src_stride3, int src_stride4, int h)
{
    avg_no_rnd_pixels8_l4_8(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);
    avg_no_rnd_pixels8_l4_8(dst + 8 * sizeof(uint8_t), src1 + 8 * sizeof(uint8_t), src2 + 8 * sizeof(uint8_t), src3 + 8 * sizeof(uint8_t), src4 + 8 * sizeof(uint8_t), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);
}
/**
 * 2-pixel-wide 2D half-sample (x+1/2, y+1/2) interpolation, rounding-averaged
 * into the destination.
 *
 * Each output byte becomes avg(dst, (p[x,y]+p[x+1,y]+p[x,y+1]+p[x+1,y+1]+2)>>2)
 * with avg(a,b) = (a+b+1)>>1, matching the sibling avg_*_xy2 routines (see
 * avg_pixels4_xy2_8_c), which all average into dst.
 *
 * Fix: the previous version stored the interpolated value directly -- put
 * semantics that discarded the existing destination (the long-standing
 * "FIXME non put" defect in the upstream template).
 */
static inline void avg_pixels2_xy2_8_c(uint8_t *p_block, const uint8_t *p_pixels, int line_size, int h)
{
    int i, a0, b0, a1, b1;
    uint8_t *block = (uint8_t *)p_block;
    const uint8_t *pixels = (const uint8_t *)p_pixels;
    line_size >>= sizeof(uint8_t) - 1; /* no-op at 8-bit; template leftover */
    /* prime the pipeline with row 0; b0 carries the +2 rounding bias */
    a0 = pixels[0];
    b0 = pixels[1] + 2;
    a0 += b0;
    b0 += pixels[2];
    pixels += line_size;
    for(i = 0; i < h; i += 2)
    {
        a1 = pixels[0];
        b1 = pixels[1];
        a1 += b1;
        b1 += pixels[2];
        block[0] = (block[0] + ((a1 + a0) >> 2) + 1) >> 1;
        block[1] = (block[1] + ((b1 + b0) >> 2) + 1) >> 1;
        pixels += line_size;
        block += line_size;
        a0 = pixels[0];
        b0 = pixels[1] + 2;
        a0 += b0;
        b0 += pixels[2];
        block[0] = (block[0] + ((a1 + a0) >> 2) + 1) >> 1;
        block[1] = (block[1] + ((b1 + b0) >> 2) + 1) >> 1;
        pixels += line_size;
        block += line_size;
    }
}
/* 4-wide 2D half-sample (x+1/2, y+1/2) interpolation, rounding-averaged into
   block. SWAR over one 32-bit word: l* hold the low 2 bits of each byte plus
   the 0x02 rounding bias, h* the high 6 bits pre-shifted by 2. Rows are
   pipelined (l0/h0 carry the previous row) so each source row loads once. */
static inline void avg_pixels4_xy2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    int i;
    const uint32_t a = (((const union unaligned_32 *) (pixels))->l);
    const uint32_t b = (((const union unaligned_32 *) (pixels + 1))->l);
    uint32_t l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
    uint32_t h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
    uint32_t l1, h1;
    pixels += line_size;
    for(i = 0; i < h; i += 2)
    {
        uint32_t a = (((const union unaligned_32 *) (pixels))->l);
        uint32_t b = (((const union unaligned_32 *) (pixels + 1))->l);
        l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
        h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        *((uint32_t *)block) = rnd_avg32(*((uint32_t *)block), h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));
        pixels += line_size;
        block += line_size;
        a = (((const union unaligned_32 *) (pixels))->l);
        b = (((const union unaligned_32 *) (pixels + 1))->l);
        l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
        h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        *((uint32_t *)block) = rnd_avg32(*((uint32_t *)block), h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));
        pixels += line_size;
        block += line_size;
    }
}
/* 8-wide 2D half-sample interpolation, rounding-averaged into block.
   The j loop covers the two 4-byte halves of the row; the final
   "+= 4 - line_size * ..." steps rewind to the top of the second half.
   Same SWAR pipeline as avg_pixels4_xy2_8_c. */
static inline void avg_pixels8_xy2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    int j;
    for(j = 0; j < 2; j++)
    {
        int i;
        const uint32_t a = (((const union unaligned_32 *) (pixels))->l);
        const uint32_t b = (((const union unaligned_32 *) (pixels + 1))->l);
        uint32_t l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
        uint32_t h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        uint32_t l1, h1;
        pixels += line_size;
        for(i = 0; i < h; i += 2)
        {
            uint32_t a = (((const union unaligned_32 *) (pixels))->l);
            uint32_t b = (((const union unaligned_32 *) (pixels + 1))->l);
            l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
            h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *)block) = rnd_avg32(*((uint32_t *)block), h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));
            pixels += line_size;
            block += line_size;
            a = (((const union unaligned_32 *) (pixels))->l);
            b = (((const union unaligned_32 *) (pixels + 1))->l)
                ;
            l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
            h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *)block) = rnd_avg32(*((uint32_t *)block), h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));
            pixels += line_size;
            block += line_size;
        }
        /* move to the second 4-byte half: one extra source row was consumed */
        pixels += 4 - line_size * (h + 1);
        block += 4 - line_size * h;
    }
}
/* Like avg_pixels8_xy2_8_c but with the no-rounding bias 0x01 per byte
   (truncating xy2 interpolation) before averaging into block. */
static inline void avg_no_rnd_pixels8_xy2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    int j;
    for(j = 0; j < 2; j++)
    {
        int i;
        const uint32_t a = (((const union unaligned_32 *) (pixels))->l);
        const uint32_t b = (((const union unaligned_32 *) (pixels + 1))->l);
        uint32_t l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
        uint32_t h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        uint32_t l1, h1;
        pixels += line_size;
        for(i = 0; i < h; i += 2)
        {
            uint32_t a = (((const union unaligned_32 *) (pixels))->l);
            uint32_t b = (((const union unaligned_32 *) (pixels + 1))->l);
            l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
            h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *)block) = rnd_avg32(*((uint32_t *)block), h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));
            pixels += line_size;
            block += line_size;
            a = (((const union unaligned_32 *) (pixels))->l);
            b = (((const union unaligned_32 *) (pixels + 1))->l);
            l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
            h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *)block) = rnd_avg32(*((uint32_t *)block), h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));
            pixels += line_size;
            block += line_size;
        }
        /* move to the second 4-byte half: one extra source row was consumed */
        pixels += 4 - line_size * (h + 1);
        block += 4 - line_size * h;
    }
}
/* 16-wide rounding average: two 8-wide halves. */
static void avg_pixels16_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    avg_pixels8_8_c(block , pixels , line_size, h);
    avg_pixels8_8_c(block + 8 * sizeof(uint8_t), pixels + 8 * sizeof(uint8_t), line_size, h);
}
/* NOTE(review): no-rnd 16-wide variant also forwards to the rounding
   average -- confirm intended. */
__attribute__((unused)) static void avg_no_rnd_pixels16_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    avg_pixels8_8_c(block , pixels , line_size, h);
    avg_pixels8_8_c(block + 8 * sizeof(uint8_t), pixels + 8 * sizeof(uint8_t), line_size, h);
}

/* Copy a 2-byte-wide, h-row block from pixels to block. */
static void put_pixels2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    int row;
    for (row = h; row > 0; row--)
    {
        *(uint16_t *)block = ((const union unaligned_16 *)pixels)->l;
        pixels += line_size;
        block += line_size;
    }
}
/* Copy a 4-byte-wide, h-row block from pixels to block. */
static void put_pixels4_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    int row;
    for (row = h; row > 0; row--)
    {
        *(uint32_t *)block = ((const union unaligned_32 *)pixels)->l;
        pixels += line_size;
        block += line_size;
    }
}
/* Copy an 8-byte-wide, h-row block, two 32-bit halves per row. */
static void put_pixels8_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    int row;
    for (row = 0; row < h; row++)
    {
        uint32_t lo = ((const union unaligned_32 *)pixels)->l;
        uint32_t hi = ((const union unaligned_32 *)(pixels + 4))->l;
        *(uint32_t *)block = lo;
        *(uint32_t *)(block + 4) = hi;
        pixels += line_size;
        block += line_size;
    }
}
/* A plain copy involves no rounding, so the no-rnd put is the normal put. */
static inline void put_no_rnd_pixels8_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    put_pixels8_8_c(block, pixels, line_size, h);
}
/* Store the no-rounding average of two 8-byte-wide sources into dst. */
static inline void put_no_rnd_pixels8_l2_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h)
{
    int row, half;
    for (row = 0; row < h; row++)
    {
        const uint8_t *r1 = src1 + row * src_stride1;
        const uint8_t *r2 = src2 + row * src_stride2;
        uint8_t *d = dst + row * dst_stride;
        for (half = 0; half < 8; half += 4)
        {
            uint32_t a = ((const union unaligned_32 *)(r1 + half))->l;
            uint32_t b = ((const union unaligned_32 *)(r2 + half))->l;
            *(uint32_t *)(d + half) = no_rnd_avg32(a, b);
        }
    }
}
/* Store the rounding average of two 8-byte-wide sources into dst. */
static inline void put_pixels8_l2_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h)
{
    int row, half;
    for (row = 0; row < h; row++)
    {
        const uint8_t *r1 = src1 + row * src_stride1;
        const uint8_t *r2 = src2 + row * src_stride2;
        uint8_t *d = dst + row * dst_stride;
        for (half = 0; half < 8; half += 4)
        {
            uint32_t a = ((const union unaligned_32 *)(r1 + half))->l;
            uint32_t b = ((const union unaligned_32 *)(r2 + half))->l;
            *(uint32_t *)(d + half) = rnd_avg32(a, b);
        }
    }
}
/* Store the rounding average of two 4-byte-wide sources into dst. */
static inline void put_pixels4_l2_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h)
{
    int row;
    for (row = 0; row < h; row++)
    {
        uint32_t a = ((const union unaligned_32 *)(src1 + row * src_stride1))->l;
        uint32_t b = ((const union unaligned_32 *)(src2 + row * src_stride2))->l;
        *(uint32_t *)(dst + row * dst_stride) = rnd_avg32(a, b);
    }
}
/* Store the rounding average of two 2-byte-wide sources into dst. */
static inline void put_pixels2_l2_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h)
{
    int row;
    for (row = 0; row < h; row++)
    {
        uint32_t a = ((const union unaligned_16 *)(src1 + row * src_stride1))->l;
        uint32_t b = ((const union unaligned_16 *)(src2 + row * src_stride2))->l;
        *(uint16_t *)(dst + row * dst_stride) = rnd_avg32(a, b);
    }
}
/* 16-wide two-source put-average: two 8-wide halves. */
static inline void put_pixels16_l2_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h)
{
    put_pixels8_l2_8(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);
    put_pixels8_l2_8(dst + 8 * sizeof(uint8_t), src1 + 8 * sizeof(uint8_t), src2 + 8 * sizeof(uint8_t), dst_stride, src_stride1, src_stride2, h);
}
/* 16-wide no-rounding variant: two 8-wide halves. */
static inline void put_no_rnd_pixels16_l2_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h)
{
    put_no_rnd_pixels8_l2_8(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);
    put_no_rnd_pixels8_l2_8(dst + 8 * sizeof(uint8_t), src1 + 8 * sizeof(uint8_t), src2 + 8 * sizeof(uint8_t), dst_stride, src_stride1, src_stride2, h);
}
/* x2: horizontal half-sample (src and src+1), no-rounding. */
static inline void put_no_rnd_pixels8_x2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    put_no_rnd_pixels8_l2_8(block, pixels, pixels + sizeof(uint8_t), line_size, line_size, line_size, h);
}
/* x2: horizontal half-sample, rounding. */
static inline void put_pixels8_x2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    put_pixels8_l2_8(block, pixels, pixels + sizeof(uint8_t), line_size, line_size, line_size, h);
}
/* y2: vertical half-sample (src and src+stride), no-rounding. */
static inline void put_no_rnd_pixels8_y2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    put_no_rnd_pixels8_l2_8(block, pixels, pixels + line_size, line_size, line_size, line_size, h);
}
/* y2: vertical half-sample, rounding. */
static inline void put_pixels8_y2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    put_pixels8_l2_8(block, pixels, pixels + line_size, line_size, line_size, line_size, h);
}
static inline void put_pixels8_l4_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4, int dst_stride, int src_stride1, int src_stride2, int src_stride3, int src_stride4, int h)
{
    int i, x;
    /* Rounded 4-way average over an 8-pixel row:
     *   dst[x] = (s1 + s2 + s3 + s4 + 2) >> 2.
     * This is exactly the per-byte result of the packed 32-bit
     * split-mask (0x03 low bits / 0xFC high bits) formulation: the
     * low-bit sums never exceed 14, so no carry can cross a byte. */
    for (i = 0; i < h; i++) {
        const uint8_t *a = src1 + i * src_stride1;
        const uint8_t *b = src2 + i * src_stride2;
        const uint8_t *c = src3 + i * src_stride3;
        const uint8_t *d = src4 + i * src_stride4;
        uint8_t *o = dst + i * dst_stride;
        for (x = 0; x < 8; x++)
            o[x] = (a[x] + b[x] + c[x] + d[x] + 2) >> 2;
    }
}
static inline void put_pixels4_x2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    /* 4-wide horizontal half-pel, rounded. */
    put_pixels4_l2_8(block, pixels, pixels + 1,
                     line_size, line_size, line_size, h);
}
static inline void put_pixels4_y2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    /* 4-wide vertical half-pel, rounded. */
    put_pixels4_l2_8(block, pixels, pixels + line_size,
                     line_size, line_size, line_size, h);
}
static inline void put_pixels2_x2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    /* 2-wide horizontal half-pel, rounded. */
    put_pixels2_l2_8(block, pixels, pixels + 1,
                     line_size, line_size, line_size, h);
}
static inline void put_pixels2_y2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    /* 2-wide vertical half-pel, rounded. */
    put_pixels2_l2_8(block, pixels, pixels + line_size,
                     line_size, line_size, line_size, h);
}
static inline void put_no_rnd_pixels8_l4_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4, int dst_stride, int src_stride1, int src_stride2, int src_stride3, int src_stride4, int h)
{
    int i, x;
    /* No-rounding 4-way average over an 8-pixel row:
     *   dst[x] = (s1 + s2 + s3 + s4 + 1) >> 2
     * (bias 1 instead of the rounded variant's 2).  Per-byte identical
     * to the packed 32-bit split-mask (0x03/0xFC) formulation. */
    for (i = 0; i < h; i++) {
        const uint8_t *a = src1 + i * src_stride1;
        const uint8_t *b = src2 + i * src_stride2;
        const uint8_t *c = src3 + i * src_stride3;
        const uint8_t *d = src4 + i * src_stride4;
        uint8_t *o = dst + i * dst_stride;
        for (x = 0; x < 8; x++)
            o[x] = (a[x] + b[x] + c[x] + d[x] + 1) >> 2;
    }
}
static inline void put_pixels16_l4_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4, int dst_stride, int src_stride1, int src_stride2, int src_stride3, int src_stride4, int h)
{
    /* 16-wide 4-way average = two independent 8-wide halves. */
    put_pixels8_l4_8(dst, src1, src2, src3, src4,
                     dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);
    put_pixels8_l4_8(dst + 8, src1 + 8, src2 + 8, src3 + 8, src4 + 8,
                     dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);
}
static inline void put_no_rnd_pixels16_l4_8(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4, int dst_stride, int src_stride1, int src_stride2, int src_stride3, int src_stride4, int h)
{
    /* No-rounding 16-wide 4-way average = two 8-wide halves. */
    put_no_rnd_pixels8_l4_8(dst, src1, src2, src3, src4,
                            dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);
    put_no_rnd_pixels8_l4_8(dst + 8, src1 + 8, src2 + 8, src3 + 8, src4 + 8,
                            dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);
}
static inline void put_pixels2_xy2_8_c(uint8_t *p_block, const uint8_t *p_pixels, int line_size, int h)
{
    uint8_t *block = p_block;
    const uint8_t *pixels = p_pixels;
    int i;
    /* 2-wide half-pel (x+1/2, y+1/2) interpolation:
     *   out[x] = (p[x] + p[x+1] + p[x+ls] + p[x+ls+1] + 2) >> 2.
     * sa/sb hold the horizontal pair sums of the row above the current
     * output row, with the +2 rounding bias folded in so it is added
     * exactly once per output pixel.
     * NOTE(review): two output rows are produced per loop iteration, so
     * h is presumably always even here — confirm against the callers. */
    int sa = pixels[0] + pixels[1] + 2;
    int sb = pixels[1] + pixels[2] + 2;
    pixels += line_size;
    for (i = 0; i < h; i += 2) {
        int ta = pixels[0] + pixels[1];
        int tb = pixels[1] + pixels[2];
        block[0] = (sa + ta) >> 2;
        block[1] = (sb + tb) >> 2;
        pixels += line_size;
        block += line_size;
        sa = pixels[0] + pixels[1] + 2;
        sb = pixels[1] + pixels[2] + 2;
        block[0] = (ta + sa) >> 2;
        block[1] = (tb + sb) >> 2;
        pixels += line_size;
        block += line_size;
    }
}
/*
 * 4-wide half-pel (x+1/2, y+1/2) interpolation with put rounding:
 *   dst[x] = (p[x] + p[x+1] + p[x+ls] + p[x+ls+1] + 2) >> 2.
 * Four pixels are processed at once in a packed 32-bit word; the low two
 * bits (mask 0x03) and high six bits (mask 0xFC) of every byte are summed
 * separately so carries cannot spill into the neighbouring byte lane.
 * NOTE(review): the loop emits two output rows per iteration, so h is
 * presumably always even here — confirm against the callers.
 */
static inline void put_pixels4_xy2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    int i;
    /* Pair sums of the row above the first output row; 0x02020202 is the
     * rounding bias, folded in once per output pixel. */
    const uint32_t a = (((const union unaligned_32 *) (pixels))->l);
    const uint32_t b = (((const union unaligned_32 *) (pixels + 1))->l);
    uint32_t l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
    uint32_t h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
    uint32_t l1, h1;
    pixels += line_size;
    for(i = 0; i < h; i += 2)
    {
        /* First output row: previous row's sums (l0/h0) + this row's. */
        uint32_t a = (((const union unaligned_32 *) (pixels))->l);
        uint32_t b = (((const union unaligned_32 *) (pixels + 1))->l);
        l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
        h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        *((uint32_t *)block) = h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
        pixels += line_size;
        block += line_size;
        /* Second output row: reuse l1/h1, read one row further down. */
        a = (((const union unaligned_32 *) (pixels))->l);
        b = (((const union unaligned_32 *) (pixels + 1))->l);
        l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
        h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        *((uint32_t *)block) = h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
        pixels += line_size;
        block += line_size;
    }
}
/*
 * 8-wide half-pel (x+1/2, y+1/2) interpolation with put rounding:
 *   dst[x] = (p[x] + p[x+1] + p[x+ls] + p[x+ls+1] + 2) >> 2.
 * The outer j loop handles the left and right 4-pixel halves with packed
 * 32-bit arithmetic; the low two bits (mask 0x03) and high six bits
 * (mask 0xFC) of each byte are accumulated separately so carries stay
 * inside their byte lane.
 * NOTE(review): two output rows are produced per inner iteration, so h is
 * presumably always even here — confirm against the callers.
 */
static inline void put_pixels8_xy2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    int j;
    for(j = 0; j < 2; j++)
    {
        int i;
        /* Pair sums of the row above the first output row; 0x02020202 is
         * the rounding bias, added once per output pixel. */
        const uint32_t a = (((const union unaligned_32 *) (pixels))->l);
        const uint32_t b = (((const union unaligned_32 *) (pixels + 1))->l);
        uint32_t l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
        uint32_t h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        uint32_t l1, h1;
        pixels += line_size;
        for(i = 0; i < h; i += 2)
        {
            /* First output row: previous row's sums (l0/h0) + this row's. */
            uint32_t a = (((const union unaligned_32 *) (pixels))->l);
            uint32_t b = (((const union unaligned_32 *) (pixels + 1))->l);
            l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
            h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *)block) = h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
            pixels += line_size;
            block += line_size;
            /* Second output row: reuse l1/h1, read one row further down. */
            a = (((const union unaligned_32 *) (pixels))->l);
            b = (((const union unaligned_32 *) (pixels + 1))->l);
            l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
            h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *)block) = h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
            pixels += line_size;
            block += line_size;
        }
        /* Rewind to the top and step 4 pixels right for the second half:
         * the inner loop advanced h+1 source rows and h output rows. */
        pixels += 4 - line_size * (h + 1);
        block += 4 - line_size * h;
    }
}
/*
 * 8-wide half-pel (x+1/2, y+1/2) interpolation, no-rounding variant:
 *   dst[x] = (p[x] + p[x+1] + p[x+ls] + p[x+ls+1] + 1) >> 2
 * (bias 0x01010101 instead of the rounded variant's 0x02020202).
 * Structure is identical to put_pixels8_xy2_8_c: the outer j loop covers
 * the left and right 4-pixel halves, and the 0x03/0xFC split keeps the
 * packed per-byte sums from carrying into neighbouring lanes.
 * NOTE(review): two output rows per inner iteration — h presumably even.
 */
static inline void put_no_rnd_pixels8_xy2_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    int j;
    for(j = 0; j < 2; j++)
    {
        int i;
        /* Pair sums of the row above the first output row, with the
         * truncating (+1) bias folded in. */
        const uint32_t a = (((const union unaligned_32 *) (pixels))->l);
        const uint32_t b = (((const union unaligned_32 *) (pixels + 1))->l);
        uint32_t l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
        uint32_t h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
        uint32_t l1, h1;
        pixels += line_size;
        for(i = 0; i < h; i += 2)
        {
            /* First output row: previous row's sums (l0/h0) + this row's. */
            uint32_t a = (((const
                            union unaligned_32 *) (pixels))->l);
            uint32_t b = (((const union unaligned_32 *) (pixels + 1))->l);
            l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
            h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *)block) = h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
            pixels += line_size;
            block += line_size;
            /* Second output row: reuse l1/h1, read one row further down. */
            a = (((const union unaligned_32 *) (pixels))->l);
            b = (((const union unaligned_32 *) (pixels + 1))->l);
            l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
            h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
            *((uint32_t *)block) = h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
            pixels += line_size;
            block += line_size;
        }
        /* Rewind to the top and step 4 pixels right for the second half. */
        pixels += 4 - line_size * (h + 1);
        block += 4 - line_size * h;
    }
}
static void put_pixels16_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    /* Full-pel 16-wide copy: two adjacent 8-wide copies. */
    put_pixels8_8_c(block,     pixels,     line_size, h);
    put_pixels8_8_c(block + 8, pixels + 8, line_size, h);
}
__attribute__((unused)) static void put_no_rnd_pixels16_8_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    /* A full-pel copy involves no rounding, so the "no_rnd" variant is
     * simply the plain 16-wide copy. */
    put_pixels8_8_c(block,     pixels,     line_size, h);
    put_pixels8_8_c(block + 8, pixels + 8, line_size, h);
}

static void put_h264_chroma_mc2_8_c(uint8_t *p_dst , uint8_t *p_src , int stride, int h, int x, int y)
{
    /* 2-wide H.264 chroma MC: bilinear interpolation at the 1/8-pel
     * fractional position (x, y), weights A..D summing to 64. */
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    const int A = (8 - x) * (8 - y);
    const int B = x * (8 - y);
    const int C = (8 - x) * y;
    const int D = x * y;
    int i, j;

    if (D) {
        /* General case: blend the 2x2 source neighbourhood. */
        for (i = 0; i < h; i++) {
            for (j = 0; j < 2; j++)
                dst[j] = (A * src[j] + B * src[j + 1] +
                          C * src[stride + j] + D * src[stride + j + 1] + 32) >> 6;
            dst += stride;
            src += stride;
        }
    } else {
        /* x == 0 or y == 0: only two taps remain; step selects the
         * horizontal (1) or vertical (stride) neighbour. */
        const int E = B + C;
        const int step = C ? stride : 1;
        for (i = 0; i < h; i++) {
            for (j = 0; j < 2; j++)
                dst[j] = (A * src[j] + E * src[j + step] + 32) >> 6;
            dst += stride;
            src += stride;
        }
    }
}
static void put_h264_chroma_mc4_8_c(uint8_t *p_dst , uint8_t *p_src , int stride, int h, int x, int y)
{
    /* 4-wide H.264 chroma MC: bilinear interpolation at the 1/8-pel
     * fractional position (x, y), weights A..D summing to 64. */
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    const int A = (8 - x) * (8 - y);
    const int B = x * (8 - y);
    const int C = (8 - x) * y;
    const int D = x * y;
    int i, j;

    if (D) {
        /* General case: blend the 2x2 source neighbourhood. */
        for (i = 0; i < h; i++) {
            for (j = 0; j < 4; j++)
                dst[j] = (A * src[j] + B * src[j + 1] +
                          C * src[stride + j] + D * src[stride + j + 1] + 32) >> 6;
            dst += stride;
            src += stride;
        }
    } else {
        /* x == 0 or y == 0: only two taps remain. */
        const int E = B + C;
        const int step = C ? stride : 1;
        for (i = 0; i < h; i++) {
            for (j = 0; j < 4; j++)
                dst[j] = (A * src[j] + E * src[j + step] + 32) >> 6;
            dst += stride;
            src += stride;
        }
    }
}
static void put_h264_chroma_mc8_8_c(uint8_t *p_dst , uint8_t *p_src , int stride, int h, int x, int y)
{
    /* 8-wide H.264 chroma MC: bilinear interpolation at the 1/8-pel
     * fractional position (x, y), weights A..D summing to 64. */
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    const int A = (8 - x) * (8 - y);
    const int B = x * (8 - y);
    const int C = (8 - x) * y;
    const int D = x * y;
    int i, j;

    if (D) {
        /* General case: blend the 2x2 source neighbourhood. */
        for (i = 0; i < h; i++) {
            for (j = 0; j < 8; j++)
                dst[j] = (A * src[j] + B * src[j + 1] +
                          C * src[stride + j] + D * src[stride + j + 1] + 32) >> 6;
            dst += stride;
            src += stride;
        }
    } else {
        /* x == 0 or y == 0: only two taps remain. */
        const int E = B + C;
        const int step = C ? stride : 1;
        for (i = 0; i < h; i++) {
            for (j = 0; j < 8; j++)
                dst[j] = (A * src[j] + E * src[j + step] + 32) >> 6;
            dst += stride;
            src += stride;
        }
    }
}
static void avg_h264_chroma_mc2_8_c(uint8_t *p_dst , uint8_t *p_src , int stride, int h, int x, int y)
{
    /* 2-wide H.264 chroma MC, averaging variant: the bilinear result is
     * rounded-averaged with the existing destination pixel. */
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    const int A = (8 - x) * (8 - y);
    const int B = x * (8 - y);
    const int C = (8 - x) * y;
    const int D = x * y;
    int i, j;

    if (D) {
        for (i = 0; i < h; i++) {
            for (j = 0; j < 2; j++) {
                int p = (A * src[j] + B * src[j + 1] +
                         C * src[stride + j] + D * src[stride + j + 1] + 32) >> 6;
                dst[j] = (dst[j] + p + 1) >> 1;
            }
            dst += stride;
            src += stride;
        }
    } else {
        /* x == 0 or y == 0: only two taps remain. */
        const int E = B + C;
        const int step = C ? stride : 1;
        for (i = 0; i < h; i++) {
            for (j = 0; j < 2; j++) {
                int p = (A * src[j] + E * src[j + step] + 32) >> 6;
                dst[j] = (dst[j] + p + 1) >> 1;
            }
            dst += stride;
            src += stride;
        }
    }
}
static void avg_h264_chroma_mc4_8_c(uint8_t *p_dst , uint8_t *p_src , int stride, int h, int x, int y)
{
    /* 4-wide H.264 chroma MC, averaging variant: the bilinear result is
     * rounded-averaged with the existing destination pixel. */
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    const int A = (8 - x) * (8 - y);
    const int B = x * (8 - y);
    const int C = (8 - x) * y;
    const int D = x * y;
    int i, j;

    if (D) {
        for (i = 0; i < h; i++) {
            for (j = 0; j < 4; j++) {
                int p = (A * src[j] + B * src[j + 1] +
                         C * src[stride + j] + D * src[stride + j + 1] + 32) >> 6;
                dst[j] = (dst[j] + p + 1) >> 1;
            }
            dst += stride;
            src += stride;
        }
    } else {
        /* x == 0 or y == 0: only two taps remain. */
        const int E = B + C;
        const int step = C ? stride : 1;
        for (i = 0; i < h; i++) {
            for (j = 0; j < 4; j++) {
                int p = (A * src[j] + E * src[j + step] + 32) >> 6;
                dst[j] = (dst[j] + p + 1) >> 1;
            }
            dst += stride;
            src += stride;
        }
    }
}
static void avg_h264_chroma_mc8_8_c(uint8_t *p_dst , uint8_t *p_src , int stride, int h, int x, int y)
{
    /* 8-wide H.264 chroma MC, averaging variant: the bilinear result is
     * rounded-averaged with the existing destination pixel. */
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    const int A = (8 - x) * (8 - y);
    const int B = x * (8 - y);
    const int C = (8 - x) * y;
    const int D = x * y;
    int i, j;

    if (D) {
        for (i = 0; i < h; i++) {
            for (j = 0; j < 8; j++) {
                int p = (A * src[j] + B * src[j + 1] +
                         C * src[stride + j] + D * src[stride + j + 1] + 32) >> 6;
                dst[j] = (dst[j] + p + 1) >> 1;
            }
            dst += stride;
            src += stride;
        }
    } else {
        /* x == 0 or y == 0: only two taps remain. */
        const int E = B + C;
        const int step = C ? stride : 1;
        for (i = 0; i < h; i++) {
            for (j = 0; j < 8; j++) {
                int p = (A * src[j] + E * src[j + step] + 32) >> 6;
                dst[j] = (dst[j] + p + 1) >> 1;
            }
            dst += stride;
            src += stride;
        }
    }
}


static __attribute__((unused)) void put_h264_qpel2_h_lowpass_8(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride)
{
    /* 2x2 horizontal half-pel: 6-tap (1,-5,20,20,-5,1) filter, rounded
     * by 16 and clipped to 8 bits through the crop table.  The template's
     * stride shifts are >>0 for 8-bit depth and are omitted. */
    uint8_t *cm = ff_cropTbl + 1024;
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    int row, col;
    for (row = 0; row < 2; row++) {
        for (col = 0; col < 2; col++) {
            int v = (src[col] + src[col + 1]) * 20
                  - (src[col - 1] + src[col + 2]) * 5
                  + (src[col - 2] + src[col + 3]);
            dst[col] = cm[(v + 16) >> 5];
        }
        dst += dstStride;
        src += srcStride;
    }
}
static __attribute__((unused)) void put_h264_qpel2_v_lowpass_8(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride)
{
    /* 2x2 vertical half-pel: 6-tap (1,-5,20,20,-5,1) filter applied down
     * each column, rounded by 16 and clipped through the crop table. */
    uint8_t *cm = ff_cropTbl + 1024;
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    int row, col;
    for (col = 0; col < 2; col++) {
        for (row = 0; row < 2; row++) {
            int v = (src[row * srcStride] + src[(row + 1) * srcStride]) * 20
                  - (src[(row - 1) * srcStride] + src[(row + 2) * srcStride]) * 5
                  + (src[(row - 2) * srcStride] + src[(row + 3) * srcStride]);
            dst[row * dstStride] = cm[(v + 16) >> 5];
        }
        dst++;
        src++;
    }
}
static __attribute__((unused)) void put_h264_qpel2_hv_lowpass_8(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride)
{
    /* 2x2 centre half-pel: separable 6-tap filter.  Pass 1 filters
     * horizontally into the 16-bit tmp buffer for rows -2..+4 around the
     * block; pass 2 filters tmp vertically with 10-bit rounding.  The
     * template's pad term is 0 for 8-bit depth and is omitted, as are the
     * no-op stride shifts. */
    uint8_t *cm = ff_cropTbl + 1024;
    uint8_t *dst = p_dst;
    uint8_t *src = p_src - 2 * srcStride;
    int row, col;
    for (row = 0; row < 2 + 5; row++) {
        for (col = 0; col < 2; col++)
            tmp[col] = (src[col] + src[col + 1]) * 20
                     - (src[col - 1] + src[col + 2]) * 5
                     + (src[col - 2] + src[col + 3]);
        tmp += tmpStride;
        src += srcStride;
    }
    tmp -= tmpStride * (2 + 5 - 2);
    for (col = 0; col < 2; col++) {
        for (row = 0; row < 2; row++) {
            int v = (tmp[row * tmpStride] + tmp[(row + 1) * tmpStride]) * 20
                  - (tmp[(row - 1) * tmpStride] + tmp[(row + 2) * tmpStride]) * 5
                  + (tmp[(row - 2) * tmpStride] + tmp[(row + 3) * tmpStride]);
            dst[row * dstStride] = cm[(v + 512) >> 10];
        }
        dst++;
        tmp++;
    }
}
static void put_h264_qpel4_h_lowpass_8(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride)
{
    /* 4x4 horizontal half-pel: 6-tap (1,-5,20,20,-5,1) filter, rounded
     * by 16 and clipped to 8 bits through the crop table. */
    uint8_t *cm = ff_cropTbl + 1024;
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    int row, col;
    for (row = 0; row < 4; row++) {
        for (col = 0; col < 4; col++) {
            int v = (src[col] + src[col + 1]) * 20
                  - (src[col - 1] + src[col + 2]) * 5
                  + (src[col - 2] + src[col + 3]);
            dst[col] = cm[(v + 16) >> 5];
        }
        dst += dstStride;
        src += srcStride;
    }
}
static void put_h264_qpel4_v_lowpass_8(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride)
{
    /* 4x4 vertical half-pel: 6-tap filter down each column, rounded by
     * 16 and clipped through the crop table. */
    uint8_t *cm = ff_cropTbl + 1024;
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    int row, col;
    for (col = 0; col < 4; col++) {
        for (row = 0; row < 4; row++) {
            int v = (src[row * srcStride] + src[(row + 1) * srcStride]) * 20
                  - (src[(row - 1) * srcStride] + src[(row + 2) * srcStride]) * 5
                  + (src[(row - 2) * srcStride] + src[(row + 3) * srcStride]);
            dst[row * dstStride] = cm[(v + 16) >> 5];
        }
        dst++;
        src++;
    }
}
static void put_h264_qpel4_hv_lowpass_8(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride)
{
    /* 4x4 centre half-pel: separable 6-tap filter.  Pass 1 filters
     * horizontally into the 16-bit tmp buffer for rows -2..+6 around the
     * block; pass 2 filters tmp vertically with 10-bit rounding.  The
     * template's pad term is 0 for 8-bit depth and is omitted. */
    uint8_t *cm = ff_cropTbl + 1024;
    uint8_t *dst = p_dst;
    uint8_t *src = p_src - 2 * srcStride;
    int row, col;
    for (row = 0; row < 4 + 5; row++) {
        for (col = 0; col < 4; col++)
            tmp[col] = (src[col] + src[col + 1]) * 20
                     - (src[col - 1] + src[col + 2]) * 5
                     + (src[col - 2] + src[col + 3]);
        tmp += tmpStride;
        src += srcStride;
    }
    tmp -= tmpStride * (4 + 5 - 2);
    for (col = 0; col < 4; col++) {
        for (row = 0; row < 4; row++) {
            int v = (tmp[row * tmpStride] + tmp[(row + 1) * tmpStride]) * 20
                  - (tmp[(row - 1) * tmpStride] + tmp[(row + 2) * tmpStride]) * 5
                  + (tmp[(row - 2) * tmpStride] + tmp[(row + 3) * tmpStride]);
            dst[row * dstStride] = cm[(v + 512) >> 10];
        }
        dst++;
        tmp++;
    }
}
static void put_h264_qpel8_h_lowpass_8(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride)
{
    /* 8x8 horizontal half-pel: 6-tap (1,-5,20,20,-5,1) filter, rounded
     * by 16 and clipped to 8 bits through the crop table. */
    uint8_t *cm = ff_cropTbl + 1024;
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    int row, col;
    for (row = 0; row < 8; row++) {
        for (col = 0; col < 8; col++) {
            int v = (src[col] + src[col + 1]) * 20
                  - (src[col - 1] + src[col + 2]) * 5
                  + (src[col - 2] + src[col + 3]);
            dst[col] = cm[(v + 16) >> 5];
        }
        dst += dstStride;
        src += srcStride;
    }
}
static void put_h264_qpel8_v_lowpass_8(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride)
{
    /* 8x8 vertical half-pel: 6-tap filter down each column, rounded by
     * 16 and clipped through the crop table. */
    uint8_t *cm = ff_cropTbl + 1024;
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    int row, col;
    for (col = 0; col < 8; col++) {
        for (row = 0; row < 8; row++) {
            int v = (src[row * srcStride] + src[(row + 1) * srcStride]) * 20
                  - (src[(row - 1) * srcStride] + src[(row + 2) * srcStride]) * 5
                  + (src[(row - 2) * srcStride] + src[(row + 3) * srcStride]);
            dst[row * dstStride] = cm[(v + 16) >> 5];
        }
        dst++;
        src++;
    }
}

static void put_h264_qpel8_hv_lowpass_8(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride)
{
    /* Separable 2-D 6-tap lowpass for the center (2,2) half-pel position:
     * a horizontal pass writes 8+5 rows of 16-bit intermediates into tmp,
     * then a vertical pass filters those with a second round/shift (>>10). */
    const int pad = 0;                       /* extra bias only needed above 9-bit depth */
    uint8_t *const cm = ff_cropTbl + 1024;   /* final clip to 0..255 */
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    int row, col;
    dstStride >>= sizeof(uint8_t) - 1;       /* byte -> pixel stride; no-op at 8 bit */
    srcStride >>= sizeof(uint8_t) - 1;
    src -= 2 * srcStride;                    /* start two rows above the block */
    /* Horizontal pass: 13 rows of 8 intermediate samples. */
    for (row = 0; row < 8 + 5; row++)
    {
        for (col = 0; col < 8; col++)
            tmp[col] = (src[col] + src[col + 1]) * 20
                     - (src[col - 1] + src[col + 2]) * 5
                     +  src[col - 2] + src[col + 3] + pad;
        tmp += tmpStride;
        src += srcStride;
    }
    /* Rewind so tmp points at intermediate row index 2 (output row 0). */
    tmp -= tmpStride * (8 + 5 - 2);
    /* Vertical pass over the intermediates, one column per outer iteration. */
    for (col = 0; col < 8; col++)
    {
        for (row = 0; row < 8; row++)
        {
            const int sum = ((tmp[ row      * tmpStride] - pad) + (tmp[(row + 1) * tmpStride] - pad)) * 20
                          - ((tmp[(row - 1) * tmpStride] - pad) + (tmp[(row + 2) * tmpStride] - pad)) *  5
                          +  (tmp[(row - 2) * tmpStride] - pad) + (tmp[(row + 3) * tmpStride] - pad);
            dst[row * dstStride] = cm[(sum + 512) >> 10];
        }
        dst++;
        tmp++;
    }
}

static void put_h264_qpel16_v_lowpass_8(uint8_t *dst, uint8_t *src, int dstStride, int srcStride)
{
    /* A 16x16 block is filtered as four independent 8x8 quadrants. */
    int x, y;
    for (y = 0; y < 16; y += 8)
        for (x = 0; x < 16; x += 8)
            put_h264_qpel8_v_lowpass_8(dst + y * dstStride + x,
                                       src + y * srcStride + x,
                                       dstStride, srcStride);
}

static void put_h264_qpel16_h_lowpass_8(uint8_t *dst, uint8_t *src, int dstStride, int srcStride)
{
    /* A 16x16 block is filtered as four independent 8x8 quadrants. */
    int x, y;
    for (y = 0; y < 16; y += 8)
        for (x = 0; x < 16; x += 8)
            put_h264_qpel8_h_lowpass_8(dst + y * dstStride + x,
                                       src + y * srcStride + x,
                                       dstStride, srcStride);
}

static void put_h264_qpel16_hv_lowpass_8(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride)
{
    /* Four 8x8 quadrants; the tmp scratch rows are reused by both block rows,
     * only the horizontal offset (+8 columns) moves within tmp. */
    int x, y;
    for (y = 0; y < 16; y += 8)
        for (x = 0; x < 16; x += 8)
            put_h264_qpel8_hv_lowpass_8(dst + y * dstStride + x, tmp + x,
                                        src + y * srcStride + x,
                                        dstStride, tmpStride, srcStride);
}
static __attribute__((unused)) void avg_h264_qpel2_h_lowpass_8(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride)
{
    /* Horizontal 6-tap lowpass over a 2-wide, 2-high block, averaged into
     * the destination with rounding: dst = (dst + filtered + 1) >> 1. */
    uint8_t *const cm = ff_cropTbl + 1024;   /* clip table for the >>5 result */
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    int row, col;
    dstStride >>= sizeof(uint8_t) - 1;       /* byte -> pixel stride; no-op at 8 bit */
    srcStride >>= sizeof(uint8_t) - 1;
    for (row = 0; row < 2; row++)
    {
        for (col = 0; col < 2; col++)
        {
            const int f = cm[((src[col] + src[col + 1]) * 20
                            - (src[col - 1] + src[col + 2]) * 5
                            +  src[col - 2] + src[col + 3] + 16) >> 5];
            dst[col] = (dst[col] + f + 1) >> 1;
        }
        dst += dstStride;
        src += srcStride;
    }
}
static __attribute__((unused)) void avg_h264_qpel2_v_lowpass_8(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride)
{
    /* Vertical 6-tap lowpass over a 2x2 block, averaged into the
     * destination with rounding: dst = (dst + filtered + 1) >> 1. */
    uint8_t *const cm = ff_cropTbl + 1024;
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    int col;
    dstStride >>= sizeof(uint8_t) - 1;       /* byte -> pixel stride; no-op at 8 bit */
    srcStride >>= sizeof(uint8_t) - 1;
    for (col = 0; col < 2; col++)
    {
        int row;
        for (row = 0; row < 2; row++)
        {
            const int f = cm[((src[ row      * srcStride] + src[(row + 1) * srcStride]) * 20
                            - (src[(row - 1) * srcStride] + src[(row + 2) * srcStride]) *  5
                            +  src[(row - 2) * srcStride] + src[(row + 3) * srcStride] + 16) >> 5];
            dst[row * dstStride] = (dst[row * dstStride] + f + 1) >> 1;
        }
        dst++;
        src++;
    }
}
static __attribute__((unused)) void avg_h264_qpel2_hv_lowpass_8(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride)
{
    /* Separable 2-D 6-tap lowpass over a 2x2 block (horizontal pass into
     * 16-bit tmp, then vertical pass with >>10), averaged into dst. */
    const int pad = 0;                       /* bias only needed above 9-bit depth */
    uint8_t *const cm = ff_cropTbl + 1024;
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    int row, col;
    dstStride >>= sizeof(uint8_t) - 1;       /* byte -> pixel stride; no-op at 8 bit */
    srcStride >>= sizeof(uint8_t) - 1;
    src -= 2 * srcStride;                    /* start two rows above the block */
    /* Horizontal pass: 2+5 rows of 2 intermediate samples. */
    for (row = 0; row < 2 + 5; row++)
    {
        for (col = 0; col < 2; col++)
            tmp[col] = (src[col] + src[col + 1]) * 20
                     - (src[col - 1] + src[col + 2]) * 5
                     +  src[col - 2] + src[col + 3] + pad;
        tmp += tmpStride;
        src += srcStride;
    }
    tmp -= tmpStride * (2 + 5 - 2);          /* back to intermediate row 2 */
    /* Vertical pass, averaged into the destination. */
    for (col = 0; col < 2; col++)
    {
        for (row = 0; row < 2; row++)
        {
            const int sum = ((tmp[ row      * tmpStride] - pad) + (tmp[(row + 1) * tmpStride] - pad)) * 20
                          - ((tmp[(row - 1) * tmpStride] - pad) + (tmp[(row + 2) * tmpStride] - pad)) *  5
                          +  (tmp[(row - 2) * tmpStride] - pad) + (tmp[(row + 3) * tmpStride] - pad);
            dst[row * dstStride] = (dst[row * dstStride] + cm[(sum + 512) >> 10] + 1) >> 1;
        }
        dst++;
        tmp++;
    }
}
static void avg_h264_qpel4_h_lowpass_8(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride)
{
    /* Horizontal 6-tap lowpass over a 4x4 block, averaged into the
     * destination with rounding: dst = (dst + filtered + 1) >> 1. */
    uint8_t *const cm = ff_cropTbl + 1024;
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    int row, col;
    dstStride >>= sizeof(uint8_t) - 1;       /* byte -> pixel stride; no-op at 8 bit */
    srcStride >>= sizeof(uint8_t) - 1;
    for (row = 0; row < 4; row++)
    {
        for (col = 0; col < 4; col++)
        {
            const int f = cm[((src[col] + src[col + 1]) * 20
                            - (src[col - 1] + src[col + 2]) * 5
                            +  src[col - 2] + src[col + 3] + 16) >> 5];
            dst[col] = (dst[col] + f + 1) >> 1;
        }
        dst += dstStride;
        src += srcStride;
    }
}
static void avg_h264_qpel4_v_lowpass_8(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride)
{
    /* Vertical 6-tap lowpass over a 4x4 block, averaged into the
     * destination with rounding: dst = (dst + filtered + 1) >> 1. */
    uint8_t *const cm = ff_cropTbl + 1024;
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    int col;
    dstStride >>= sizeof(uint8_t) - 1;       /* byte -> pixel stride; no-op at 8 bit */
    srcStride >>= sizeof(uint8_t) - 1;
    for (col = 0; col < 4; col++)
    {
        int row;
        for (row = 0; row < 4; row++)
        {
            const int f = cm[((src[ row      * srcStride] + src[(row + 1) * srcStride]) * 20
                            - (src[(row - 1) * srcStride] + src[(row + 2) * srcStride]) *  5
                            +  src[(row - 2) * srcStride] + src[(row + 3) * srcStride] + 16) >> 5];
            dst[row * dstStride] = (dst[row * dstStride] + f + 1) >> 1;
        }
        dst++;
        src++;
    }
}
static void avg_h264_qpel4_hv_lowpass_8(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride)
{
    /* Separable 2-D 6-tap lowpass over a 4x4 block (horizontal pass into
     * 16-bit tmp, then vertical pass with >>10), averaged into dst. */
    const int pad = 0;                       /* bias only needed above 9-bit depth */
    uint8_t *const cm = ff_cropTbl + 1024;
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    int row, col;
    dstStride >>= sizeof(uint8_t) - 1;       /* byte -> pixel stride; no-op at 8 bit */
    srcStride >>= sizeof(uint8_t) - 1;
    src -= 2 * srcStride;                    /* start two rows above the block */
    /* Horizontal pass: 4+5 rows of 4 intermediate samples. */
    for (row = 0; row < 4 + 5; row++)
    {
        for (col = 0; col < 4; col++)
            tmp[col] = (src[col] + src[col + 1]) * 20
                     - (src[col - 1] + src[col + 2]) * 5
                     +  src[col - 2] + src[col + 3] + pad;
        tmp += tmpStride;
        src += srcStride;
    }
    tmp -= tmpStride * (4 + 5 - 2);          /* back to intermediate row 2 */
    /* Vertical pass, averaged into the destination. */
    for (col = 0; col < 4; col++)
    {
        for (row = 0; row < 4; row++)
        {
            const int sum = ((tmp[ row      * tmpStride] - pad) + (tmp[(row + 1) * tmpStride] - pad)) * 20
                          - ((tmp[(row - 1) * tmpStride] - pad) + (tmp[(row + 2) * tmpStride] - pad)) *  5
                          +  (tmp[(row - 2) * tmpStride] - pad) + (tmp[(row + 3) * tmpStride] - pad);
            dst[row * dstStride] = (dst[row * dstStride] + cm[(sum + 512) >> 10] + 1) >> 1;
        }
        dst++;
        tmp++;
    }
}
static void avg_h264_qpel8_h_lowpass_8(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride)
{
    /* Horizontal 6-tap lowpass over an 8x8 block, averaged into the
     * destination with rounding: dst = (dst + filtered + 1) >> 1. */
    uint8_t *const cm = ff_cropTbl + 1024;
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    int row, col;
    dstStride >>= sizeof(uint8_t) - 1;       /* byte -> pixel stride; no-op at 8 bit */
    srcStride >>= sizeof(uint8_t) - 1;
    for (row = 0; row < 8; row++)
    {
        for (col = 0; col < 8; col++)
        {
            const int f = cm[((src[col] + src[col + 1]) * 20
                            - (src[col - 1] + src[col + 2]) * 5
                            +  src[col - 2] + src[col + 3] + 16) >> 5];
            dst[col] = (dst[col] + f + 1) >> 1;
        }
        dst += dstStride;
        src += srcStride;
    }
}
static void avg_h264_qpel8_v_lowpass_8(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride)
{
    /* Vertical 6-tap lowpass over an 8x8 block, averaged into the
     * destination with rounding: dst = (dst + filtered + 1) >> 1. */
    uint8_t *const cm = ff_cropTbl + 1024;
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    int col;
    dstStride >>= sizeof(uint8_t) - 1;       /* byte -> pixel stride; no-op at 8 bit */
    srcStride >>= sizeof(uint8_t) - 1;
    for (col = 0; col < 8; col++)
    {
        int row;
        for (row = 0; row < 8; row++)
        {
            const int f = cm[((src[ row      * srcStride] + src[(row + 1) * srcStride]) * 20
                            - (src[(row - 1) * srcStride] + src[(row + 2) * srcStride]) *  5
                            +  src[(row - 2) * srcStride] + src[(row + 3) * srcStride] + 16) >> 5];
            dst[row * dstStride] = (dst[row * dstStride] + f + 1) >> 1;
        }
        dst++;
        src++;
    }
}
static void avg_h264_qpel8_hv_lowpass_8(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride)
{
    /* Separable 2-D 6-tap lowpass over an 8x8 block (horizontal pass into
     * 16-bit tmp, then vertical pass with >>10), averaged into dst. */
    const int pad = 0;                       /* bias only needed above 9-bit depth */
    uint8_t *const cm = ff_cropTbl + 1024;
    uint8_t *dst = p_dst;
    uint8_t *src = p_src;
    int row, col;
    dstStride >>= sizeof(uint8_t) - 1;       /* byte -> pixel stride; no-op at 8 bit */
    srcStride >>= sizeof(uint8_t) - 1;
    src -= 2 * srcStride;                    /* start two rows above the block */
    /* Horizontal pass: 8+5 rows of 8 intermediate samples. */
    for (row = 0; row < 8 + 5; row++)
    {
        for (col = 0; col < 8; col++)
            tmp[col] = (src[col] + src[col + 1]) * 20
                     - (src[col - 1] + src[col + 2]) * 5
                     +  src[col - 2] + src[col + 3] + pad;
        tmp += tmpStride;
        src += srcStride;
    }
    tmp -= tmpStride * (8 + 5 - 2);          /* back to intermediate row 2 */
    /* Vertical pass, averaged into the destination. */
    for (col = 0; col < 8; col++)
    {
        for (row = 0; row < 8; row++)
        {
            const int sum = ((tmp[ row      * tmpStride] - pad) + (tmp[(row + 1) * tmpStride] - pad)) * 20
                          - ((tmp[(row - 1) * tmpStride] - pad) + (tmp[(row + 2) * tmpStride] - pad)) *  5
                          +  (tmp[(row - 2) * tmpStride] - pad) + (tmp[(row + 3) * tmpStride] - pad);
            dst[row * dstStride] = (dst[row * dstStride] + cm[(sum + 512) >> 10] + 1) >> 1;
        }
        dst++;
        tmp++;
    }
}
static void avg_h264_qpel16_v_lowpass_8(uint8_t *dst, uint8_t *src, int dstStride, int srcStride)
{
    /* A 16x16 block is processed as four independent 8x8 quadrants. */
    int x, y;
    for (y = 0; y < 16; y += 8)
        for (x = 0; x < 16; x += 8)
            avg_h264_qpel8_v_lowpass_8(dst + y * dstStride + x,
                                       src + y * srcStride + x,
                                       dstStride, srcStride);
}
static void avg_h264_qpel16_h_lowpass_8(uint8_t *dst, uint8_t *src, int dstStride, int srcStride)
{
    /* A 16x16 block is processed as four independent 8x8 quadrants. */
    int x, y;
    for (y = 0; y < 16; y += 8)
        for (x = 0; x < 16; x += 8)
            avg_h264_qpel8_h_lowpass_8(dst + y * dstStride + x,
                                       src + y * srcStride + x,
                                       dstStride, srcStride);
}
static void avg_h264_qpel16_hv_lowpass_8(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride)
{
    /* Four 8x8 quadrants; the tmp scratch rows are reused by both block rows,
     * only the horizontal offset (+8 columns) moves within tmp. */
    int x, y;
    for (y = 0; y < 16; y += 8)
        for (x = 0; x < 16; x += 8)
            avg_h264_qpel8_hv_lowpass_8(dst + y * dstStride + x, tmp + x,
                                        src + y * srcStride + x,
                                        dstStride, tmpStride, srcStride);
}
/* mc00 = integer-pel position: a 2x2 block is copied unfiltered. */
static __attribute__((unused)) void put_h264_qpel2_mc00_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    put_pixels2_8_c(dst, src, stride, 2);
}
static void put_h264_qpel2_mc10_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc10: blend the horizontally filtered 2x2 block with the unshifted
     * source.  All strides/sizes are in pixels (sizeof(uint8_t) == 1). */
    uint8_t hbuf[2 * 2];
    put_h264_qpel2_h_lowpass_8(hbuf, src, 2, stride);
    put_pixels2_l2_8(dst, src, hbuf, stride, stride, 2, 2);
}
/* mc20: horizontal half-sample filter written straight to dst. */
static void put_h264_qpel2_mc20_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    put_h264_qpel2_h_lowpass_8(dst, src, stride, stride);
}
static void put_h264_qpel2_mc30_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc30: blend the horizontally filtered 2x2 block with the source
     * shifted one pixel right. */
    uint8_t hbuf[2 * 2];
    put_h264_qpel2_h_lowpass_8(hbuf, src, 2, stride);
    put_pixels2_l2_8(dst, src + 1, hbuf, stride, stride, 2, 2);
}
static void put_h264_qpel2_mc01_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc01: copy the block plus two rows of context above/three below into a
     * contiguous buffer, vertically filter it, then blend with the unshifted
     * rows.  "mid" points at the first block row (skips the 2 context rows). */
    uint8_t fullbuf[2 * (2 + 5)];
    uint8_t *const mid = fullbuf + 2 * 2;
    uint8_t vbuf[2 * 2];
    copy_block2_8(fullbuf, src - stride * 2, 2, stride, 2 + 5);
    put_h264_qpel2_v_lowpass_8(vbuf, mid, 2, 2);
    put_pixels2_l2_8(dst, mid, vbuf, stride, 2, 2, 2);
}
static void put_h264_qpel2_mc02_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc02: vertical half-sample filter only, written straight to dst. */
    uint8_t fullbuf[2 * (2 + 5)];
    uint8_t *const mid = fullbuf + 2 * 2;    /* skip the two context rows above */
    copy_block2_8(fullbuf, src - stride * 2, 2, stride, 2 + 5);
    put_h264_qpel2_v_lowpass_8(dst, mid, stride, 2);
}
static void put_h264_qpel2_mc03_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc03: like mc01 but blended with the rows shifted one row down
     * (mid + 2 advances one row inside the width-2 copy buffer). */
    uint8_t fullbuf[2 * (2 + 5)];
    uint8_t *const mid = fullbuf + 2 * 2;
    uint8_t vbuf[2 * 2];
    copy_block2_8(fullbuf, src - stride * 2, 2, stride, 2 + 5);
    put_h264_qpel2_v_lowpass_8(vbuf, mid, 2, 2);
    put_pixels2_l2_8(dst, mid + 2, vbuf, stride, 2, 2, 2);
}
static void put_h264_qpel2_mc11_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc11: average of the horizontal and vertical half-sample filters. */
    uint8_t fullbuf[2 * (2 + 5)];
    uint8_t *const mid = fullbuf + 2 * 2;
    uint8_t hbuf[2 * 2];
    uint8_t vbuf[2 * 2];
    put_h264_qpel2_h_lowpass_8(hbuf, src, 2, stride);
    copy_block2_8(fullbuf, src - stride * 2, 2, stride, 2 + 5);
    put_h264_qpel2_v_lowpass_8(vbuf, mid, 2, 2);
    put_pixels2_l2_8(dst, hbuf, vbuf, stride, 2, 2, 2);
}
static void put_h264_qpel2_mc31_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc31: like mc11 but the vertical filter reads the column one pixel
     * to the right (copy starts at src + 1). */
    uint8_t fullbuf[2 * (2 + 5)];
    uint8_t *const mid = fullbuf + 2 * 2;
    uint8_t hbuf[2 * 2];
    uint8_t vbuf[2 * 2];
    put_h264_qpel2_h_lowpass_8(hbuf, src, 2, stride);
    copy_block2_8(fullbuf, src - stride * 2 + 1, 2, stride, 2 + 5);
    put_h264_qpel2_v_lowpass_8(vbuf, mid, 2, 2);
    put_pixels2_l2_8(dst, hbuf, vbuf, stride, 2, 2, 2);
}
static void put_h264_qpel2_mc13_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc13: like mc11 but the horizontal filter reads the row one line down. */
    uint8_t fullbuf[2 * (2 + 5)];
    uint8_t *const mid = fullbuf + 2 * 2;
    uint8_t hbuf[2 * 2];
    uint8_t vbuf[2 * 2];
    put_h264_qpel2_h_lowpass_8(hbuf, src + stride, 2, stride);
    copy_block2_8(fullbuf, src - stride * 2, 2, stride, 2 + 5);
    put_h264_qpel2_v_lowpass_8(vbuf, mid, 2, 2);
    put_pixels2_l2_8(dst, hbuf, vbuf, stride, 2, 2, 2);
}
static void put_h264_qpel2_mc33_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc33: horizontal filter one row down, vertical filter one column
     * right, averaged together. */
    uint8_t fullbuf[2 * (2 + 5)];
    uint8_t *const mid = fullbuf + 2 * 2;
    uint8_t hbuf[2 * 2];
    uint8_t vbuf[2 * 2];
    put_h264_qpel2_h_lowpass_8(hbuf, src + stride, 2, stride);
    copy_block2_8(fullbuf, src - stride * 2 + 1, 2, stride, 2 + 5);
    put_h264_qpel2_v_lowpass_8(vbuf, mid, 2, 2);
    put_pixels2_l2_8(dst, hbuf, vbuf, stride, 2, 2, 2);
}
static void put_h264_qpel2_mc22_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc22: the 2-D (center) half-sample filter, straight to dst. */
    int16_t scratch[2 * (2 + 5)];            /* intermediate rows for the HV pass */
    put_h264_qpel2_hv_lowpass_8(dst, scratch, src, stride, 2, stride);
}
static void put_h264_qpel2_mc21_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc21: average of the horizontal filter and the 2-D center filter. */
    int16_t scratch[2 * (2 + 5)];
    uint8_t hbuf[2 * 2];
    uint8_t hvbuf[2 * 2];
    put_h264_qpel2_h_lowpass_8(hbuf, src, 2, stride);
    put_h264_qpel2_hv_lowpass_8(hvbuf, scratch, src, 2, 2, stride);
    put_pixels2_l2_8(dst, hbuf, hvbuf, stride, 2, 2, 2);
}
static void put_h264_qpel2_mc23_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc23: like mc21 but the horizontal filter reads the row one line down. */
    int16_t scratch[2 * (2 + 5)];
    uint8_t hbuf[2 * 2];
    uint8_t hvbuf[2 * 2];
    put_h264_qpel2_h_lowpass_8(hbuf, src + stride, 2, stride);
    put_h264_qpel2_hv_lowpass_8(hvbuf, scratch, src, 2, 2, stride);
    put_pixels2_l2_8(dst, hbuf, hvbuf, stride, 2, 2, 2);
}
static void put_h264_qpel2_mc12_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc12: average of the vertical filter and the 2-D center filter. */
    uint8_t fullbuf[2 * (2 + 5)];
    uint8_t *const mid = fullbuf + 2 * 2;
    int16_t scratch[2 * (2 + 5)];
    uint8_t vbuf[2 * 2];
    uint8_t hvbuf[2 * 2];
    copy_block2_8(fullbuf, src - stride * 2, 2, stride, 2 + 5);
    put_h264_qpel2_v_lowpass_8(vbuf, mid, 2, 2);
    put_h264_qpel2_hv_lowpass_8(hvbuf, scratch, src, 2, 2, stride);
    put_pixels2_l2_8(dst, vbuf, hvbuf, stride, 2, 2, 2);
}
static void put_h264_qpel2_mc32_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc32: like mc12 but the vertical filter reads the column one pixel
     * to the right (copy starts at src + 1). */
    uint8_t fullbuf[2 * (2 + 5)];
    uint8_t *const mid = fullbuf + 2 * 2;
    int16_t scratch[2 * (2 + 5)];
    uint8_t vbuf[2 * 2];
    uint8_t hvbuf[2 * 2];
    copy_block2_8(fullbuf, src - stride * 2 + 1, 2, stride, 2 + 5);
    put_h264_qpel2_v_lowpass_8(vbuf, mid, 2, 2);
    put_h264_qpel2_hv_lowpass_8(hvbuf, scratch, src, 2, 2, stride);
    put_pixels2_l2_8(dst, vbuf, hvbuf, stride, 2, 2, 2);
}
/* mc00 = integer-pel position: a 4x4 block is copied unfiltered. */
static __attribute__((unused)) void put_h264_qpel4_mc00_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    put_pixels4_8_c(dst, src, stride, 4);
}
static void put_h264_qpel4_mc10_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc10: blend the horizontally filtered 4x4 block with the unshifted
     * source.  All strides/sizes are in pixels (sizeof(uint8_t) == 1). */
    uint8_t hbuf[4 * 4];
    put_h264_qpel4_h_lowpass_8(hbuf, src, 4, stride);
    put_pixels4_l2_8(dst, src, hbuf, stride, stride, 4, 4);
}
/* mc20: horizontal half-sample filter written straight to dst. */
static void put_h264_qpel4_mc20_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    put_h264_qpel4_h_lowpass_8(dst, src, stride, stride);
}
static void put_h264_qpel4_mc30_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc30: blend the horizontally filtered 4x4 block with the source
     * shifted one pixel right. */
    uint8_t hbuf[4 * 4];
    put_h264_qpel4_h_lowpass_8(hbuf, src, 4, stride);
    put_pixels4_l2_8(dst, src + 1, hbuf, stride, stride, 4, 4);
}
static void put_h264_qpel4_mc01_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc01: copy the block plus two rows of context above/three below into a
     * contiguous buffer, vertically filter it, then blend with the unshifted
     * rows.  "mid" points at the first block row (skips the 2 context rows). */
    uint8_t fullbuf[4 * (4 + 5)];
    uint8_t *const mid = fullbuf + 4 * 2;
    uint8_t vbuf[4 * 4];
    copy_block4_8(fullbuf, src - stride * 2, 4, stride, 4 + 5);
    put_h264_qpel4_v_lowpass_8(vbuf, mid, 4, 4);
    put_pixels4_l2_8(dst, mid, vbuf, stride, 4, 4, 4);
}
static void put_h264_qpel4_mc02_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc02: vertical half-sample filter only, written straight to dst. */
    uint8_t fullbuf[4 * (4 + 5)];
    uint8_t *const mid = fullbuf + 4 * 2;    /* skip the two context rows above */
    copy_block4_8(fullbuf, src - stride * 2, 4, stride, 4 + 5);
    put_h264_qpel4_v_lowpass_8(dst, mid, stride, 4);
}
static void put_h264_qpel4_mc03_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc03: like mc01 but blended with the rows shifted one row down
     * (mid + 4 advances one row inside the width-4 copy buffer). */
    uint8_t fullbuf[4 * (4 + 5)];
    uint8_t *const mid = fullbuf + 4 * 2;
    uint8_t vbuf[4 * 4];
    copy_block4_8(fullbuf, src - stride * 2, 4, stride, 4 + 5);
    put_h264_qpel4_v_lowpass_8(vbuf, mid, 4, 4);
    put_pixels4_l2_8(dst, mid + 4, vbuf, stride, 4, 4, 4);
}
static void put_h264_qpel4_mc11_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc11: average of the horizontal and vertical half-sample filters. */
    uint8_t fullbuf[4 * (4 + 5)];
    uint8_t *const mid = fullbuf + 4 * 2;
    uint8_t hbuf[4 * 4];
    uint8_t vbuf[4 * 4];
    put_h264_qpel4_h_lowpass_8(hbuf, src, 4, stride);
    copy_block4_8(fullbuf, src - stride * 2, 4, stride, 4 + 5);
    put_h264_qpel4_v_lowpass_8(vbuf, mid, 4, 4);
    put_pixels4_l2_8(dst, hbuf, vbuf, stride, 4, 4, 4);
}
static void put_h264_qpel4_mc31_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc31: like mc11 but the vertical filter reads the column one pixel
     * to the right (copy starts at src + 1). */
    uint8_t fullbuf[4 * (4 + 5)];
    uint8_t *const mid = fullbuf + 4 * 2;
    uint8_t hbuf[4 * 4];
    uint8_t vbuf[4 * 4];
    put_h264_qpel4_h_lowpass_8(hbuf, src, 4, stride);
    copy_block4_8(fullbuf, src - stride * 2 + 1, 4, stride, 4 + 5);
    put_h264_qpel4_v_lowpass_8(vbuf, mid, 4, 4);
    put_pixels4_l2_8(dst, hbuf, vbuf, stride, 4, 4, 4);
}
static void put_h264_qpel4_mc13_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    /* mc13: like mc11 but the horizontal filter reads the row one line down. */
    uint8_t fullbuf[4 * (4 + 5)];
    uint8_t *const mid = fullbuf + 4 * 2;
    uint8_t hbuf[4 * 4];
    uint8_t vbuf[4 * 4];
    put_h264_qpel4_h_lowpass_8(hbuf, src + stride, 4, stride);
    copy_block4_8(fullbuf, src - stride * 2, 4, stride, 4 + 5);
    put_h264_qpel4_v_lowpass_8(vbuf, mid, 4, 4);
    put_pixels4_l2_8(dst, hbuf, vbuf, stride, 4, 4, 4);
}
/* H.264 qpel MC, 4x4, position (3/4,3/4): combines the horizontal half-pel
 * plane one row down (src + stride) with the vertical half-pel plane one
 * pixel to the right (src + sizeof(uint8_t)). */
static void put_h264_qpel4_mc33_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[4*(4 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 4 * 2 * sizeof(uint8_t);
    uint8_t halfH[4*4*sizeof(uint8_t)];
    uint8_t halfV[4*4*sizeof(uint8_t)];
    put_h264_qpel4_h_lowpass_8(halfH, src + stride, 4 * sizeof(uint8_t), stride);
    copy_block4_8(full, src - stride * 2 + sizeof(uint8_t), 4 * sizeof(uint8_t), stride, 4
                  + 5);
    put_h264_qpel4_v_lowpass_8(halfV, full_mid, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t));
    put_pixels4_l2_8(dst, halfH, halfV, stride, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), 4);
}
/* H.264 qpel MC, 4x4, position (1/2,1/2): 2-D (horizontal+vertical)
 * half-pel filter straight into dst; tmp holds 16-bit intermediates. */
static void put_h264_qpel4_mc22_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    int16_t tmp[4*(4 +5)*sizeof(uint8_t)];
    put_h264_qpel4_hv_lowpass_8(dst, tmp, src, stride, 4 * sizeof(uint8_t), stride);
}
/* H.264 qpel MC, 4x4, position (1/2,1/4): combines the horizontal half-pel
 * plane (halfH) with the 2-D hv half-pel plane (halfHV). */
static void put_h264_qpel4_mc21_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    int16_t tmp[4*(4 +5)*sizeof(uint8_t)];
    uint8_t halfH[4*4*sizeof(uint8_t)];
    uint8_t halfHV[4*4*sizeof(uint8_t)];
    put_h264_qpel4_h_lowpass_8(halfH, src, 4 * sizeof(uint8_t), stride);
    put_h264_qpel4_hv_lowpass_8(halfHV, tmp, src, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), stride);
    put_pixels4_l2_8(dst, halfH, halfHV, stride, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), 4);
}
/* H.264 qpel MC, 4x4, position (1/2,3/4): combines the horizontal half-pel
 * plane taken one row down (src + stride) with the 2-D hv half-pel plane. */
static void put_h264_qpel4_mc23_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    int16_t tmp[4*(4 +5)*sizeof(uint8_t)];
    uint8_t halfH[4*4*sizeof(uint8_t)];
    uint8_t halfHV[4*4*sizeof(uint8_t)];
    put_h264_qpel4_h_lowpass_8(halfH, src + stride, 4 * sizeof(uint8_t), stride);
    put_h264_qpel4_hv_lowpass_8(halfHV, tmp, src, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), stride);
    put_pixels4_l2_8(dst, halfH, halfHV, stride, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), 4);
}
/* H.264 qpel MC, 4x4, position (1/4,1/2): combines the vertical half-pel
 * plane (halfV, filtered from a 4+5-row copy with 2 rows of top margin)
 * with the 2-D hv half-pel plane (halfHV). */
static void put_h264_qpel4_mc12_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[4*(4 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 4 * 2 * sizeof(uint8_t);
    int16_t tmp[4*(4 +5)*sizeof(uint8_t)];
    uint8_t halfV[4*4*sizeof(uint8_t)];
    uint8_t halfHV[4*4*sizeof(uint8_t)];
    copy_block4_8(full, src - stride * 2, 4 * sizeof(uint8_t), stride, 4 + 5);
    put_h264_qpel4_v_lowpass_8(halfV, full_mid, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t));
    put_h264_qpel4_hv_lowpass_8(halfHV, tmp, src, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), stride);
    put_pixels4_l2_8(dst, halfV, halfHV, stride, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), 4);
}
/* H.264 qpel MC, 4x4, position (3/4,1/2): combines the vertical half-pel
 * plane one pixel to the right (copy source offset +sizeof(uint8_t)) with
 * the 2-D hv half-pel plane (halfHV). */
static void put_h264_qpel4_mc32_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[4*(4 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 4 * 2 * sizeof(uint8_t);
    int16_t tmp[4*(4 +5)*sizeof(uint8_t)];
    uint8_t halfV[4*4*sizeof(uint8_t)];
    uint8_t halfHV[4*4*sizeof(uint8_t)];
    copy_block4_8(full, src - stride * 2 + sizeof(uint8_t), 4 * sizeof(uint8_t), stride, 4 + 5);
    put_h264_qpel4_v_lowpass_8(halfV, full_mid, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t));
    put_h264_qpel4_hv_lowpass_8(halfHV, tmp, src, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), stride);
    put_pixels4_l2_8(dst, halfV, halfHV, stride, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), 4);
}
/* H.264 qpel MC, 8x8, position (0,0): plain full-pel copy. */
static __attribute__((unused)) void put_h264_qpel8_mc00_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    put_pixels8_8_c(dst, src, stride, 8);
}
/* H.264 qpel MC, 8x8, position (1/4,0): combines src with the horizontal
 * half-pel plane via put_pixels8_l2_8. */
static void put_h264_qpel8_mc10_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t half[8*8*sizeof(uint8_t)];
    put_h264_qpel8_h_lowpass_8(half, src, 8 * sizeof(uint8_t), stride);
    put_pixels8_l2_8(dst, src, half, stride, stride, 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC, 8x8, position (1/2,0): horizontal half-pel filter
 * straight into dst. */
static void put_h264_qpel8_mc20_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    put_h264_qpel8_h_lowpass_8(dst, src, stride, stride);
}
/* H.264 qpel MC, 8x8, position (3/4,0): combines src shifted one pixel
 * right with the horizontal half-pel plane. */
static void put_h264_qpel8_mc30_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t half[8*8*sizeof(uint8_t)];
    put_h264_qpel8_h_lowpass_8(half, src, 8 * sizeof(uint8_t), stride);
    put_pixels8_l2_8(dst, src + sizeof(uint8_t), half, stride, stride, 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC, 8x8, position (0,1/4): combines the full-pel rows
 * (full_mid) with the vertical half-pel plane; 'full' buffers 8+5 source
 * rows (2 above, 3 below) as vertical-filter margin. */
static void put_h264_qpel8_mc01_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[8*(8 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 8 * 2 * sizeof(uint8_t);
    uint8_t half[8*8*sizeof(uint8_t)];
    copy_block8_8(full, src - stride * 2, 8 * sizeof(uint8_t), stride, 8 + 5);
    put_h264_qpel8_v_lowpass_8(half, full_mid, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t));
    put_pixels8_l2_8(dst, full_mid, half, stride, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC, 8x8, position (0,1/2): pure vertical half-pel filter;
 * 'full' buffers 8+5 source rows for the filter margin. */
static void put_h264_qpel8_mc02_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[8*(8 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 8 * 2 * sizeof(uint8_t);
    copy_block8_8(full, src - stride * 2, 8 * sizeof(uint8_t), stride, 8 + 5);
    put_h264_qpel8_v_lowpass_8(dst, full_mid, stride, 8 * sizeof(uint8_t));
}
/* H.264 qpel MC, 8x8, position (0,3/4): combines the full-pel rows one
 * line down (full_mid + one row) with the vertical half-pel plane. */
static void put_h264_qpel8_mc03_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[8*(8 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 8 * 2 * sizeof(uint8_t);
    uint8_t half[8*8*sizeof(uint8_t)];
    copy_block8_8(full, src - stride * 2, 8 * sizeof(uint8_t), stride, 8 + 5);
    put_h264_qpel8_v_lowpass_8(half, full_mid, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t));
    put_pixels8_l2_8(dst, full_mid + 8 * sizeof(uint8_t), half, stride, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC, 8x8, position (1/4,1/4): combines the horizontal half-pel
 * plane (halfH) with the vertical half-pel plane (halfV); 'full' buffers
 * 8+5 source rows as vertical-filter margin. */
static void put_h264_qpel8_mc11_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[8*(8 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 8 * 2 * sizeof(uint8_t);
    uint8_t halfH[8*8*sizeof(uint8_t)];
    uint8_t halfV[8*8*sizeof(uint8_t)];
    put_h264_qpel8_h_lowpass_8(halfH, src, 8 * sizeof(uint8_t), stride);
    copy_block8_8(full, src - stride * 2, 8 * sizeof(uint8_t), stride, 8 + 5);
    put_h264_qpel8_v_lowpass_8(halfV, full_mid, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t));
    put_pixels8_l2_8(dst, halfH, halfV, stride, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC, 8x8, position (3/4,1/4): combines the horizontal half-pel
 * plane with a vertical half-pel plane taken one pixel to the right
 * (+sizeof(uint8_t) copy-source offset). */
static void put_h264_qpel8_mc31_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[8*(8 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 8 * 2 * sizeof(uint8_t);
    uint8_t halfH[8*8*sizeof(uint8_t)];
    uint8_t halfV[8*8*sizeof(uint8_t)];
    put_h264_qpel8_h_lowpass_8(halfH, src, 8 * sizeof(uint8_t), stride);
    copy_block8_8(full, src - stride * 2 + sizeof(uint8_t), 8 * sizeof(uint8_t), stride, 8 + 5);
    put_h264_qpel8_v_lowpass_8(halfV, full_mid, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t));
    put_pixels8_l2_8(dst, halfH, halfV, stride, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC, 8x8, position (1/4,3/4): combines a horizontal half-pel
 * plane taken one row down (src + stride) with the vertical half-pel plane. */
static void put_h264_qpel8_mc13_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[8*(8 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 8 * 2 * sizeof(uint8_t);
    uint8_t halfH[8*8*sizeof(uint8_t)];
    uint8_t halfV[8*8*sizeof(uint8_t)];
    put_h264_qpel8_h_lowpass_8(halfH, src + stride, 8 * sizeof(uint8_t), stride);
    copy_block8_8(full, src - stride * 2, 8 * sizeof(uint8_t), stride, 8 + 5);
    put_h264_qpel8_v_lowpass_8(halfV, full_mid, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t));
    put_pixels8_l2_8(dst, halfH, halfV, stride, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC, 8x8, position (3/4,3/4): combines the horizontal half-pel
 * plane one row down with the vertical half-pel plane one pixel right. */
static void put_h264_qpel8_mc33_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[8*(8 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 8 * 2 * sizeof(uint8_t);
    uint8_t halfH[8*8*sizeof(uint8_t)];
    uint8_t halfV[8*8*sizeof(uint8_t)];
    put_h264_qpel8_h_lowpass_8(halfH, src + stride, 8 * sizeof(uint8_t), stride);
    copy_block8_8(full, src - stride * 2 + sizeof(uint8_t), 8 * sizeof(uint8_t), stride, 8
                  + 5);
    put_h264_qpel8_v_lowpass_8(halfV, full_mid, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t));
    put_pixels8_l2_8(dst, halfH, halfV, stride, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC, 8x8, position (1/2,1/2): 2-D (horizontal+vertical)
 * half-pel filter straight into dst; tmp holds 16-bit intermediates. */
static void put_h264_qpel8_mc22_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    int16_t tmp[8*(8 +5)*sizeof(uint8_t)];
    put_h264_qpel8_hv_lowpass_8(dst, tmp, src, stride, 8 * sizeof(uint8_t), stride);
}
/* H.264 qpel MC, 8x8, position (1/2,1/4): combines the horizontal half-pel
 * plane (halfH) with the 2-D hv half-pel plane (halfHV). */
static void put_h264_qpel8_mc21_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    int16_t tmp[8*(8 +5)*sizeof(uint8_t)];
    uint8_t halfH[8*8*sizeof(uint8_t)];
    uint8_t halfHV[8*8*sizeof(uint8_t)];
    put_h264_qpel8_h_lowpass_8(halfH, src, 8 * sizeof(uint8_t), stride);
    put_h264_qpel8_hv_lowpass_8(halfHV, tmp, src, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), stride);
    put_pixels8_l2_8(dst, halfH, halfHV, stride, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC, 8x8, position (1/2,3/4): combines the horizontal half-pel
 * plane taken one row down (src + stride) with the 2-D hv half-pel plane. */
static void put_h264_qpel8_mc23_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    int16_t tmp[8*(8 +5)*sizeof(uint8_t)];
    uint8_t halfH[8*8*sizeof(uint8_t)];
    uint8_t halfHV[8*8*sizeof(uint8_t)];
    put_h264_qpel8_h_lowpass_8(halfH, src + stride, 8 * sizeof(uint8_t), stride);
    put_h264_qpel8_hv_lowpass_8(halfHV, tmp, src, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), stride);
    put_pixels8_l2_8(dst, halfH, halfHV, stride, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC, 8x8, position (1/4,1/2): combines the vertical half-pel
 * plane (halfV, filtered from an 8+5-row copy) with the 2-D hv half-pel
 * plane (halfHV). */
static void put_h264_qpel8_mc12_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[8*(8 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 8 * 2 * sizeof(uint8_t);
    int16_t tmp[8*(8 +5)*sizeof(uint8_t)];
    uint8_t halfV[8*8*sizeof(uint8_t)];
    uint8_t halfHV[8*8*sizeof(uint8_t)];
    copy_block8_8(full, src - stride * 2, 8 * sizeof(uint8_t), stride, 8 + 5);
    put_h264_qpel8_v_lowpass_8(halfV, full_mid, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t));
    put_h264_qpel8_hv_lowpass_8(halfHV, tmp, src, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), stride);
    put_pixels8_l2_8(dst, halfV, halfHV, stride, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC, 8x8, position (3/4,1/2): combines the vertical half-pel
 * plane one pixel to the right with the 2-D hv half-pel plane. */
static void put_h264_qpel8_mc32_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[8*(8 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 8 * 2 * sizeof(uint8_t);
    int16_t tmp[8*(8 +5)*sizeof(uint8_t)];
    uint8_t halfV[8*8*sizeof(uint8_t)];
    uint8_t halfHV[8*8*sizeof(uint8_t)];
    copy_block8_8(full, src - stride * 2 + sizeof(uint8_t), 8 * sizeof(uint8_t), stride, 8 + 5);
    put_h264_qpel8_v_lowpass_8(halfV, full_mid, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t));
    put_h264_qpel8_hv_lowpass_8(halfHV, tmp, src, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), stride);
    put_pixels8_l2_8(dst, halfV, halfHV, stride, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC, 16x16, position (0,0): plain full-pel copy. */
static __attribute__((unused)) void put_h264_qpel16_mc00_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    put_pixels16_8_c(dst, src, stride, 16);
}
/* H.264 qpel MC, 16x16, position (1/4,0): combines src with the horizontal
 * half-pel plane. */
static void put_h264_qpel16_mc10_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t half[16*16*sizeof(uint8_t)];
    put_h264_qpel16_h_lowpass_8(half, src, 16 * sizeof(uint8_t), stride);
    put_pixels16_l2_8(dst, src, half, stride, stride, 16 * sizeof(uint8_t), 16);
}
/* H.264 qpel MC, 16x16, position (1/2,0): horizontal half-pel filter
 * straight into dst. */
static void put_h264_qpel16_mc20_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    put_h264_qpel16_h_lowpass_8(dst, src, stride, stride);
}
/* H.264 qpel MC, 16x16, position (3/4,0): combines src shifted one pixel
 * right with the horizontal half-pel plane. */
static void put_h264_qpel16_mc30_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t half[16*16*sizeof(uint8_t)];
    put_h264_qpel16_h_lowpass_8(half, src, 16 * sizeof(uint8_t), stride);
    put_pixels16_l2_8(dst, src + sizeof(uint8_t), half, stride, stride, 16 * sizeof(uint8_t), 16);
}
/* H.264 qpel MC, 16x16, position (0,1/4): combines the full-pel rows with
 * the vertical half-pel plane; 'full' buffers 16+5 source rows (2 above,
 * 3 below) as vertical-filter margin. */
static void put_h264_qpel16_mc01_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[16*(16 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 16 * 2 * sizeof(uint8_t);
    uint8_t half[16*16*sizeof(uint8_t)];
    copy_block16_8(full, src - stride * 2, 16 * sizeof(uint8_t), stride, 16 + 5);
    put_h264_qpel16_v_lowpass_8(half, full_mid, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t));
    put_pixels16_l2_8(dst, full_mid, half, stride, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t), 16);
}
/* H.264 qpel MC, 16x16, position (0,1/2): pure vertical half-pel filter;
 * 'full' buffers 16+5 source rows for the filter margin. */
static void put_h264_qpel16_mc02_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[16*(16 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 16 * 2 * sizeof(uint8_t);
    copy_block16_8(full, src - stride * 2, 16 * sizeof(uint8_t), stride, 16 + 5);
    put_h264_qpel16_v_lowpass_8(dst, full_mid, stride, 16 * sizeof(uint8_t));
}
/* H.264 qpel MC, 16x16, position (0,3/4): combines the full-pel rows one
 * line down (full_mid + one row) with the vertical half-pel plane. */
static void put_h264_qpel16_mc03_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[16*(16 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 16 * 2 * sizeof(uint8_t);
    uint8_t half[16*16*sizeof(uint8_t)];
    copy_block16_8(full, src - stride * 2, 16 * sizeof(uint8_t), stride, 16 + 5);
    put_h264_qpel16_v_lowpass_8(half, full_mid, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t));
    put_pixels16_l2_8(dst, full_mid + 16 * sizeof(uint8_t), half, stride, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t), 16);
}
/* H.264 qpel MC, 16x16, position (1/4,1/4): combines the horizontal
 * half-pel plane (halfH) with the vertical half-pel plane (halfV). */
static void put_h264_qpel16_mc11_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[16*(16 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 16 * 2 * sizeof(uint8_t);
    uint8_t halfH[16*16*sizeof(uint8_t)];
    uint8_t halfV[16*16*sizeof(uint8_t)];
    put_h264_qpel16_h_lowpass_8(halfH, src, 16 * sizeof(uint8_t), stride);
    copy_block16_8(full, src - stride * 2, 16 * sizeof(uint8_t), stride, 16 + 5);
    put_h264_qpel16_v_lowpass_8(halfV, full_mid, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t));
    put_pixels16_l2_8(dst, halfH, halfV, stride, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t), 16);
}
/* H.264 qpel MC, 16x16, position (3/4,1/4): combines the horizontal
 * half-pel plane with a vertical half-pel plane taken one pixel right
 * (+sizeof(uint8_t) copy-source offset). */
static void put_h264_qpel16_mc31_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[16*(16 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 16 * 2 * sizeof(uint8_t);
    uint8_t halfH[16*16*sizeof(uint8_t)];
    uint8_t halfV[16*16*sizeof(uint8_t)];
    put_h264_qpel16_h_lowpass_8(halfH, src, 16 * sizeof(uint8_t), stride);
    copy_block16_8(full, src - stride * 2 + sizeof(uint8_t), 16 * sizeof(uint8_t), stride, 16 + 5);
    put_h264_qpel16_v_lowpass_8(halfV, full_mid, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t));
    put_pixels16_l2_8(dst, halfH, halfV, stride, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t), 16);
}
/* H.264 qpel MC, 16x16, position (1/4,3/4): combines a horizontal half-pel
 * plane taken one row down (src + stride) with the vertical half-pel plane. */
static void put_h264_qpel16_mc13_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[16*(16 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 16 * 2 * sizeof(uint8_t);
    uint8_t halfH[16*16*sizeof(uint8_t)];
    uint8_t halfV[16*16*sizeof(uint8_t)];
    put_h264_qpel16_h_lowpass_8(halfH, src + stride, 16 * sizeof(uint8_t), stride);
    copy_block16_8(full, src - stride * 2, 16 * sizeof(uint8_t), stride, 16 + 5);
    put_h264_qpel16_v_lowpass_8(halfV, full_mid, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t));
    put_pixels16_l2_8(dst, halfH, halfV, stride, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t), 16);
}
/* H.264 qpel MC, 16x16, position (3/4,3/4): combines the horizontal
 * half-pel plane one row down with the vertical half-pel plane one pixel
 * to the right. */
static void put_h264_qpel16_mc33_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[16*(16 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 16 * 2 * sizeof(uint8_t);
    uint8_t halfH[16*16*sizeof(uint8_t)];
    uint8_t halfV[16*16*sizeof(uint8_t)];
    put_h264_qpel16_h_lowpass_8(
        halfH, src + stride, 16 * sizeof(uint8_t), stride);
    copy_block16_8(full, src - stride * 2 + sizeof(uint8_t), 16 * sizeof(uint8_t), stride, 16 + 5);
    put_h264_qpel16_v_lowpass_8(halfV, full_mid, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t));
    put_pixels16_l2_8(dst, halfH, halfV, stride, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t), 16);
}
/* H.264 qpel MC, 16x16, position (1/2,1/2): 2-D (horizontal+vertical)
 * half-pel filter straight into dst; tmp holds 16-bit intermediates. */
static void put_h264_qpel16_mc22_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    int16_t tmp[16*(16 +5)*sizeof(uint8_t)];
    put_h264_qpel16_hv_lowpass_8(dst, tmp, src, stride, 16 * sizeof(uint8_t), stride);
}
/* H.264 qpel MC, 16x16, position (1/2,1/4): combines the horizontal
 * half-pel plane (halfH) with the 2-D hv half-pel plane (halfHV). */
static void put_h264_qpel16_mc21_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    int16_t tmp[16*(16 +5)*sizeof(uint8_t)];
    uint8_t halfH[16*16*sizeof(uint8_t)];
    uint8_t halfHV[16*16*sizeof(uint8_t)];
    put_h264_qpel16_h_lowpass_8(halfH, src, 16 * sizeof(uint8_t), stride);
    put_h264_qpel16_hv_lowpass_8(halfHV, tmp, src, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t), stride);
    put_pixels16_l2_8(dst, halfH, halfHV, stride, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t), 16);
}
/* H.264 qpel MC, 16x16, position (1/2,3/4): combines the horizontal
 * half-pel plane one row down (src + stride) with the 2-D hv half-pel
 * plane. */
static void put_h264_qpel16_mc23_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    int16_t tmp[16*(16 +5)*sizeof(uint8_t)];
    uint8_t halfH[16*16*sizeof(uint8_t)];
    uint8_t halfHV[16*16*sizeof(uint8_t)];
    put_h264_qpel16_h_lowpass_8(halfH, src + stride, 16 * sizeof(uint8_t), stride);
    put_h264_qpel16_hv_lowpass_8(halfHV, tmp, src, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t), stride);
    put_pixels16_l2_8(dst, halfH, halfHV, stride, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t), 16);
}
/* H.264 qpel MC, 16x16, position (1/4,1/2): combines the vertical half-pel
 * plane (halfV, filtered from a 16+5-row copy) with the 2-D hv half-pel
 * plane (halfHV). */
static void put_h264_qpel16_mc12_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[16*(16 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 16 * 2 * sizeof(uint8_t);
    int16_t tmp[16*(16 +5)*sizeof(uint8_t)];
    uint8_t halfV[16*16*sizeof(uint8_t)];
    uint8_t halfHV[16*16*sizeof(uint8_t)];
    copy_block16_8(full, src - stride * 2, 16 * sizeof(uint8_t), stride, 16 + 5);
    put_h264_qpel16_v_lowpass_8(halfV, full_mid, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t));
    put_h264_qpel16_hv_lowpass_8(halfHV, tmp, src, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t), stride);
    put_pixels16_l2_8(dst, halfV, halfHV, stride, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t), 16);
}
/* H.264 qpel MC, 16x16, position (3/4,1/2): combines the vertical half-pel
 * plane one pixel to the right with the 2-D hv half-pel plane. */
static void put_h264_qpel16_mc32_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[16*(16 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 16 * 2 * sizeof(uint8_t);
    int16_t tmp[16*(16 +5)*sizeof(uint8_t)];
    uint8_t halfV[16*16*sizeof(uint8_t)];
    uint8_t halfHV[16*16*sizeof(uint8_t)];
    copy_block16_8(full, src - stride * 2 + sizeof(uint8_t), 16 * sizeof(uint8_t), stride, 16 + 5);
    put_h264_qpel16_v_lowpass_8(halfV, full_mid, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t));
    put_h264_qpel16_hv_lowpass_8(halfHV, tmp, src, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t), stride);
    put_pixels16_l2_8(dst, halfV, halfHV, stride, 16 * sizeof(uint8_t), 16 * sizeof(uint8_t), 16);
}
/* H.264 qpel MC (averaging variant), 4x4, position (0,0): full-pel
 * prediction averaged into the existing contents of dst. */
static __attribute__((unused)) void avg_h264_qpel4_mc00_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    avg_pixels4_8_c(dst, src, stride, 4);
}
/* H.264 qpel MC (averaging), 4x4, position (1/4,0): src combined with the
 * horizontal half-pel plane, then averaged into dst. */
static void avg_h264_qpel4_mc10_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t half[4*4*sizeof(uint8_t)];
    put_h264_qpel4_h_lowpass_8(half, src, 4 * sizeof(uint8_t), stride);
    avg_pixels4_l2_8(dst, src, half, stride, stride, 4 * sizeof(uint8_t), 4);
}
/* H.264 qpel MC (averaging), 4x4, position (1/2,0): horizontal half-pel
 * filter averaged into dst. */
static void avg_h264_qpel4_mc20_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    avg_h264_qpel4_h_lowpass_8(dst, src, stride, stride);
}
/* H.264 qpel MC (averaging), 4x4, position (3/4,0): src shifted one pixel
 * right combined with the horizontal half-pel plane, averaged into dst. */
static void avg_h264_qpel4_mc30_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t half[4*4*sizeof(uint8_t)];
    put_h264_qpel4_h_lowpass_8(half, src, 4 * sizeof(uint8_t), stride);
    avg_pixels4_l2_8(dst, src + sizeof(uint8_t), half, stride, stride, 4 * sizeof(uint8_t), 4);
}
/* H.264 qpel MC (averaging), 4x4, position (0,1/4): full-pel rows combined
 * with the vertical half-pel plane; 'full' buffers 4+5 source rows
 * (2 above, 3 below) as vertical-filter margin. */
static void avg_h264_qpel4_mc01_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[4*(4 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 4 * 2 * sizeof(uint8_t);
    uint8_t half[4*4*sizeof(uint8_t)];
    copy_block4_8(full, src - stride * 2, 4 * sizeof(uint8_t), stride, 4 + 5);
    put_h264_qpel4_v_lowpass_8(half, full_mid, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t));
    avg_pixels4_l2_8(dst, full_mid, half, stride, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), 4);
}
/* H.264 qpel MC (averaging), 4x4, position (0,1/2): vertical half-pel
 * filter averaged into dst; 'full' buffers 4+5 source rows. */
static void avg_h264_qpel4_mc02_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[4*(4 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 4 * 2 * sizeof(uint8_t);
    copy_block4_8(full, src - stride * 2, 4 * sizeof(uint8_t), stride, 4 + 5);
    avg_h264_qpel4_v_lowpass_8(dst, full_mid, stride, 4 * sizeof(uint8_t));
}
/* H.264 qpel MC (averaging), 4x4, position (0,3/4): full-pel rows one line
 * down (full_mid + one row) combined with the vertical half-pel plane. */
static void avg_h264_qpel4_mc03_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[4*(4 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 4 * 2 * sizeof(uint8_t);
    uint8_t half[4*4*sizeof(uint8_t)];
    copy_block4_8(full, src - stride * 2, 4 * sizeof(uint8_t), stride, 4 + 5);
    put_h264_qpel4_v_lowpass_8(half, full_mid, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t));
    avg_pixels4_l2_8(dst, full_mid + 4 * sizeof(uint8_t), half, stride, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), 4);
}
/* H.264 qpel MC (averaging), 4x4, position (1/4,1/4): horizontal half-pel
 * plane combined with the vertical half-pel plane, averaged into dst. */
static void avg_h264_qpel4_mc11_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[4*(4 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 4 * 2 * sizeof(uint8_t);
    uint8_t halfH[4*4*sizeof(uint8_t)];
    uint8_t halfV[4*4*sizeof(uint8_t)];
    put_h264_qpel4_h_lowpass_8(halfH, src, 4 * sizeof(uint8_t), stride);
    copy_block4_8(full, src - stride * 2, 4 * sizeof(uint8_t), stride, 4 + 5);
    put_h264_qpel4_v_lowpass_8(halfV, full_mid, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t));
    avg_pixels4_l2_8(dst, halfH, halfV, stride, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), 4);
}
/* H.264 qpel MC (averaging), 4x4, position (3/4,1/4): horizontal half-pel
 * plane combined with a vertical half-pel plane taken one pixel right. */
static void avg_h264_qpel4_mc31_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[4*(4 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 4 * 2 * sizeof(uint8_t);
    uint8_t halfH[4*4*sizeof(uint8_t)];
    uint8_t halfV[4*4*sizeof(uint8_t)];
    put_h264_qpel4_h_lowpass_8(halfH, src, 4 * sizeof(uint8_t), stride);
    copy_block4_8(full, src - stride * 2 + sizeof(uint8_t), 4 * sizeof(uint8_t), stride, 4 + 5);
    put_h264_qpel4_v_lowpass_8(halfV, full_mid, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t));
    avg_pixels4_l2_8(dst, halfH, halfV, stride, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), 4);
}
/* H.264 qpel MC (averaging), 4x4, position (1/4,3/4): horizontal half-pel
 * plane one row down combined with the vertical half-pel plane. */
static void avg_h264_qpel4_mc13_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[4*(4 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 4 * 2 * sizeof(uint8_t);
    uint8_t halfH[4*4*sizeof(uint8_t)];
    uint8_t halfV[4*4*sizeof(uint8_t)];
    put_h264_qpel4_h_lowpass_8(halfH, src + stride, 4 * sizeof(uint8_t), stride);
    copy_block4_8(full, src - stride * 2, 4 * sizeof(uint8_t), stride, 4 + 5);
    put_h264_qpel4_v_lowpass_8(halfV, full_mid, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t));
    avg_pixels4_l2_8(dst, halfH, halfV, stride, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), 4);
}
/* H.264 qpel MC (averaging), 4x4, position (3/4,3/4): horizontal half-pel
 * plane one row down combined with the vertical half-pel plane one pixel
 * to the right. */
static void avg_h264_qpel4_mc33_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[4*(4 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 4 * 2 * sizeof(uint8_t);
    uint8_t halfH[4*4*sizeof(uint8_t)];
    uint8_t halfV[4*4*sizeof(uint8_t)];
    put_h264_qpel4_h_lowpass_8(halfH, src + stride, 4 * sizeof(uint8_t), stride);
    copy_block4_8(full, src - stride * 2 + sizeof(uint8_t), 4 * sizeof(uint8_t), stride, 4
                  + 5);
    put_h264_qpel4_v_lowpass_8(halfV, full_mid, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t));
    avg_pixels4_l2_8(dst, halfH, halfV, stride, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), 4);
}
/* H.264 qpel MC (averaging), 4x4, position (1/2,1/2): 2-D hv half-pel
 * filter averaged into dst; tmp holds 16-bit intermediates. */
static void avg_h264_qpel4_mc22_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    int16_t tmp[4*(4 +5)*sizeof(uint8_t)];
    avg_h264_qpel4_hv_lowpass_8(dst, tmp, src, stride, 4 * sizeof(uint8_t), stride);
}
/* H.264 qpel MC (averaging), 4x4, position (1/2,1/4): horizontal half-pel
 * plane combined with the 2-D hv half-pel plane, averaged into dst. */
static void avg_h264_qpel4_mc21_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    int16_t tmp[4*(4 +5)*sizeof(uint8_t)];
    uint8_t halfH[4*4*sizeof(uint8_t)];
    uint8_t halfHV[4*4*sizeof(uint8_t)];
    put_h264_qpel4_h_lowpass_8(halfH, src, 4 * sizeof(uint8_t), stride);
    put_h264_qpel4_hv_lowpass_8(halfHV, tmp, src, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), stride);
    avg_pixels4_l2_8(dst, halfH, halfHV, stride, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), 4);
}
/* H.264 qpel MC (averaging), 4x4, position (1/2,3/4): horizontal half-pel
 * plane one row down (src + stride) combined with the 2-D hv half-pel plane. */
static void avg_h264_qpel4_mc23_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    int16_t tmp[4*(4 +5)*sizeof(uint8_t)];
    uint8_t halfH[4*4*sizeof(uint8_t)];
    uint8_t halfHV[4*4*sizeof(uint8_t)];
    put_h264_qpel4_h_lowpass_8(halfH, src + stride, 4 * sizeof(uint8_t), stride);
    put_h264_qpel4_hv_lowpass_8(halfHV, tmp, src, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), stride);
    avg_pixels4_l2_8(dst, halfH, halfHV, stride, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), 4);
}
/* H.264 qpel MC (averaging), 4x4, position (1/4,1/2): vertical half-pel
 * plane (from a 4+5-row copy) combined with the 2-D hv half-pel plane. */
static void avg_h264_qpel4_mc12_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[4*(4 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 4 * 2 * sizeof(uint8_t);
    int16_t tmp[4*(4 +5)*sizeof(uint8_t)];
    uint8_t halfV[4*4*sizeof(uint8_t)];
    uint8_t halfHV[4*4*sizeof(uint8_t)];
    copy_block4_8(full, src - stride * 2, 4 * sizeof(uint8_t), stride, 4 + 5);
    put_h264_qpel4_v_lowpass_8(halfV, full_mid, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t));
    put_h264_qpel4_hv_lowpass_8(halfHV, tmp, src, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), stride);
    avg_pixels4_l2_8(dst, halfV, halfHV, stride, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), 4);
}
/* H.264 qpel MC (averaging), 4x4, position (3/4,1/2): vertical half-pel
 * plane one pixel to the right combined with the 2-D hv half-pel plane. */
static void avg_h264_qpel4_mc32_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[4*(4 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 4 * 2 * sizeof(uint8_t);
    int16_t tmp[4*(4 +5)*sizeof(uint8_t)];
    uint8_t halfV[4*4*sizeof(uint8_t)];
    uint8_t halfHV[4*4*sizeof(uint8_t)];
    copy_block4_8(full, src - stride * 2 + sizeof(uint8_t), 4 * sizeof(uint8_t), stride, 4 + 5);
    put_h264_qpel4_v_lowpass_8(halfV, full_mid, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t));
    put_h264_qpel4_hv_lowpass_8(halfHV, tmp, src, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), stride);
    avg_pixels4_l2_8(dst, halfV, halfHV, stride, 4 * sizeof(uint8_t), 4 * sizeof(uint8_t), 4);
}
/* H.264 qpel MC (averaging), 8x8, position (0,0): full-pel prediction
 * averaged into the existing contents of dst. */
static __attribute__((unused)) void avg_h264_qpel8_mc00_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    avg_pixels8_8_c(dst, src, stride, 8);
}
/* H.264 qpel MC (averaging), 8x8, position (1/4,0): src combined with the
 * horizontal half-pel plane, averaged into dst. */
static void avg_h264_qpel8_mc10_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t half[8*8*sizeof(uint8_t)];
    put_h264_qpel8_h_lowpass_8(half, src, 8 * sizeof(uint8_t), stride);
    avg_pixels8_l2_8(dst, src, half, stride, stride, 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC (averaging), 8x8, position (1/2,0): horizontal half-pel
 * filter averaged into dst. */
static void avg_h264_qpel8_mc20_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    avg_h264_qpel8_h_lowpass_8(dst, src, stride, stride);
}
/* H.264 qpel MC (averaging), 8x8, position (3/4,0): src shifted one pixel
 * right combined with the horizontal half-pel plane, averaged into dst. */
static void avg_h264_qpel8_mc30_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t half[8*8*sizeof(uint8_t)];
    put_h264_qpel8_h_lowpass_8(half, src, 8 * sizeof(uint8_t), stride);
    avg_pixels8_l2_8(dst, src + sizeof(uint8_t), half, stride, stride, 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC (averaging), 8x8, position (0,1/4): full-pel rows combined
 * with the vertical half-pel plane; 'full' buffers 8+5 source rows
 * (2 above, 3 below) as vertical-filter margin. */
static void avg_h264_qpel8_mc01_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[8*(8 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 8 * 2 * sizeof(uint8_t);
    uint8_t half[8*8*sizeof(uint8_t)];
    copy_block8_8(full, src - stride * 2, 8 * sizeof(uint8_t), stride, 8 + 5);
    put_h264_qpel8_v_lowpass_8(half, full_mid, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t));
    avg_pixels8_l2_8(dst, full_mid, half, stride, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC (averaging), 8x8, position (0,1/2): vertical half-pel
 * filter averaged into dst; 'full' buffers 8+5 source rows. */
static void avg_h264_qpel8_mc02_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[8*(8 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 8 * 2 * sizeof(uint8_t);
    copy_block8_8(full, src - stride * 2, 8 * sizeof(uint8_t), stride, 8 + 5);
    avg_h264_qpel8_v_lowpass_8(dst, full_mid, stride, 8 * sizeof(uint8_t));
}
/* H.264 qpel MC (averaging), 8x8, position (0,3/4): full-pel rows one line
 * down (full_mid + one row) combined with the vertical half-pel plane. */
static void avg_h264_qpel8_mc03_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[8*(8 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 8 * 2 * sizeof(uint8_t);
    uint8_t half[8*8*sizeof(uint8_t)];
    copy_block8_8(full, src - stride * 2, 8 * sizeof(uint8_t), stride, 8 + 5);
    put_h264_qpel8_v_lowpass_8(half, full_mid, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t));
    avg_pixels8_l2_8(dst, full_mid + 8 * sizeof(uint8_t), half, stride, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC (averaging), 8x8, position (1/4,1/4): horizontal half-pel
 * plane combined with the vertical half-pel plane, averaged into dst. */
static void avg_h264_qpel8_mc11_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[8*(8 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 8 * 2 * sizeof(uint8_t);
    uint8_t halfH[8*8*sizeof(uint8_t)];
    uint8_t halfV[8*8*sizeof(uint8_t)];
    put_h264_qpel8_h_lowpass_8(halfH, src, 8 * sizeof(uint8_t), stride);
    copy_block8_8(full, src - stride * 2, 8 * sizeof(uint8_t), stride, 8 + 5);
    put_h264_qpel8_v_lowpass_8(halfV, full_mid, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t));
    avg_pixels8_l2_8(dst, halfH, halfV, stride, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC (averaging), 8x8, position (3/4,1/4): horizontal half-pel
 * plane combined with a vertical half-pel plane taken one pixel right. */
static void avg_h264_qpel8_mc31_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[8*(8 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 8 * 2 * sizeof(uint8_t);
    uint8_t halfH[8*8*sizeof(uint8_t)];
    uint8_t halfV[8*8*sizeof(uint8_t)];
    put_h264_qpel8_h_lowpass_8(halfH, src, 8 * sizeof(uint8_t), stride);
    copy_block8_8(full, src - stride * 2 + sizeof(uint8_t), 8 * sizeof(uint8_t), stride, 8 + 5);
    put_h264_qpel8_v_lowpass_8(halfV, full_mid, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t));
    avg_pixels8_l2_8(dst, halfH, halfV, stride, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC (averaging), 8x8, position (1/4,3/4): horizontal half-pel
 * plane one row down combined with the vertical half-pel plane. */
static void avg_h264_qpel8_mc13_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[8*(8 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 8 * 2 * sizeof(uint8_t);
    uint8_t halfH[8*8*sizeof(uint8_t)];
    uint8_t halfV[8*8*sizeof(uint8_t)];
    put_h264_qpel8_h_lowpass_8(halfH, src + stride, 8 * sizeof(uint8_t), stride);
    copy_block8_8(full, src - stride * 2, 8 * sizeof(uint8_t), stride, 8 + 5);
    put_h264_qpel8_v_lowpass_8(halfV, full_mid, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t));
    avg_pixels8_l2_8(dst, halfH, halfV, stride, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC (averaging), 8x8, position (3/4,3/4): horizontal half-pel
 * plane one row down combined with the vertical half-pel plane one pixel
 * to the right. */
static void avg_h264_qpel8_mc33_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[8*(8 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 8 * 2 * sizeof(uint8_t);
    uint8_t halfH[8*8*sizeof(uint8_t)];
    uint8_t halfV[8*8*sizeof(uint8_t)];
    put_h264_qpel8_h_lowpass_8(halfH, src + stride, 8 * sizeof(uint8_t), stride);
    copy_block8_8(full, src - stride * 2 + sizeof(uint8_t), 8 * sizeof(uint8_t), stride, 8
                  + 5);
    put_h264_qpel8_v_lowpass_8(halfV, full_mid, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t));
    avg_pixels8_l2_8(dst, halfH, halfV, stride, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC (averaging), 8x8, position (1/2,1/2): 2-D hv half-pel
 * filter averaged into dst; tmp holds 16-bit intermediates. */
static void avg_h264_qpel8_mc22_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    int16_t tmp[8*(8 +5)*sizeof(uint8_t)];
    avg_h264_qpel8_hv_lowpass_8(dst, tmp, src, stride, 8 * sizeof(uint8_t), stride);
}
/* H.264 qpel MC (averaging), 8x8, position (1/2,1/4): horizontal half-pel
 * plane combined with the 2-D hv half-pel plane, averaged into dst. */
static void avg_h264_qpel8_mc21_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    int16_t tmp[8*(8 +5)*sizeof(uint8_t)];
    uint8_t halfH[8*8*sizeof(uint8_t)];
    uint8_t halfHV[8*8*sizeof(uint8_t)];
    put_h264_qpel8_h_lowpass_8(halfH, src, 8 * sizeof(uint8_t), stride);
    put_h264_qpel8_hv_lowpass_8(halfHV, tmp, src, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), stride);
    avg_pixels8_l2_8(dst, halfH, halfHV, stride, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC (averaging), 8x8, position (1/2,3/4): horizontal half-pel
 * plane one row down (src + stride) combined with the 2-D hv half-pel plane. */
static void avg_h264_qpel8_mc23_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    int16_t tmp[8*(8 +5)*sizeof(uint8_t)];
    uint8_t halfH[8*8*sizeof(uint8_t)];
    uint8_t halfHV[8*8*sizeof(uint8_t)];
    put_h264_qpel8_h_lowpass_8(halfH, src + stride, 8 * sizeof(uint8_t), stride);
    put_h264_qpel8_hv_lowpass_8(halfHV, tmp, src, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), stride);
    avg_pixels8_l2_8(dst, halfH, halfHV, stride, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), 8);
}
/* H.264 qpel MC (averaging), 8x8, position (1/4,1/2): vertical half-pel
 * plane (from an 8+5-row copy) combined with the 2-D hv half-pel plane. */
static void avg_h264_qpel8_mc12_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[8*(8 +5)*sizeof(uint8_t)];
    uint8_t *const full_mid = full + 8 * 2 * sizeof(uint8_t);
    int16_t tmp[8*(8 +5)*sizeof(uint8_t)];
    uint8_t halfV[8*8*sizeof(uint8_t)];
    uint8_t halfHV[8*8*sizeof(uint8_t)];
    copy_block8_8(full, src - stride * 2, 8 * sizeof(uint8_t), stride, 8 + 5);
    put_h264_qpel8_v_lowpass_8(halfV, full_mid, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t));
    put_h264_qpel8_hv_lowpass_8(halfHV, tmp, src, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), stride);
    avg_pixels8_l2_8(dst, halfV, halfHV, stride, 8 * sizeof(uint8_t), 8 * sizeof(uint8_t), 8);
}
/* qpel position (3,2): like mc12, but the vertical filter source is shifted
 * one pixel to the right (src + 1). */
static void avg_h264_qpel8_mc32_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[8 * (8 + 5)];              /* 8x13 source copy with margins */
    uint8_t *const full_mid = full + 8 * 2; /* row aligned with src */
    int16_t tmp[8 * (8 + 5)];               /* intermediate for the 2D filter */
    uint8_t halfV[8 * 8];                   /* vertical half-pel plane (x+1) */
    uint8_t halfHV[8 * 8];                  /* centre half-pel plane */

    copy_block8_8(full, src - stride * 2 + 1, 8, stride, 8 + 5);
    put_h264_qpel8_v_lowpass_8(halfV, full_mid, 8, 8);
    put_h264_qpel8_hv_lowpass_8(halfHV, tmp, src, 8, 8, stride);
    avg_pixels8_l2_8(dst, halfV, halfHV, stride, 8, 8, 8);
}
static __attribute__((unused)) void avg_h264_qpel16_mc00_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    avg_pixels16_8_c(dst, src, stride, 16);
}
/* qpel position (1,0): average of the full-pel source and the horizontal
 * half-pel plane, averaged into dst. */
static void avg_h264_qpel16_mc10_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t half[16 * 16];      /* horizontal half-pel plane */

    put_h264_qpel16_h_lowpass_8(half, src, 16, stride);
    avg_pixels16_l2_8(dst, src, half, stride, stride, 16, 16);
}
/* qpel position (2,0): horizontal half-pel sample, filtered and averaged
 * directly into dst — no intermediate buffer needed. */
static void avg_h264_qpel16_mc20_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    avg_h264_qpel16_h_lowpass_8(dst, src, stride, stride);
}
/* qpel position (3,0): like mc10, but averaged with the pixel one to the
 * right (src + 1). */
static void avg_h264_qpel16_mc30_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t half[16 * 16];      /* horizontal half-pel plane */

    put_h264_qpel16_h_lowpass_8(half, src, 16, stride);
    avg_pixels16_l2_8(dst, src + 1, half, stride, stride, 16, 16);
}
/* qpel position (0,1): average of the full-pel source (via the margin copy)
 * and the vertical half-pel plane. */
static void avg_h264_qpel16_mc01_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[16 * (16 + 5)];              /* 16x21 source copy with margins */
    uint8_t *const full_mid = full + 16 * 2;  /* row aligned with src */
    uint8_t half[16 * 16];                    /* vertical half-pel plane */

    copy_block16_8(full, src - stride * 2, 16, stride, 16 + 5);
    put_h264_qpel16_v_lowpass_8(half, full_mid, 16, 16);
    avg_pixels16_l2_8(dst, full_mid, half, stride, 16, 16, 16);
}
/* qpel position (0,2): vertical half-pel sample, filtered and averaged
 * directly into dst. */
static void avg_h264_qpel16_mc02_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[16 * (16 + 5)];              /* 16x21 source copy with margins */
    uint8_t *const full_mid = full + 16 * 2;  /* row aligned with src */

    copy_block16_8(full, src - stride * 2, 16, stride, 16 + 5);
    avg_h264_qpel16_v_lowpass_8(dst, full_mid, stride, 16);
}
/* qpel position (0,3): like mc01, but averaged with the row one below
 * (full_mid + 16, i.e. src + stride inside the copy). */
static void avg_h264_qpel16_mc03_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[16 * (16 + 5)];              /* 16x21 source copy with margins */
    uint8_t *const full_mid = full + 16 * 2;  /* row aligned with src */
    uint8_t half[16 * 16];                    /* vertical half-pel plane */

    copy_block16_8(full, src - stride * 2, 16, stride, 16 + 5);
    put_h264_qpel16_v_lowpass_8(half, full_mid, 16, 16);
    avg_pixels16_l2_8(dst, full_mid + 16, half, stride, 16, 16, 16);
}
/* qpel position (1,1): average of the horizontal and vertical half-pel
 * planes, averaged into dst. */
static void avg_h264_qpel16_mc11_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[16 * (16 + 5)];              /* 16x21 source copy with margins */
    uint8_t *const full_mid = full + 16 * 2;  /* row aligned with src */
    uint8_t halfH[16 * 16];                   /* horizontal half-pel plane */
    uint8_t halfV[16 * 16];                   /* vertical half-pel plane */

    put_h264_qpel16_h_lowpass_8(halfH, src, 16, stride);
    copy_block16_8(full, src - stride * 2, 16, stride, 16 + 5);
    put_h264_qpel16_v_lowpass_8(halfV, full_mid, 16, 16);
    avg_pixels16_l2_8(dst, halfH, halfV, stride, 16, 16, 16);
}
/* qpel position (3,1): like mc11, but the vertical filter source is shifted
 * one pixel to the right (src + 1). */
static void avg_h264_qpel16_mc31_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[16 * (16 + 5)];              /* 16x21 source copy with margins */
    uint8_t *const full_mid = full + 16 * 2;  /* row aligned with src */
    uint8_t halfH[16 * 16];                   /* horizontal half-pel plane */
    uint8_t halfV[16 * 16];                   /* vertical half-pel plane (x+1) */

    put_h264_qpel16_h_lowpass_8(halfH, src, 16, stride);
    copy_block16_8(full, src - stride * 2 + 1, 16, stride, 16 + 5);
    put_h264_qpel16_v_lowpass_8(halfV, full_mid, 16, 16);
    avg_pixels16_l2_8(dst, halfH, halfV, stride, 16, 16, 16);
}
/* qpel position (1,3): like mc11, but the horizontal filter is applied one
 * row below (src + stride). */
static void avg_h264_qpel16_mc13_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[16 * (16 + 5)];              /* 16x21 source copy with margins */
    uint8_t *const full_mid = full + 16 * 2;  /* row aligned with src */
    uint8_t halfH[16 * 16];                   /* horizontal half-pel plane (y+1) */
    uint8_t halfV[16 * 16];                   /* vertical half-pel plane */

    put_h264_qpel16_h_lowpass_8(halfH, src + stride, 16, stride);
    copy_block16_8(full, src - stride * 2, 16, stride, 16 + 5);
    put_h264_qpel16_v_lowpass_8(halfV, full_mid, 16, 16);
    avg_pixels16_l2_8(dst, halfH, halfV, stride, 16, 16, 16);
}
/* qpel position (3,3): horizontal filter one row below, vertical filter one
 * pixel to the right; the two half-pel planes are averaged into dst. */
static void avg_h264_qpel16_mc33_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[16 * (16 + 5)];              /* 16x21 source copy with margins */
    uint8_t *const full_mid = full + 16 * 2;  /* row aligned with src */
    uint8_t halfH[16 * 16];                   /* horizontal half-pel plane (y+1) */
    uint8_t halfV[16 * 16];                   /* vertical half-pel plane (x+1) */

    put_h264_qpel16_h_lowpass_8(halfH, src + stride, 16, stride);
    copy_block16_8(full, src - stride * 2 + 1, 16, stride, 16 + 5);
    put_h264_qpel16_v_lowpass_8(halfV, full_mid, 16, 16);
    avg_pixels16_l2_8(dst, halfH, halfV, stride, 16, 16, 16);
}
/* qpel position (2,2): centre half-pel sample, 2D-filtered and averaged
 * directly into dst. */
static void avg_h264_qpel16_mc22_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    int16_t tmp[16 * (16 + 5)]; /* intermediate rows for the 2D 6-tap filter */

    avg_h264_qpel16_hv_lowpass_8(dst, tmp, src, stride, 16, stride);
}
/* qpel position (2,1): average of the horizontal half-pel plane and the
 * centre (H+V) half-pel plane. */
static void avg_h264_qpel16_mc21_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    int16_t tmp[16 * (16 + 5)]; /* intermediate rows for the 2D filter */
    uint8_t halfH[16 * 16];     /* horizontal half-pel plane */
    uint8_t halfHV[16 * 16];    /* centre half-pel plane */

    put_h264_qpel16_h_lowpass_8(halfH, src, 16, stride);
    put_h264_qpel16_hv_lowpass_8(halfHV, tmp, src, 16, 16, stride);
    avg_pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16);
}
/* qpel position (2,3): like mc21, but the horizontal filter is applied one
 * row below (src + stride). */
static void avg_h264_qpel16_mc23_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    int16_t tmp[16 * (16 + 5)]; /* intermediate rows for the 2D filter */
    uint8_t halfH[16 * 16];     /* horizontal half-pel plane (y+1) */
    uint8_t halfHV[16 * 16];    /* centre half-pel plane */

    put_h264_qpel16_h_lowpass_8(halfH, src + stride, 16, stride);
    put_h264_qpel16_hv_lowpass_8(halfHV, tmp, src, 16, 16, stride);
    avg_pixels16_l2_8(dst, halfH, halfHV, stride, 16, 16, 16);
}
/* qpel position (1,2): average of the vertical half-pel plane and the
 * centre (H+V) half-pel plane. */
static void avg_h264_qpel16_mc12_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[16 * (16 + 5)];              /* 16x21 source copy with margins */
    uint8_t *const full_mid = full + 16 * 2;  /* row aligned with src */
    int16_t tmp[16 * (16 + 5)];               /* intermediate for the 2D filter */
    uint8_t halfV[16 * 16];                   /* vertical half-pel plane */
    uint8_t halfHV[16 * 16];                  /* centre half-pel plane */

    copy_block16_8(full, src - stride * 2, 16, stride, 16 + 5);
    put_h264_qpel16_v_lowpass_8(halfV, full_mid, 16, 16);
    put_h264_qpel16_hv_lowpass_8(halfHV, tmp, src, 16, 16, stride);
    avg_pixels16_l2_8(dst, halfV, halfHV, stride, 16, 16, 16);
}
/* qpel position (3,2): like mc12, but the vertical filter source is shifted
 * one pixel to the right (src + 1). */
static void avg_h264_qpel16_mc32_8_c(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t full[16 * (16 + 5)];              /* 16x21 source copy with margins */
    uint8_t *const full_mid = full + 16 * 2;  /* row aligned with src */
    int16_t tmp[16 * (16 + 5)];               /* intermediate for the 2D filter */
    uint8_t halfV[16 * 16];                   /* vertical half-pel plane (x+1) */
    uint8_t halfHV[16 * 16];                  /* centre half-pel plane */

    copy_block16_8(full, src - stride * 2 + 1, 16, stride, 16 + 5);
    put_h264_qpel16_v_lowpass_8(halfV, full_mid, 16, 16);
    put_h264_qpel16_hv_lowpass_8(halfHV, tmp, src, 16, 16, stride);
    avg_pixels16_l2_8(dst, halfV, halfHV, stride, 16, 16, 16);
}


#if 0
#undef op_avg
#undef op_put
#undef op2_avg
#undef op2_put


#define put_h264_qpel8_mc00_8_c  ff_put_pixels8x8_8_c
#define avg_h264_qpel8_mc00_8_c  ff_avg_pixels8x8_8_c
#define put_h264_qpel16_mc00_8_c ff_put_pixels16x16_8_c
#define avg_h264_qpel16_mc00_8_c ff_avg_pixels16x16_8_c
#endif


/* Fixed-size 8x8 copy: exported wrapper around the variable-height put_pixels8. */
void FUNCC(ff_put_pixels8x8)(uint8_t *dst, uint8_t *src, int stride)
{
    FUNCC(put_pixels8)(dst, src, stride, 8);
}
/* Fixed-size 8x8 average: exported wrapper around the variable-height avg_pixels8. */
void FUNCC(ff_avg_pixels8x8)(uint8_t *dst, uint8_t *src, int stride)
{
    FUNCC(avg_pixels8)(dst, src, stride, 8);
}
/* Fixed-size 16x16 copy: exported wrapper around the variable-height put_pixels16. */
void FUNCC(ff_put_pixels16x16)(uint8_t *dst, uint8_t *src, int stride)
{
    FUNCC(put_pixels16)(dst, src, stride, 16);
}
/* Fixed-size 16x16 average: exported wrapper around the variable-height avg_pixels16. */
void FUNCC(ff_avg_pixels16x16)(uint8_t *dst, uint8_t *src, int stride)
{
    FUNCC(avg_pixels16)(dst, src, stride, 16);
}

/**
 * Zero a single 64-coefficient (8x8) DCT block.
 * dctcoef is the bit-depth-dependent coefficient type from the template.
 */
static void FUNCC(clear_block)(DCTELEM *block)
{
    memset(block, 0, sizeof(dctcoef) * 64);
}

/**
 * Zero all six 64-coefficient DCT blocks of a macroblock.
 * Equivalent to memset(blocks, 0, sizeof(DCTELEM) * 6 * 64).
 */
static void FUNCC(clear_blocks)(DCTELEM *blocks)
{
    memset(blocks, 0, sizeof(dctcoef) * 6 * 64);
}

/* Per-row column permutation used by FF_SSE2_IDCT_PERM in dsputil_init(). */
static const uint8_t idct_sse2_row_perm[8] = {0, 4, 1, 5, 2, 6, 3, 7};

/**
 * Initialize a ScanTable from a scan order and an IDCT coefficient
 * permutation.
 *
 * permutated[i] is the scan order with the IDCT permutation applied;
 * raster_end[i] is the highest permuted position among the first i+1
 * scan entries.
 */
void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
{
    int i, max_pos;

    st->scantable = src_scantable;

    /* apply the IDCT permutation to each scan position */
    for (i = 0; i < 64; i++)
        st->permutated[i] = permutation[src_scantable[i]];

    /* running maximum of the permuted positions */
    max_pos = -1;
    for (i = 0; i < 64; i++) {
        if (st->permutated[i] > max_pos)
            max_pos = st->permutated[i];
        st->raster_end[i] = max_pos;
    }
}

/* Do-nothing stub installed as the default c->prefetch handler. */
static void just_return(void *mem av_unused, int stride av_unused, int h av_unused)
{
}

/* init static data */
av_cold void dsputil_static_init(void)
{
    int i;

    for(i = 0; i < 256; i++) ff_cropTbl[i + MAX_NEG_CROP] = i;
    for(i = 0; i < MAX_NEG_CROP; i++)
    {
        ff_cropTbl[i] = 0;
        ff_cropTbl[i + MAX_NEG_CROP + 256] = 255;
    }

    for(i = 0; i < 512; i++)
    {
        ff_squareTbl[i] = (i - 256) * (i - 256);
    }

    for(i = 0; i < 64; i++) inv_zigzag_direct16[ff_zigzag_direct[i]] = i + 1;
}

/**
 * Verify that the compiler honours 16-byte stack alignment.
 * @return 0 if a DECLARE_ALIGNED(16) local really is 16-byte aligned,
 *         -1 otherwise. The failure is latched in a static flag so it is
 *         only registered once.
 */
int ff_check_alignment(void)
{
    static int did_fail;    /* static ⇒ zero-initialized */
    DECLARE_ALIGNED(16, int, aligned);

    if (((intptr_t)&aligned & 15) == 0)
        return 0;

    if (!did_fail)
        did_fail = 1;
    return -1;
}

/**
 * Fill a DSPContext with the C reference implementations.
 * Selects the IDCT according to avctx->idct_algo, installs the pixel/chroma/
 * qpel function tables, and builds the IDCT coefficient permutation.
 */
av_cold void dsputil_init(DSPContext *c, AVCodecContext *avctx)
{
    int i;

    ff_check_alignment();

    /* IDCT selection: FAAN if requested, otherwise the accurate simple IDCT.
     * Both use the identity permutation. */
    if(avctx->idct_algo == FF_IDCT_FAAN)
    {
        c->idct_put = ff_faanidct_put;
        c->idct_add = ff_faanidct_add;
        c->idct    = ff_faanidct;
        c->idct_permutation_type = FF_NO_IDCT_PERM;
    }
    else  //accurate/default
    {
        c->idct_put = ff_simple_idct_put;
        c->idct_add = ff_simple_idct_add;
        c->idct    = ff_simple_idct;
        c->idct_permutation_type = FF_NO_IDCT_PERM;
    }

    /* basic block helpers (8-bit C versions) */
    c->prefetch = just_return;
    c->draw_edges = draw_edges_8_c;
    c->emulated_edge_mc = ff_emulated_edge_mc_8;
    c->clear_block = clear_block_8_c;
    c->clear_blocks = clear_blocks_8_c;
    c->add_pixels8 = add_pixels8_8_c;
    c->add_pixels4 = add_pixels4_8_c;

    /* H.264 chroma MC: [0]=8x, [1]=4x, [2]=2x block width */
    c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_8_c;
    c->put_h264_chroma_pixels_tab[1] = put_h264_chroma_mc4_8_c;
    c->put_h264_chroma_pixels_tab[2] = put_h264_chroma_mc2_8_c;
    c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_8_c;
    c->avg_h264_chroma_pixels_tab[1] = avg_h264_chroma_mc4_8_c;
    c->avg_h264_chroma_pixels_tab[2] = avg_h264_chroma_mc2_8_c;

    /* H.264 luma qpel MC tables: first index is block size (0=16, 1=8, 2=4,
     * 3=2), second index is the quarter-pel position x + 4*y */
    c->put_h264_qpel_pixels_tab[0][ 0] = ff_put_pixels16x16_8_c;
    c->put_h264_qpel_pixels_tab[0][ 1] = put_h264_qpel16_mc10_8_c;
    c->put_h264_qpel_pixels_tab[0][ 2] = put_h264_qpel16_mc20_8_c;
    c->put_h264_qpel_pixels_tab[0][ 3] = put_h264_qpel16_mc30_8_c;
    c->put_h264_qpel_pixels_tab[0][ 4] = put_h264_qpel16_mc01_8_c;
    c->put_h264_qpel_pixels_tab[0][ 5] = put_h264_qpel16_mc11_8_c;
    c->put_h264_qpel_pixels_tab[0][ 6] = put_h264_qpel16_mc21_8_c;
    c->put_h264_qpel_pixels_tab[0][ 7] = put_h264_qpel16_mc31_8_c;
    c->put_h264_qpel_pixels_tab[0][ 8] = put_h264_qpel16_mc02_8_c;
    c->put_h264_qpel_pixels_tab[0][ 9] = put_h264_qpel16_mc12_8_c;
    c->put_h264_qpel_pixels_tab[0][10] = put_h264_qpel16_mc22_8_c;
    c->put_h264_qpel_pixels_tab[0][11] = put_h264_qpel16_mc32_8_c;
    c->put_h264_qpel_pixels_tab[0][12] = put_h264_qpel16_mc03_8_c;
    c->put_h264_qpel_pixels_tab[0][13] = put_h264_qpel16_mc13_8_c;
    c->put_h264_qpel_pixels_tab[0][14] = put_h264_qpel16_mc23_8_c;
    c->put_h264_qpel_pixels_tab[0][15] = put_h264_qpel16_mc33_8_c;
    c->put_h264_qpel_pixels_tab[1][ 0] = ff_put_pixels8x8_8_c;
    c->put_h264_qpel_pixels_tab[1][ 1] = put_h264_qpel8_mc10_8_c;
    c->put_h264_qpel_pixels_tab[1][ 2] = put_h264_qpel8_mc20_8_c;
    c->put_h264_qpel_pixels_tab[1][ 3] = put_h264_qpel8_mc30_8_c;
    c->put_h264_qpel_pixels_tab[1][ 4] = put_h264_qpel8_mc01_8_c;
    c->put_h264_qpel_pixels_tab[1][ 5] = put_h264_qpel8_mc11_8_c;
    c->put_h264_qpel_pixels_tab[1][ 6] = put_h264_qpel8_mc21_8_c;
    c->put_h264_qpel_pixels_tab[1][ 7] = put_h264_qpel8_mc31_8_c;
    c->put_h264_qpel_pixels_tab[1][ 8] = put_h264_qpel8_mc02_8_c;
    c->put_h264_qpel_pixels_tab[1][ 9] = put_h264_qpel8_mc12_8_c;
    c->put_h264_qpel_pixels_tab[1][10] = put_h264_qpel8_mc22_8_c;
    c->put_h264_qpel_pixels_tab[1][11] = put_h264_qpel8_mc32_8_c;
    c->put_h264_qpel_pixels_tab[1][12] = put_h264_qpel8_mc03_8_c;
    c->put_h264_qpel_pixels_tab[1][13] = put_h264_qpel8_mc13_8_c;
    c->put_h264_qpel_pixels_tab[1][14] = put_h264_qpel8_mc23_8_c;
    c->put_h264_qpel_pixels_tab[1][15] = put_h264_qpel8_mc33_8_c;
    c->put_h264_qpel_pixels_tab[2][ 0] = put_h264_qpel4_mc00_8_c;
    c->put_h264_qpel_pixels_tab[2][ 1] = put_h264_qpel4_mc10_8_c;
    c->put_h264_qpel_pixels_tab[2][ 2] = put_h264_qpel4_mc20_8_c;
    c->put_h264_qpel_pixels_tab[2][ 3] = put_h264_qpel4_mc30_8_c;
    c->put_h264_qpel_pixels_tab[2][ 4] = put_h264_qpel4_mc01_8_c;
    c->put_h264_qpel_pixels_tab[2][ 5] = put_h264_qpel4_mc11_8_c;
    c->put_h264_qpel_pixels_tab[2][ 6] = put_h264_qpel4_mc21_8_c;
    c->put_h264_qpel_pixels_tab[2][ 7] = put_h264_qpel4_mc31_8_c;
    c->put_h264_qpel_pixels_tab[2][ 8] = put_h264_qpel4_mc02_8_c;
    c->put_h264_qpel_pixels_tab[2][ 9] = put_h264_qpel4_mc12_8_c;
    c->put_h264_qpel_pixels_tab[2][10] = put_h264_qpel4_mc22_8_c;
    c->put_h264_qpel_pixels_tab[2][11] = put_h264_qpel4_mc32_8_c;
    c->put_h264_qpel_pixels_tab[2][12] = put_h264_qpel4_mc03_8_c;
    c->put_h264_qpel_pixels_tab[2][13] = put_h264_qpel4_mc13_8_c;
    c->put_h264_qpel_pixels_tab[2][14] = put_h264_qpel4_mc23_8_c;
    c->put_h264_qpel_pixels_tab[2][15] = put_h264_qpel4_mc33_8_c;
    c->put_h264_qpel_pixels_tab[3][ 0] = put_h264_qpel2_mc00_8_c;
    c->put_h264_qpel_pixels_tab[3][ 1] = put_h264_qpel2_mc10_8_c;
    c->put_h264_qpel_pixels_tab[3][ 2] = put_h264_qpel2_mc20_8_c;
    c->put_h264_qpel_pixels_tab[3][ 3] = put_h264_qpel2_mc30_8_c;
    c->put_h264_qpel_pixels_tab[3][ 4] = put_h264_qpel2_mc01_8_c;
    c->put_h264_qpel_pixels_tab[3][ 5] = put_h264_qpel2_mc11_8_c;
    c->put_h264_qpel_pixels_tab[3][ 6] = put_h264_qpel2_mc21_8_c;
    c->put_h264_qpel_pixels_tab[3][ 7] = put_h264_qpel2_mc31_8_c;
    c->put_h264_qpel_pixels_tab[3][ 8] = put_h264_qpel2_mc02_8_c;
    c->put_h264_qpel_pixels_tab[3][ 9] = put_h264_qpel2_mc12_8_c;
    c->put_h264_qpel_pixels_tab[3][10] = put_h264_qpel2_mc22_8_c;
    c->put_h264_qpel_pixels_tab[3][11] = put_h264_qpel2_mc32_8_c;
    c->put_h264_qpel_pixels_tab[3][12] = put_h264_qpel2_mc03_8_c;
    c->put_h264_qpel_pixels_tab[3][13] = put_h264_qpel2_mc13_8_c;
    c->put_h264_qpel_pixels_tab[3][14] = put_h264_qpel2_mc23_8_c;
    c->put_h264_qpel_pixels_tab[3][15] = put_h264_qpel2_mc33_8_c;
    c->avg_h264_qpel_pixels_tab[0][ 0] = ff_avg_pixels16x16_8_c;
    c->avg_h264_qpel_pixels_tab[0][ 1] = avg_h264_qpel16_mc10_8_c;
    c->avg_h264_qpel_pixels_tab[0][ 2] = avg_h264_qpel16_mc20_8_c;
    c->avg_h264_qpel_pixels_tab[0][ 3] = avg_h264_qpel16_mc30_8_c;
    c->avg_h264_qpel_pixels_tab[0][ 4] = avg_h264_qpel16_mc01_8_c;
    c->avg_h264_qpel_pixels_tab[0][ 5] = avg_h264_qpel16_mc11_8_c;
    c->avg_h264_qpel_pixels_tab[0][ 6] = avg_h264_qpel16_mc21_8_c;
    c->avg_h264_qpel_pixels_tab[0][ 7] = avg_h264_qpel16_mc31_8_c;
    c->avg_h264_qpel_pixels_tab[0][ 8] = avg_h264_qpel16_mc02_8_c;
    c->avg_h264_qpel_pixels_tab[0][ 9] = avg_h264_qpel16_mc12_8_c;
    c->avg_h264_qpel_pixels_tab[0][10] = avg_h264_qpel16_mc22_8_c;
    c->avg_h264_qpel_pixels_tab[0][11] = avg_h264_qpel16_mc32_8_c;
    c->avg_h264_qpel_pixels_tab[0][12] = avg_h264_qpel16_mc03_8_c;
    c->avg_h264_qpel_pixels_tab[0][13] = avg_h264_qpel16_mc13_8_c;
    c->avg_h264_qpel_pixels_tab[0][14] = avg_h264_qpel16_mc23_8_c;
    c->avg_h264_qpel_pixels_tab[0][15] = avg_h264_qpel16_mc33_8_c;
    c->avg_h264_qpel_pixels_tab[1][ 0] = ff_avg_pixels8x8_8_c;
    c->avg_h264_qpel_pixels_tab[1][ 1] = avg_h264_qpel8_mc10_8_c;
    c->avg_h264_qpel_pixels_tab[1][ 2] = avg_h264_qpel8_mc20_8_c;
    c->avg_h264_qpel_pixels_tab[1][ 3] = avg_h264_qpel8_mc30_8_c;
    c->avg_h264_qpel_pixels_tab[1][ 4] = avg_h264_qpel8_mc01_8_c;
    c->avg_h264_qpel_pixels_tab[1][ 5] = avg_h264_qpel8_mc11_8_c;
    c->avg_h264_qpel_pixels_tab[1][ 6] = avg_h264_qpel8_mc21_8_c;
    c->avg_h264_qpel_pixels_tab[1][ 7] = avg_h264_qpel8_mc31_8_c;
    c->avg_h264_qpel_pixels_tab[1][ 8] = avg_h264_qpel8_mc02_8_c;
    c->avg_h264_qpel_pixels_tab[1][ 9] = avg_h264_qpel8_mc12_8_c;
    c->avg_h264_qpel_pixels_tab[1][10] = avg_h264_qpel8_mc22_8_c;
    c->avg_h264_qpel_pixels_tab[1][11] = avg_h264_qpel8_mc32_8_c;
    c->avg_h264_qpel_pixels_tab[1][12] = avg_h264_qpel8_mc03_8_c;
    c->avg_h264_qpel_pixels_tab[1][13] = avg_h264_qpel8_mc13_8_c;
    c->avg_h264_qpel_pixels_tab[1][14] = avg_h264_qpel8_mc23_8_c;
    c->avg_h264_qpel_pixels_tab[1][15] = avg_h264_qpel8_mc33_8_c;
    c->avg_h264_qpel_pixels_tab[2][ 0] = avg_h264_qpel4_mc00_8_c;
    c->avg_h264_qpel_pixels_tab[2][ 1] = avg_h264_qpel4_mc10_8_c;
    c->avg_h264_qpel_pixels_tab[2][ 2] = avg_h264_qpel4_mc20_8_c;
    c->avg_h264_qpel_pixels_tab[2][ 3] = avg_h264_qpel4_mc30_8_c;
    c->avg_h264_qpel_pixels_tab[2][ 4] = avg_h264_qpel4_mc01_8_c;
    c->avg_h264_qpel_pixels_tab[2][ 5] = avg_h264_qpel4_mc11_8_c;
    c->avg_h264_qpel_pixels_tab[2][ 6] = avg_h264_qpel4_mc21_8_c;
    c->avg_h264_qpel_pixels_tab[2][ 7] = avg_h264_qpel4_mc31_8_c;
    c->avg_h264_qpel_pixels_tab[2][ 8] = avg_h264_qpel4_mc02_8_c;
    c->avg_h264_qpel_pixels_tab[2][ 9] = avg_h264_qpel4_mc12_8_c;
    c->avg_h264_qpel_pixels_tab[2][10] = avg_h264_qpel4_mc22_8_c;
    c->avg_h264_qpel_pixels_tab[2][11] = avg_h264_qpel4_mc32_8_c;
    c->avg_h264_qpel_pixels_tab[2][12] = avg_h264_qpel4_mc03_8_c;
    c->avg_h264_qpel_pixels_tab[2][13] = avg_h264_qpel4_mc13_8_c;
    c->avg_h264_qpel_pixels_tab[2][14] = avg_h264_qpel4_mc23_8_c;
    c->avg_h264_qpel_pixels_tab[2][15] = avg_h264_qpel4_mc33_8_c;

    /* 2-tap qpel: fall back to the H.264 qpel functions for any entry not
     * already set. NOTE(review): after the memsets every entry is NULL, so
     * the if() guards below are always true here; they only matter if an
     * arch-specific init pre-fills entries before this point — confirm. */
    memset(c->put_2tap_qpel_pixels_tab, 0, sizeof(c->put_2tap_qpel_pixels_tab));
    memset(c->avg_2tap_qpel_pixels_tab, 0, sizeof(c->avg_2tap_qpel_pixels_tab));

    for(i = 0; i < 64; i++)
    {
        if(!c->put_2tap_qpel_pixels_tab[0][i])
            c->put_2tap_qpel_pixels_tab[0][i] = c->put_h264_qpel_pixels_tab[0][i];
        if(!c->avg_2tap_qpel_pixels_tab[0][i])
            c->avg_2tap_qpel_pixels_tab[0][i] = c->avg_h264_qpel_pixels_tab[0][i];
    }

    /* build the coefficient permutation matching the selected IDCT */
    switch(c->idct_permutation_type)
    {
    case FF_NO_IDCT_PERM:
        for(i = 0; i < 64; i++)
            c->idct_permutation[i] = i;
        break;

    case FF_LIBMPEG2_IDCT_PERM:
        for(i = 0; i < 64; i++)
            c->idct_permutation[i] = (i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2);
        break;
    case FF_SIMPLE_IDCT_PERM:
        for(i = 0; i < 64; i++)
            c->idct_permutation[i] = simple_mmx_permutation[i];
        break;
    case FF_TRANSPOSE_IDCT_PERM:
        for(i = 0; i < 64; i++)
            c->idct_permutation[i] = ((i & 7) << 3) | (i >> 3);
        break;
    case FF_PARTTRANS_IDCT_PERM:
        for(i = 0; i < 64; i++)
            c->idct_permutation[i] = (i & 0x24) | ((i & 3) << 3) | ((i >> 3) & 3);
        break;
    case FF_SSE2_IDCT_PERM:
        for(i = 0; i < 64; i++)
            c->idct_permutation[i] = (i & 0x38) | idct_sse2_row_perm[i&7];
        break;

    default:
        av_log(avctx, AV_LOG_ERROR, "Internal error, IDCT permutation not set\n");
    }
}

