
#include "ffcodec.h"

extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavutil/opt.h"
}

#include <cstdio>   // fprintf/snprintf used by the logging macros and ffcodec_info
#include <cstring>  // memcpy/memmove/memset/strcpy
#include <iostream>

using namespace std;

// Logging helpers: CCCOL_LOG for a bare format string, CCCOL_LOG2 when extra
// printf-style arguments follow (uses the gcc "args..." variadic-macro extension).
#define CCCOL_LOG(fmt) fprintf(stdout, "--->> [CCCOL_LOG] %s(%d): " fmt, __func__, __LINE__)
#define CCCOL_LOG2(fmt, argv...) fprintf(stdout, "--->> [CCCOL_LOG] %s(%d): " fmt, __func__, __LINE__, ##argv)

// ----- pixel-format mapping between the codec API enums and ffmpeg -----

// One row per supported format pair.
typedef struct
{
    CodecControlOhosFormat forcodec;
    enum AVPixelFormat forffmpeg;
} FFCodec_FormatTranslate;

// Translation table. Lookups scan top-down and return the first hit, so the
// YVU alias rows below can never be produced by ffmpeg_to_ffcodec().
static const FFCodec_FormatTranslate ffcodec_formatGrid[] =
{
    {CODEC_FORMAT_RGB, AV_PIX_FMT_RGB24},
    {CODEC_FORMAT_BGR, AV_PIX_FMT_BGR24},
    {CODEC_FORMAT_ARGB, AV_PIX_FMT_ARGB},
    {CODEC_FORMAT_ABGR, AV_PIX_FMT_ABGR},
    {CODEC_FORMAT_RGBA, AV_PIX_FMT_RGBA},
    {CODEC_FORMAT_BGRA, AV_PIX_FMT_BGRA},

    // NOTE(review): the YVU planar aliases map to the plain YUV pixel formats,
    // i.e. the U/V plane swap is ignored -- confirm callers accept that.
    {CODEC_FORMAT_YUV420P, AV_PIX_FMT_YUV420P},
    {CODEC_FORMAT_YVU420P, AV_PIX_FMT_YUV420P},

    {CODEC_FORMAT_YUV422P, AV_PIX_FMT_YUV422P},
    {CODEC_FORMAT_YVU422P, AV_PIX_FMT_YUV422P},

    {CODEC_FORMAT_YUV420SP, AV_PIX_FMT_NV12},
    {CODEC_FORMAT_YVU420SP, AV_PIX_FMT_NV21},

    // NOTE(review): NV16 has U first; an NV61-style swap for YVU422SP is not
    // represented here -- confirm intended.
    {CODEC_FORMAT_YUV422SP, AV_PIX_FMT_NV16},
    {CODEC_FORMAT_YVU422SP, AV_PIX_FMT_NV16},

    // NOTE(review): AV_PIX_FMT_Y400A is grey *with an alpha channel*; plain
    // grey would be AV_PIX_FMT_GRAY8 -- TODO confirm which YUV400 means here.
    {CODEC_FORMAT_YUV400, AV_PIX_FMT_Y400A},
};

// Map a codec-API format enum to the equivalent ffmpeg pixel format.
// Returns AV_PIX_FMT_NONE (and logs) when the format has no table entry.
enum AVPixelFormat ffcodec_to_ffmpeg(CodecControlOhosFormat format)
{
    // Linear scan of the small translation table; first match wins.
    for (const FFCodec_FormatTranslate &entry : ffcodec_formatGrid)
    {
        if (entry.forcodec == format)
            return entry.forffmpeg;
    }
    CCCOL_LOG2("Unsupported format(%d) !! \n", format);
    return AV_PIX_FMT_NONE;
}

// Map an ffmpeg pixel format back to the codec-API format enum.
// Returns CODEC_FORMAT_UNKNOWN (and logs) when the format has no table entry.
CodecControlOhosFormat ffmpeg_to_ffcodec(enum AVPixelFormat format)
{
    // Linear scan of the small translation table; first match wins.
    for (const FFCodec_FormatTranslate &entry : ffcodec_formatGrid)
    {
        if (entry.forffmpeg == format)
            return entry.forcodec;
    }
    CCCOL_LOG2("Unsupported format(%d) !! \n", format);
    return CODEC_FORMAT_UNKNOWN;
}

// ----- ffmpeg encode/decode -----

// Per-handle state; returned by ffcodec_init() as an opaque void*.
typedef struct
{
    CodecControlOhosType type;       // encode or decode
    CodecControlOhosFormat format;   // the inputFormat passed at init time
    AVCodecContext *pAVCodecContext; // opened codec context
    AVFrame *frameCodec;             // frame handed to / received from the codec
    AVFrame *frameYUV;               // allocated on the decode path only; NULL for encode
    AVPacket packet;                 // packet reused across send/receive calls
} FFCodec_Priv;

// Build a human-readable list of the input/output formats this ffmpeg build
// supports. Returns a pointer to an internal static buffer (not thread-safe;
// valid until the next call). The list is truncated if it exceeds the buffer.
char* ffcodec_info(void)
{
    const size_t size = 2048;
    static char info[size] = {};
    size_t len = 0;
    memset(info, 0, size);
    avcodec_register_all();
    AVInputFormat *if_temp = av_iformat_next(NULL);
    while (if_temp != NULL && len + 1 < size)
    {
        int n = snprintf(&info[len], size - len, "ffInput: %s\r\n", if_temp->name);
        if (n < 0)
            break;
        // snprintf returns the untruncated length; clamp so that "size - len"
        // can never underflow (the old code could overflow the buffer here).
        len += ((size_t)n < size - len) ? (size_t)n : (size - 1 - len);
        if_temp = if_temp->next;
    }
    AVOutputFormat *of_temp = av_oformat_next(NULL);
    while (of_temp != NULL && len + 1 < size)
    {
        int n = snprintf(&info[len], size - len, "ffOutput: %s\r\n", of_temp->name);
        if (n < 0)
            break;
        len += ((size_t)n < size - len) ? (size_t)n : (size - 1 - len);
        of_temp = of_temp->next;
    }
    if (len == 0)
        strcpy(info, "There's no ffmpeg working info\r\n");
    return info;
}

/*
 *  Video codec initialisation.
 *  Params:
 *      type: whether to encode or decode
 *      inputFormat: format of the data fed in (h264/h265 when decoding, a yuv format when encoding)
 *      outputFormat: format of the data produced (h264/h265 when encoding)
 *      width, height, fps: stream geometry; used on the encode path
 *      groupSize: frames per group of pictures; smaller suits fast motion,
 *                 at the cost of a larger bitstream
 *  Return: control handle, NULL on failure
 */
void* ffcodec_init(
        CodecControlOhosType type,
        CodecControlOhosFormat inputFormat,
        CodecControlOhosFormat outputFormat,
        uint32_t width,
        uint32_t height,
        uint32_t fps,
        uint32_t groupSize)
{
    avcodec_register_all();

    // Pick the codec from the (type, format) pair; only h264/h265 are supported.
    AVCodec *pAVCodec;
    if (type == CODEC_TYPE_DECODE && inputFormat == CODEC_FORMAT_H264)
        pAVCodec = avcodec_find_decoder(AV_CODEC_ID_H264);
    else if (type == CODEC_TYPE_DECODE && inputFormat == CODEC_FORMAT_H265)
        pAVCodec = avcodec_find_decoder(AV_CODEC_ID_H265);
    else if (type == CODEC_TYPE_ENCODE && outputFormat == CODEC_FORMAT_H264)
        pAVCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    else if (type == CODEC_TYPE_ENCODE && outputFormat == CODEC_FORMAT_H265)
        pAVCodec = avcodec_find_encoder(AV_CODEC_ID_H265);
    else
    {
        CCCOL_LOG2("Unsupported Encode/Decode(%d-%d) action !! \n", type, inputFormat);
        return NULL;
    }

    if (!pAVCodec)
    {
        CCCOL_LOG2("avcodec_find_en/decoder(%d-%d) failed \n", type, inputFormat);
        return NULL;
    }

    AVCodecContext *pAVCodecContext = avcodec_alloc_context3(pAVCodec);
    if (!pAVCodecContext)
    {
        CCCOL_LOG2("avcodec_alloc_context3(%d-%d) failed \n", type, inputFormat);
        return NULL;
    }

    if (type == CODEC_TYPE_ENCODE)
    {
        pAVCodecContext->pix_fmt = ffcodec_to_ffmpeg(inputFormat);

        pAVCodecContext->bit_rate = 0; //400000;

        pAVCodecContext->width = width;
        pAVCodecContext->height = height;

        pAVCodecContext->time_base = (AVRational){1, (int)fps};
        pAVCodecContext->framerate = (AVRational){(int)fps, 1};

        pAVCodecContext->gop_size = groupSize; // frames per group of pictures

        // preset (speed; faster means lower quality):
        // "ultrafast", "superfast", "veryfast", "faster", "fast", "medium", "slow", "slower", "veryslow", "placebo", 0
        // av_opt_set(pAVCodecContext->priv_data, "preset", "superfast", 0);

        // tune (scenario; zerolatency means no filtering):
        // "film", "animation", "grain", "stillimage", "psnr", "ssim", "fastdecode", "zerolatency", 0
        // av_opt_set(pAVCodecContext->priv_data, "tune", "zerolatency", 0); // crashes...
    }

    // NOTE(review): the options below run for decoders too; the x264-specific
    // ones are silently ignored there -- confirm that is intended.
    av_opt_set(pAVCodecContext->priv_data, "preset", "superfast", 0);
    av_opt_set(pAVCodecContext->priv_data, "tune", "fastdecode", 0);

    pAVCodecContext->coder_type = FF_CODER_TYPE_AC;
    pAVCodecContext->flags |= CODEC_FLAG_LOOP_FILTER | CODEC_FLAG_LOW_DELAY;
    pAVCodecContext->me_cmp |= FF_CMP_CHROMA;
    pAVCodecContext->me_method = ME_EPZS;
    pAVCodecContext->me_subpel_quality = 4;
    pAVCodecContext->me_range = 16;
    pAVCodecContext->scenechange_threshold = 40;
    pAVCodecContext->i_quant_factor = (float)0.71;
    pAVCodecContext->qcompress = (float)0.7;
    pAVCodecContext->qmin = 10;
    pAVCodecContext->qmax = 51;
    pAVCodecContext->max_qdiff = 4;
    pAVCodecContext->refs = 2;
    pAVCodecContext->trellis = 1;
    pAVCodecContext->flags2 |= CODEC_FLAG2_FAST;
    
    pAVCodecContext->max_b_frames = 0; // number of B-frames; 0 disables them
    pAVCodecContext->b_frame_strategy = 0; // 1 = encoder decides how many B-frames to insert, 0 = use max_b_frames

    if (avcodec_open2(pAVCodecContext, pAVCodec, NULL) != 0)
    {
        CCCOL_LOG2("avcodec_open2 typt(%d-%d) failed \n", type, inputFormat);
        avcodec_free_context(&pAVCodecContext);
        return NULL;
    }

    // new FFCodec_Priv() value-initialises the struct, so frameYUV stays NULL
    // on the encode path (ffcodec_deinit relies on that).
    FFCodec_Priv *priv = new FFCodec_Priv();
    priv->type = type;
    priv->format = inputFormat;
    priv->pAVCodecContext = pAVCodecContext;
    priv->frameCodec = av_frame_alloc();
    if (type == CODEC_TYPE_ENCODE)
    {
        // Encode path: the frame borrows the caller's plane buffers inside
        // ffcodec_encode(), so no av_frame_get_buffer() here (kept for reference).
        priv->frameCodec->format = ffcodec_to_ffmpeg(inputFormat);
        priv->frameCodec->width = pAVCodecContext->width;
        priv->frameCodec->height = pAVCodecContext->height;
        // if (av_frame_get_buffer(priv->frameCodec, 32) != 0)
        // {
        //     CCCOL_LOG2("av_frame_get_buffer typt(%d-%d-%dx%d) failed \n",
        //         type, format, width, height);
        //     av_frame_unref(priv->frameCodec);
        //     av_free(priv->frameCodec);
        //     free(priv);
        //     avcodec_free_context(&pAVCodecContext);
        //     return NULL;
        // }
    }
    else
    {
        priv->frameYUV = av_frame_alloc();
    }
    av_init_packet(&priv->packet);

    return static_cast<void *>(priv);
}

void ffcodec_deinit(void *handle)
{
    FFCodec_Priv *priv = static_cast<FFCodec_Priv*>(handle);
    if (!priv)
        return;
    if (priv->pAVCodecContext)
        avcodec_free_context(&priv->pAVCodecContext);
    if (priv->frameCodec)
    {
        av_frame_unref(priv->frameCodec);
        av_free(priv->frameCodec);
    }
    if (priv->frameYUV)
    {
        av_frame_unref(priv->frameYUV);
        av_free(priv->frameYUV);
    }
    av_packet_unref(&priv->packet);
    delete priv;
}

/*
 *  Video decode (one encoded frame in, one raw frame out).
 *  Params:
 *      buff: one frame of encoded data
 *      buffSize: byte length of buff
 *      yuvMap: address of a FFCodec_Map[3] array used to return the yuv planes
 *              (borrowed from the decoder's frame -- do not free)
 *      rgbMap: returns rgb data; map/width/height/format must be configured by
 *              the caller beforehand (caller owns that memory); may be NULL
 *  Return: 0 on success, -1 on failure
 */
int32_t ffcodec_decode(void *handle, uint8_t *buff, uint32_t buffSize, FFCodec_Map yuvMap[3], FFCodec_Map *rgbMap)
{
    FFCodec_Priv *priv = static_cast<FFCodec_Priv *>(handle);
    int32_t ret;

    av_frame_unref(priv->frameCodec);
    av_frame_unref(priv->frameYUV);

    priv->packet.data = buff;
    priv->packet.size = buffSize;

    ret = avcodec_send_packet(priv->pAVCodecContext, &priv->packet);
    if (ret != 0)
    {
        // Non-picture NAL units legitimately produce no output, so this is not
        // always an error worth logging.
        return -1;
    }

    ret = avcodec_receive_frame(priv->pAVCodecContext, priv->frameCodec);
    if (ret != 0)
    {
        // Only dump the leading bytes when they actually exist (the old code
        // read buff[0..4] unconditionally, out of bounds for short packets).
        if (buffSize >= 5)
            CCCOL_LOG2("avcodec_receive_frame failed: %d - %02X %02X %02X %02X %02X \n",
                    buffSize, buff[0], buff[1], buff[2], buff[3], buff[4]);
        else
            CCCOL_LOG2("avcodec_receive_frame failed: %d \n", buffSize);
        return -1;
    }

    if (yuvMap)
    {
        yuvMap[0].pb = yuvMap[1].pb = yuvMap[2].pb = 1;
        yuvMap[0].format = yuvMap[1].format = yuvMap[2].format =
        ffmpeg_to_ffcodec((enum AVPixelFormat)priv->frameCodec->format);
        // Y plane (width reported as the stride, including any row padding)
        yuvMap[0].width = priv->frameCodec->linesize[0];
        yuvMap[0].height = priv->frameCodec->height;
        yuvMap[0].map = priv->frameCodec->data[0];
        // U plane -- height scaled by the chroma stride ratio
        // NOTE(review): assumes a planar layout where linesize/width reflects
        // the subsampling factor -- confirm for semi-planar (NV12) streams.
        yuvMap[1].width = priv->frameCodec->linesize[1];
        yuvMap[1].height = priv->frameCodec->height * priv->frameCodec->linesize[1] / priv->frameCodec->width;
        yuvMap[1].map = priv->frameCodec->data[1];
        // V plane
        yuvMap[2].width = priv->frameCodec->linesize[2];
        yuvMap[2].height = priv->frameCodec->height * priv->frameCodec->linesize[2] / priv->frameCodec->width;
        yuvMap[2].map = priv->frameCodec->data[2];
    }

    // RGB conversion needs the yuv plane description filled in above, so also
    // require yuvMap here (the old code would pass NULL through).
    if (rgbMap && rgbMap->map && yuvMap)
        return ffcodec_YUV_to_RGB(yuvMap, rgbMap);

    return 0;
}

/*
 *  Video encode.
 *  Params:
 *      yuvMap: address of a FFCodec_Map[3] array holding the yuv planes; the
 *              caller allocates the memory and sets width/height/format correctly
 *      frameBuff: returns a pointer to the encoded data; use it before the next
 *                 call to this function (do not free)
 *      frameBuffSize: returns the byte length of frameBuff
 *  Return: 0 on success, -1 on failure
 */
int32_t ffcodec_encode(
    void *handle,
    FFCodec_Map yuvMap[3],
    uint8_t **frameBuff,
    uint32_t *frameBuffSize)
{
    FFCodec_Priv *priv = static_cast<FFCodec_Priv*>(handle);
    int32_t ret;
    int got_packet_ptr = 0;

    av_packet_unref(&priv->packet);

    // Describe the input frame; the caller's plane buffers are borrowed
    // directly, no copy is made.
    priv->frameCodec->format = ffcodec_to_ffmpeg(yuvMap[0].format);
    priv->frameCodec->width = yuvMap[0].width;
    priv->frameCodec->height = yuvMap[0].height;

    priv->frameCodec->data[0] = yuvMap[0].map;
    priv->frameCodec->data[1] = yuvMap[1].map;
    priv->frameCodec->data[2] = yuvMap[2].map;

    // NOTE(review): each plane's width is used as its linesize, i.e. rows are
    // assumed tightly packed with no padding -- confirm callers guarantee this.
    priv->frameCodec->linesize[0] = yuvMap[0].width;
    priv->frameCodec->linesize[1] = yuvMap[1].width;
    priv->frameCodec->linesize[2] = yuvMap[2].width;

    // NOTE(review): av_frame_alloc() leaves pts at AV_NOPTS_VALUE, so this ramp
    // starts from a huge negative value -- confirm the encoder tolerates that.
    priv->frameCodec->pts += 1;

    // avcodec_encode_video2 is deprecated in newer ffmpeg; kept for this build.
    ret = avcodec_encode_video2(
        priv->pAVCodecContext,
        &priv->packet,
        priv->frameCodec,
        &got_packet_ptr);
    if (ret != 0)
    {
        CCCOL_LOG2("avcodec_encode_video2: failed, ret/%d got_packet_ptr/%d \n",
        ret, got_packet_ptr);
        priv->frameCodec->data[0] = NULL; // detach caller memory so teardown never frees it
        priv->frameCodec->data[1] = NULL;
        priv->frameCodec->data[2] = NULL;
        return -1;
    }
    // When encoding yuv to h26x the first few frames feed the encoder's
    // lookahead/filtering and produce no packet -- that is normal, not an error.
    else if (got_packet_ptr < 1)
        return 0;

    if (frameBuff)
        *frameBuff = priv->packet.data;
    if (frameBuffSize)
        *frameBuffSize = priv->packet.size;

    priv->frameCodec->data[0] = NULL; // detach caller memory so teardown never frees it
    priv->frameCodec->data[1] = NULL;
    priv->frameCodec->data[2] = NULL;

    return 0;
}

// Convert/scale yuv planes to packed RGB24. Returns 0 on success, -1 on failure.
// rgbMap must arrive with map/width/height configured (format is forced to RGB).
int32_t ffcodec_YUV_to_RGB(FFCodec_Map yuvMap[3], FFCodec_Map *rgbMap)
{
    // Source plane pointers (per-plane images)
    uint8_t *dataYUV[] = {yuvMap[0].map, yuvMap[1].map, yuvMap[2].map, NULL};
    // Destination: RGB24 is a single packed plane
    uint8_t *dataRGB[] = {rgbMap->map, NULL, NULL, NULL};

    // Source strides per plane (heights follow from the pixel format)
    int linesizeYUV[] = {(int)yuvMap[0].width, (int)yuvMap[1].width, (int)yuvMap[2].width, 0};
    // Destination stride: 3 bytes per pixel, one plane only.
    // (Fixed: the old code put height*3 in slot 1, which is not a stride --
    // stride entries beyond plane 0 must be 0 for a packed format.)
    int linesizeRGB[] = {(int)(rgbMap->width * 3), 0, 0, 0};

    // Only RGB24 output is supported; normalise the caller's format field.
    if (rgbMap->format != CODEC_FORMAT_RGB)
        rgbMap->format = CODEC_FORMAT_RGB;

    struct SwsContext *sContext = sws_getContext(
        yuvMap[0].width,
        yuvMap[0].height,
        ffcodec_to_ffmpeg(yuvMap[0].format),
        rgbMap->width,
        rgbMap->height,
        ffcodec_to_ffmpeg(rgbMap->format),
        SWS_FAST_BILINEAR,
        NULL, NULL, NULL);
    if (!sContext)
    {
        CCCOL_LOG2("sws_getContext failed: %dx%d(yuv) to %dx%d(rgb) \n",
        yuvMap[0].width, yuvMap[0].height, rgbMap->width, rgbMap->height);
        return -1;
    }

    // sws_scale returns the height of the output slice, not 0.
    int outHeight = sws_scale(
        sContext,
        (const uint8_t *const *)dataYUV,
        linesizeYUV,
        0,
        yuvMap[0].height,
        dataRGB,
        linesizeRGB);

    sws_freeContext(sContext);

    return outHeight > 0 ? 0 : -1;
}

// ----- other utility functions -----

// Locate an h264/h265 frame (Annex-B start code) in buff; returns its offset,
// -1 on failure. frameSize (optional) receives the frame length; type
// (optional) receives 'S' (SPS), 'I', 'P', or 0 when unrecognised.
int32_t h26xFindFrame(const uint8_t *buff, uint32_t buffLen, int32_t *frameSize, char *type)
{
    int32_t offset = -1;
    int32_t offset2 = (int32_t)buffLen - 1;

    // A start code needs at least 4 bytes. With uint32_t, "buffLen - 3" used to
    // underflow for shorter buffers and the loops scanned far out of bounds.
    if (buff == NULL || buffLen < 4)
        return -1;

    // Look for the 0,0,1 or 0,0,0,1 start code
    for (uint32_t i = 0; i + 3 < buffLen; i++)
    {
        if (buff[i] == 0 && buff[i + 1] == 0 &&
            (buff[i + 2] == 1 || (buff[i + 2] == 0 && buff[i + 3] == 1)))
        {
            offset = i;
            if (type)
            {
                // The NAL header byte follows the start code; for a 4-byte code
                // it sits at i+4, which may be past the end of the buffer --
                // 0xFF matches none of the known types below and yields 0.
                uint8_t bType = (buff[i + 2] == 1) ? buff[i + 3]
                              : ((i + 4 < buffLen) ? buff[i + 4] : (uint8_t)0xFF);
                uint8_t bH264 = bType & 0x1F;        // h264: nal_unit_type
                uint8_t bH265 = (bType & 0x7E) >> 1; // h265: nal_unit_type

                if (bH264 == 7 || bH265 == 33)
                    *type = 'S';
                else if (bH264 == 5 || bH265 == 19)
                    *type = 'I';
                else if (bH264 == 1 || bH265 == 1)
                    *type = 'P';
                // else if (bH264 == xx || bH265 == xx)
                //     *type = 'B';
                else
                    *type = 0;
            }
            break;
        }
    }

    if (offset < 0)
        return offset;

    // Look for the next 0,0,1 or 0,0,0,1 start code; if none is found the
    // frame is assumed to end at buffLen-1 (offset2's initial value).
    for (uint32_t i = offset + 3; i + 3 < buffLen; i++)
    {
        if (buff[i] == 0 && buff[i + 1] == 0 &&
            (buff[i + 2] == 1 || (buff[i + 2] == 0 && buff[i + 3] == 1)))
        {
            offset2 = i;
            break;
        }
    }

    if (frameSize)
        *frameSize = offset2 - offset;
    return offset;
}

// Shift consumed bytes (buffOffset + frameSize of them) out of the front of the
// buffer; companion of h26xFindFrame(). Resets buffOffset/frameSize to 0.
void h26xMoveBuff(uint8_t *buff, int32_t &buffLen, int32_t &buffOffset, int32_t &frameSize)
{
    int32_t consumed = buffOffset + frameSize;
    int32_t remain = buffLen - consumed;
    if (remain > 0)
        memmove(buff, buff + consumed, (size_t)remain); // regions overlap: memmove, not memcpy
    // Clamp so a bogus offset/size pair can never drive the length negative
    // (the old code could leave buffLen < 0 here).
    buffLen = remain > 0 ? remain : 0;
    buffOffset = frameSize = 0;
}

// FIFO for h26x elementary streams, built on h26xFindFrame()/h26xMoveBuff().
H26xFiFo::H26xFiFo(int frameSize, int frameCount)
{
    // Capacity: room for frameCount frames of (at most) frameSize bytes each.
    buffSize = frameCount * frameSize;
    buff = new uint8_t[buffSize];

    pthread_mutex_init(&lock, NULL);

    index = 0;
    headLen = 0;        // bytes currently stored at the front of buff
    tailLen = buffSize; // free bytes remaining
}
// Release the backing buffer and the mutex.
H26xFiFo::~H26xFiFo()
{
    pthread_mutex_destroy(&lock);
    if (buff != NULL)
        delete[] buff;
}
// Append len bytes of stream data to the FIFO.
// Returns the number of bytes stored (len), or 0 when there is no room
// (the old code always returned 0 even on success).
int H26xFiFo::Set(uint8_t *dat, int len)
{
    if (dat == NULL || len < 1)
        return 0;

    // Check free space under the lock too: Get() mutates headLen/tailLen.
    pthread_mutex_lock(&lock);
    if (tailLen < len)
    {
        pthread_mutex_unlock(&lock);
        return 0;
    }
    // Append at the current write position, buff + headLen. The old code wrote
    // at buff + tailLen, which starts *past the end* of the allocation.
    memcpy(buff + headLen, dat, len);
    headLen += len;
    tailLen = buffSize - headLen;
    pthread_mutex_unlock(&lock);

    return len;
}
// Pop one complete h26x frame into dat (capacity len bytes).
// Returns the frame length, or 0 when no complete frame is available.
int H26xFiFo::Get(uint8_t *dat, int len)
{
    int retLen = 0;

    // Scan and copy under the lock as well: Set() mutates buff/headLen.
    pthread_mutex_lock(&lock);
    if (headLen < 1)
    {
        pthread_mutex_unlock(&lock);
        return 0;
    }

    int32_t frameSize = 0;
    char type = 0;
    int32_t offset = h26xFindFrame(buff, headLen, &frameSize, &type);
    if (offset >= 0)
    {
        // NOTE(review): a frame larger than the caller's buffer is consumed but
        // not delivered -- confirm this drop policy is acceptable.
        if (dat && frameSize > 0 && frameSize <= len)
        {
            memcpy(dat, buff + offset, frameSize);
            retLen = frameSize;
        }
        h26xMoveBuff(buff, headLen, offset, frameSize);
    }
    else
    {
        // No start code anywhere in the stored bytes: discard them so the FIFO
        // cannot fill up with garbage. (The old code called h26xMoveBuff with
        // offset == -1 here, reading buff[-1].)
        headLen = 0;
    }
    tailLen = buffSize - headLen;
    pthread_mutex_unlock(&lock);

    return retLen;
}

