//
// Created by alexander on 2/20/19.
//
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include </home/alexander/CLionProjects/FFmpegLogTest/log/log.h>
#include "convertVideoToBmp.h"

#define INBUF_SIZE 4096

#define WORD uint16_t
#define DWORD uint32_t
#define LONG int32_t

/* Pack on 2-byte boundaries: the on-disk BMP file header is 14 bytes, and the
 * default 4-byte alignment would insert padding after bfType, corrupting the
 * header layout written by fwrite() in saveBmp(). */
#pragma pack(2)
/* BMP file header (BITMAPFILEHEADER) as defined by the Windows BMP format.
 * Written verbatim to the start of every output .bmp file. */
typedef struct tagBITMAPFILEHEADER
{
    WORD bfType;        /* magic number, must be 0x4d42 ("BM") */
    DWORD bfSize;       /* total file size in bytes: headers + pixel data */
    WORD bfReserved1;   /* reserved, must be 0 */
    WORD bfReserved2;   /* reserved, must be 0 */
    DWORD bfOffBits;    /* byte offset from file start to the pixel array */
}BITMAPFILEHEADER, *PBITMAPFILEHEADER;

/* BMP info header (BITMAPINFOHEADER, 40 bytes): describes the dimensions and
 * pixel format of the bitmap that follows the file header on disk. */
typedef struct tagBITMAPINFOHEADER
{
    DWORD biSize;           /* size of this header (40) */
    LONG biWidth;           /* image width in pixels */
    LONG biHeight;          /* height; negative means top-down row order */
    WORD biPlanes;          /* must be 1 */
    WORD biBitCount;        /* bits per pixel (24 for BGR24 here) */
    DWORD biCompression;    /* 0 = BI_RGB, uncompressed */
    DWORD biSizeImage;      /* image data size; may be 0 for BI_RGB */
    LONG biXPelsPerMeter;   /* horizontal resolution (unused here) */
    LONG biYPelsPerMeter;   /* vertical resolution (unused here) */
    DWORD biClrUser;        /* colors in palette (0 = none; spelling sic, usually biClrUsed) */
    DWORD biClrImportant;   /* important colors (0 = all) */
}BITMAPINFOHEADER, *PBITMAPINFOHEADER;

/**
 * Convert a decoded frame to BGR24 via sws_scale() and write it to
 * `fileName` as a top-down 24-bit BMP.
 *
 * img_convert_ctx: scaler context already configured for
 *                  (frame fmt, w, h) -> (AV_PIX_FMT_BGR24, w, h)
 * avFrame:         decoded source frame (owned by the decoder, not freed here)
 * fileName:        output path, opened with "wb"
 *
 * On any failure (allocation, fopen) the function logs to stderr and
 * returns without writing; it never crashes on a bad path.
 */
void saveBmp(struct SwsContext *img_convert_ctx, AVFrame *avFrame, char *fileName)
{
    int w = avFrame->width;
    int h = avFrame->height;

    /* Size of a tightly-packed BGR24 image (linesize = 3*w).
     * NOTE(review): BMP rows are supposed to be padded to 4-byte multiples;
     * for widths where 3*w % 4 != 0 some viewers may render this file
     * incorrectly — TODO confirm whether padding is needed for your inputs. */
    int numBytes = avpicture_get_size(AV_PIX_FMT_BGR24, w, h);
    uint8_t *buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
    if (!buffer)
    {
        fprintf(stderr, "saveBmp: could not allocate %d bytes\n", numBytes);
        return;
    }

    AVFrame *pFrameRGB = av_frame_alloc();
    if (!pFrameRGB)
    {
        fprintf(stderr, "saveBmp: could not allocate RGB frame\n");
        av_freep(&buffer);
        return;
    }

    /* Point pFrameRGB->data/linesize into `buffer` (no copy). */
    avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_BGR24, w, h);

    /* Convert all h rows of the source frame into the BGR24 buffer. */
    sws_scale(img_convert_ctx, (const uint8_t * const *)avFrame->data, avFrame->linesize,
              0, h, pFrameRGB->data, pFrameRGB->linesize);

    /* Info header: negative height marks a top-down bitmap, matching the
     * top-to-bottom row order sws_scale produced. */
    BITMAPINFOHEADER header = {0};
    header.biSize = sizeof(BITMAPINFOHEADER);
    header.biWidth = w;
    header.biHeight = -h;
    header.biPlanes = 1;
    header.biBitCount = 24;
    header.biCompression = 0; /* BI_RGB, uncompressed */

    /* File header: "BM" magic, total size, offset to pixel data. */
    BITMAPFILEHEADER bmpFileHeader = {0};
    bmpFileHeader.bfType = 0x4d42;
    bmpFileHeader.bfSize = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) + (DWORD)numBytes;
    bmpFileHeader.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);

    FILE *pf = fopen(fileName, "wb");
    if (!pf)
    {
        fprintf(stderr, "saveBmp: could not open %s for writing\n", fileName);
        goto cleanup;
    }

    fwrite(&bmpFileHeader, sizeof(BITMAPFILEHEADER), 1, pf);
    fwrite(&header, sizeof(BITMAPINFOHEADER), 1, pf);
    fwrite(pFrameRGB->data[0], 1, numBytes, pf);
    fclose(pf);

cleanup:
    /* Free the pixel buffer explicitly (the original relied on av_freep on
     * the frame's first data pointer, which is the same memory but obscure),
     * then release the frame wrapper with the proper AVFrame destructor. */
    av_freep(&buffer);
    av_frame_free(&pFrameRGB);
}


/**
 * Write a grayscale image as a binary PGM (P5) file.
 *
 * buf:      pixel data, one byte per pixel
 * wrap:     stride in bytes between consecutive rows in `buf`
 * xsize:    image width in pixels (bytes written per row)
 * ysize:    image height in rows
 * filename: output path
 *
 * Bug fixed: the original wrote `buf + 1 * wrap` on every iteration,
 * duplicating the second row ysize times instead of advancing through
 * the image. Also opens the file in binary mode ("wb") since P5 pixel
 * data is binary, and checks fopen for failure.
 */
void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize, char *filename)
{
    FILE *f;
    int i;

    f = fopen(filename, "wb");
    if (!f)
    {
        fprintf(stderr, "pgm_save: could not open %s for writing\n", filename);
        return;
    }
    /* P5 header: magic, width height, max gray value. */
    fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
    for (i = 0; i < ysize; i++)
    {
        /* Advance by the row stride, not a fixed offset. */
        fwrite(buf + i * wrap, 1, xsize, f);
    }
    fclose(f);
}

/**
 * Decode one packet and, if a complete frame came out, save it as a BMP
 * named "<outfilename>---<frame_count>.bmp" and bump *frame_count.
 *
 * Returns 0 on success (including "no frame produced yet"), or the
 * negative error code from avcodec_decode_video2() on decode failure.
 * `last` only affects the progress message ("last " prefix).
 * The decoded frame's buffers are owned by the decoder; nothing here
 * frees them.
 */
int decode_write_frame(const char *outfilename, AVCodecContext *avCodecContext, struct SwsContext *img_convert_ctx,
        AVFrame *frame,int *frame_count, AVPacket *pkt, int last)
{
    int got_frame = 0;
    char name_buf[1024];

    /* Feed the compressed packet to the decoder (legacy decode API). */
    int ret = avcodec_decode_video2(avCodecContext, frame, &got_frame, pkt);
    if (ret < 0)
    {
        fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
        return ret;
    }

    /* Decoders may buffer input; no output frame yet is not an error. */
    if (!got_frame)
    {
        return 0;
    }

    printf("Saving %s frame %3d\n", last ? "last " : "", *frame_count);
    fflush(stdout);

    snprintf(name_buf, sizeof(name_buf), "%s---%d.bmp", outfilename, *frame_count);
    saveBmp(img_convert_ctx, frame, name_buf);
    (*frame_count)++;

    return 0;
}

/**
 * Decode every video frame of `inputFileName` and write each one as a BMP
 * file named "<outputFileName>---<n>.bmp".
 *
 * Returns 0 on success, a negative AVERROR on recoverable setup failures;
 * calls exit(1) on unrecoverable ones (matching the file's existing style).
 *
 * Bugs fixed:
 *  - `FILE *file` was declared but never opened, yet fclose(file) was
 *    called at the end — undefined behavior on an uninitialized pointer.
 *  - `inputAvFormatContext` was passed uninitialized to
 *    avformat_open_input(), which requires either NULL or a context
 *    allocated by avformat_alloc_context().
 */
int convertVideoToBmp(const char *inputFileName, const char *outputFileName)
{
    int returnCode;
    /* Must be NULL so avformat_open_input() allocates the context itself. */
    AVFormatContext *inputAvFormatContext = NULL;
    const AVCodec *inputAVCodec;
    AVCodecContext *outputAVCodecContext = NULL;
    AVStream *inputAVStream = NULL;
    int streamIndex;
    int frameCount;
    AVFrame *avFrame;
    struct SwsContext *img_convert_ctx;
    AVPacket avPacket;

    av_register_all();

    /* Open the input and read its container header. */
    returnCode = avformat_open_input(&inputAvFormatContext, inputFileName, NULL, NULL);
    if(returnCode < 0)
    {
        printf("Could not open source file %s\n", inputFileName);
        exit(1);
    }

    returnCode = avformat_find_stream_info(inputAvFormatContext, NULL);
    if(returnCode < 0)
    {
        printf("Could not find stream information\n");
        exit(1);
    }

    /* Dump input format details to stderr for diagnostics. */
    av_dump_format(inputAvFormatContext, 0, inputFileName, 0);

    av_init_packet(&avPacket);
    avPacket.data = NULL;
    avPacket.size = 0;

    /* Locate the best video stream in the container. */
    returnCode = av_find_best_stream(inputAvFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if(returnCode < 0)
    {
        printf("Could not find %s stream in input file %s\n", av_get_media_type_string(AVMEDIA_TYPE_VIDEO), inputFileName);
        return returnCode;
    }

    streamIndex = returnCode;
    inputAVStream = inputAvFormatContext->streams[streamIndex];

    /* Find a decoder matching the stream's codec id. */
    inputAVCodec = avcodec_find_decoder(inputAVStream->codecpar->codec_id);
    if(!inputAVCodec)
    {
        printf("Failed to find %s codec\n", av_get_media_type_string(AVMEDIA_TYPE_VIDEO));
        return AVERROR(EINVAL);
    }

    outputAVCodecContext = avcodec_alloc_context3(NULL);
    if(!outputAVCodecContext)
    {
        printf("Could not alllocate video codec context\n");
        exit(1);
    }

    /* Copy the stream's codec parameters into the decoder context. */
    returnCode = avcodec_parameters_to_context(outputAVCodecContext, inputAVStream->codecpar);
    if(returnCode < 0)
    {
        printf("Failed to copy %s codec parameters to decoder context\n", av_get_media_type_string(AVMEDIA_TYPE_VIDEO));
        return returnCode;
    }

    returnCode = avcodec_open2(outputAVCodecContext, inputAVCodec, NULL);
    if(returnCode < 0)
    {
        printf("Could not open codec\n");
        exit(1);
    }

    /* Scaler: source pixel format -> BGR24 at the same dimensions,
     * used by saveBmp() for every frame. */
    img_convert_ctx = sws_getContext(outputAVCodecContext->width, outputAVCodecContext->height, outputAVCodecContext->pix_fmt,
                                     outputAVCodecContext->width, outputAVCodecContext->height, AV_PIX_FMT_BGR24,
                                     SWS_BICUBIC, NULL, NULL, NULL);
    if(img_convert_ctx == NULL)
    {
        printf("cannot initialize the conversion context\n");
        exit(1);
    }

    avFrame = av_frame_alloc();
    if(!avFrame)
    {
        printf("Could not allocate video frame\n");
        exit(1);
    }

    /* Main loop: decode and save every packet of the selected stream. */
    frameCount = 0;
    while (av_read_frame(inputAvFormatContext, &avPacket) >= 0)
    {
        if(avPacket.stream_index == streamIndex)
        {
            returnCode = decode_write_frame(outputFileName, outputAVCodecContext, img_convert_ctx, avFrame, &frameCount, &avPacket, 0);
            if(returnCode < 0)
            {
                exit(1);
            }
        }
        av_packet_unref(&avPacket);
    }

    /* Flush: an empty packet drains any frame still buffered in the decoder. */
    avPacket.data = NULL;
    avPacket.size = 0;
    decode_write_frame(outputFileName, outputAVCodecContext, img_convert_ctx, avFrame, &frameCount, &avPacket, 1);

    avformat_close_input(&inputAvFormatContext);
    sws_freeContext(img_convert_ctx);
    avcodec_free_context(&outputAVCodecContext);
    av_frame_free(&avFrame);

    return 0;
}












































