//
//  ffmpeg_file.c
//  FFmpegDemo
//
//  Created by xiaerfei on 2019/3/1.
//  Copyright © 2019 erfeixia. All rights reserved.
//

#include "ffmpeg_file.h"

#include <libavformat/avformat.h>
#include <libavutil/timestamp.h>

/* Size in bytes of an ADTS frame header.
 * BUG FIX: the trailing ';' was removed — it made the macro unusable inside
 * expressions (e.g. `x + ADTS_HEADER_LEN` expanded to `x + 7;`). */
#define ADTS_HEADER_LEN  7

/*
 * Fill `szAdtsHeader` (7 bytes) with an ADTS frame header for a raw AAC
 * payload of `dataLen` bytes.  The encoding parameters are fixed:
 * AAC LC (audio_object_type 2), sampling_frequency_index 7 (22050 Hz),
 * channel_configuration 2 (stereo), no CRC.
 */
void adts_header(char *szAdtsHeader, int dataLen){

    int profile   = 2;              /* audio_object_type: AAC LC */
    int freq_idx  = 7;              /* sampling_frequency_index */
    int channels  = 2;              /* channel_configuration */

    int frame_len = dataLen + 7;    /* payload plus this 7-byte header */

    /* Bytes 0-1: syncword 0xFFF, MPEG-4 (0), layer 0, protection absent (1). */
    szAdtsHeader[0] = (char)0xff;
    szAdtsHeader[1] = (char)0xf1;

    /* Byte 2: profile (2 bits), frequency index (4 bits), private bit 0,
     * channel configuration high bit. */
    szAdtsHeader[2] = (char)(((profile - 1) << 6)
                           | ((freq_idx & 0x0f) << 2)
                           | ((channels >> 2) & 0x01));

    /* Byte 3: channel configuration low 2 bits, original/home/copyright
     * bits all 0, then frame length bits 12-11. */
    szAdtsHeader[3] = (char)(((channels & 0x03) << 6)
                           | ((frame_len >> 11) & 0x03));

    /* Byte 4: frame length bits 10-3. */
    szAdtsHeader[4] = (char)((frame_len >> 3) & 0xff);

    /* Byte 5: frame length bits 2-0, then buffer fullness high 5 bits (0x7ff
     * = "variable bitrate"). */
    szAdtsHeader[5] = (char)(((frame_len & 0x07) << 5) | 0x1f);

    /* Byte 6: buffer fullness low 6 bits, one raw data block per frame. */
    szAdtsHeader[6] = (char)0xfc;
}



/* Delete `file_name` via FFmpeg's I/O layer.
 * Returns 0 on success, -1 on failure (after logging the error). */
int delete_file(char *file_name) {
    const int rc = avpriv_io_delete(file_name);
    if (rc >= 0)
        return 0;

    av_log(NULL, AV_LOG_ERROR, "failed to delete file: %s\n",file_name);
    return -1;
}

/* Rename/move `src_path` to `dst_path` via FFmpeg's I/O layer.
 * Returns 0 on success, -1 on failure (after logging the error). */
int move_file(char *src_path, char *dst_path) {
    const int rc = avpriv_io_move(src_path, dst_path);
    if (rc >= 0)
        return 0;

    av_log(NULL, AV_LOG_ERROR, "failed to move file from: %s to: %s\n",src_path, dst_path);
    return -1;
}

/**
 * Log every entry (size and name) of the directory `path` using FFmpeg's
 * directory-listing API.
 *
 * Returns 0 on success, -1 on failure.
 */
int list(char *path) {
    AVIODirContext *ctx = NULL;

    int ret = avio_open_dir(&ctx, path, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "failed to open dir: %s\n",av_err2str(ret));
        return -1;
    }

    while (1) {
        AVIODirEntry *entry = NULL;
        ret = avio_read_dir(ctx, &entry);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "failed to read dir: %s\n",av_err2str(ret));
            goto cleanup;   /* BUG FIX: used to fall through and return 0 */
        }

        if (entry == NULL) {
            break;   /* end of listing */
        }

        av_log(NULL, AV_LOG_ERROR, "%12"PRId64" %s\n",entry->size, entry->name);
        avio_free_directory_entry(&entry);
    }
    ret = 0;

cleanup:   /* renamed from "__failed": double-underscore names are reserved */
    avio_close_dir(&ctx);

    /* BUG FIX: propagate the read failure instead of always returning 0. */
    return ret < 0 ? -1 : 0;
}

/* Open `file_path` and print its container/stream information to the log.
 * Returns 0 on success, -1 if the file could not be opened. */
int dump_fmt(char *file_path) {
    av_register_all();

    AVFormatContext *fmt_ctx = NULL;
    const int err = avformat_open_input(&fmt_ctx, file_path, NULL, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "failed to open input: %s\n",av_err2str(err));
        return -1;
    }

    av_dump_format(fmt_ctx, 0, file_path, 0);
    avformat_close_input(&fmt_ctx);
    return 0;
}

/**
 * Demux the best audio stream of `src_path` and write it to `dst_path` as a
 * raw ADTS file: each packet is prefixed with a 7-byte ADTS header.
 *
 * NOTE(review): adts_header() hardcodes AAC LC / 22050 Hz / stereo, so the
 * output is only valid if the source audio matches — confirm against inputs.
 *
 * Returns 0 on success, -1 on failure.
 */
int fetch_audio(char *src_path, char *dst_path) {
    av_register_all();
    AVFormatContext *fmt_ctx = NULL;

    int ret = avformat_open_input(&fmt_ctx, src_path, NULL, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "failed to open input: %s\n",av_err2str(ret));
        /* BUG FIX: previously fell through with fmt_ctx == NULL and crashed
         * in av_dump_format below. */
        return -1;
    }

    av_dump_format(fmt_ctx, 0, src_path, 0);

    /* Open the output file. */
    FILE *file_fd = fopen(dst_path, "wb");
    if (file_fd == NULL) {
        av_log(NULL, AV_LOG_ERROR, "failed to open output file: %s\n",dst_path);
        avformat_close_input(&fmt_ctx);
        return -1;
    }

    /* Pick the "best" audio stream (highest quality / default) in the file. */
    int audio_index = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
    if (audio_index < 0) {
        av_log(NULL, AV_LOG_ERROR, "failed to find best stream\n");
        fclose(file_fd);
        avformat_close_input(&fmt_ctx);
        return -1;
    }

    AVPacket pkt;
    av_init_packet(&pkt);
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        if (pkt.stream_index == audio_index) {
            /* ADTS header first, then the raw AAC payload. */
            char adts_header_buf[7];
            adts_header(adts_header_buf, pkt.size);
            if (fwrite(adts_header_buf, 1, 7, file_fd) != 7) {
                av_log(NULL, AV_LOG_WARNING, "failed to write full ADTS header !!!\n");
            }
            size_t len = fwrite(pkt.data, 1, pkt.size, file_fd);
            if (len != (size_t)pkt.size) {   /* cast avoids signed/unsigned compare */
                av_log(NULL, AV_LOG_WARNING, "file len is not equal pkt.size !!!\n");
            }
        }
        av_packet_unref(&pkt);
    }

    fclose(file_fd);
    avformat_close_input(&fmt_ctx);
    return 0;
}
/*
 Call av_guess_format to let FFmpeg pick a suitable output file format.
 Call avformat_new_stream to create a new stream for the output file.
 Call avio_open to open the newly created file.
 Call avformat_write_header to write the file header.
 Call av_interleaved_write_frame to write the file contents.
 Call av_write_trailer to write the file trailer.
 Call avio_close to close the file.
 */

int fetch_audio_ffmpeg_api(const char *src_path,const char *dst_path) {
    av_register_all();
    AVFormatContext *fmt_ctx = NULL;
    
    int ret = avformat_open_input(&fmt_ctx, src_path, NULL, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "failed to open input: %s\n",av_err2str(ret));
    }
    
    av_dump_format(fmt_ctx, 0, src_path, 0);
    

    
    // out file
    AVFormatContext *ofmt_ctx = avformat_alloc_context();
    AVOutputFormat *output_fmt = av_guess_format(NULL, dst_path, NULL);
    
    if(output_fmt == NULL){
        av_log(NULL, AV_LOG_DEBUG, "Cloud not guess file format \n");
        return -1;
    }
    
    ofmt_ctx->oformat = output_fmt;
    
    AVStream *out_stream = avformat_new_stream(ofmt_ctx, NULL);
    if(out_stream == NULL){
        av_log(NULL, AV_LOG_DEBUG, "Failed to create out stream!\n");
        return -1;
    }
    
    if(fmt_ctx->nb_streams < 2){
        av_log(NULL, AV_LOG_ERROR, "the number of stream is too less!\n");
        return -1;
    }
    
    
    AVStream *in_stream = fmt_ctx->streams[1];
    AVCodecParameters *in_codecpar = in_stream->codecpar;
    if(in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO){
        av_log(NULL, AV_LOG_ERROR, "The Codec type is invalid!\n");
        exit(1);
    }
    
    int err_code = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
    if (err_code < 0) {
        av_log(NULL, AV_LOG_ERROR,
               "Failed to copy codec parameter, %d\n",
               err_code);
        return -1;
    }
    
    out_stream->codecpar->codec_tag = 0;
    
    err_code = avio_open(&ofmt_ctx->pb, dst_path, AVIO_FLAG_WRITE);
    if (err_code < 0) {
        av_log(NULL, AV_LOG_DEBUG, "Could not open file %s, %d\n",
               dst_path,
               err_code);
        return -1;
    }
    
    av_dump_format(ofmt_ctx, 0, dst_path, 1);
    
    // fetch stream
    /*
     param 1: 上下文
     param 2: 视频流、音频流、字幕
     param 3: 所处理的流的索引号，可以写 -1: 未知
     param 4: 相关的索引号，如音频流对应的视频流索引号 可以写 -1: 不关心
     param 5: 指定流的编解码器 不使用: NULL
     param 6: 标准 不关心: 0
     return: 流的索引值（视频文件中有很多流，每个流都有一个索引值）
     */
    int audio_index = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
    if (audio_index < 0) {
        av_log(NULL, AV_LOG_ERROR, "failed to find best stream\n");
        avformat_close_input(&fmt_ctx);
        return -1;
    }
    
    if (avformat_write_header(ofmt_ctx, NULL) < 0) {
        av_log(NULL, AV_LOG_DEBUG, "Error occurred when opening output file");
        return -1;
    }

    
    AVPacket pkt;
    av_init_packet(&pkt);
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        if (pkt.stream_index == audio_index) {
            pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            pkt.dts = pkt.pts;
            pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
            pkt.pos = -1;
            pkt.stream_index = 0;
            av_interleaved_write_frame(ofmt_ctx, &pkt);
            av_packet_unref(&pkt);
        }
        av_packet_unref(&pkt);
    }
    
    av_write_trailer(ofmt_ctx);
    avformat_close_input(&fmt_ctx);
    avio_close(ofmt_ctx->pb);
    return 0;
}
#pragma mark - Extract the H.264 video stream from an MP4 file
/* AV_WB32: store `val` at `p` in big-endian ("write big-endian 32-bit") byte
 * order.  Used below to emit the 4-byte Annex-B start code 0x00000001.
 * Only defined here if the libavutil headers did not already provide it. */
#ifndef AV_WB32
#   define AV_WB32(p, val) do {                 \
        uint32_t d = (val);                     \
        ((uint8_t*)(p))[3] = (d);               \
        ((uint8_t*)(p))[2] = (d)>>8;            \
        ((uint8_t*)(p))[1] = (d)>>16;           \
        ((uint8_t*)(p))[0] = (d)>>24;           \
    } while(0)
#endif
/*
 * Append one H.264 NAL unit (optionally preceded by an SPS/PPS block) to the
 * growing Annex-B packet `out`.
 *
 * The first NAL appended to an empty `out` gets a 4-byte start code
 * (00 00 00 01); every later NAL gets a 3-byte start code (00 00 01).
 * `sps_pps`, if non-NULL, is copied in front of the start code; it already
 * carries its own start codes (produced by h264_extradata_to_annexb).
 *
 * Returns 0 on success or a negative AVERROR from av_grow_packet.
 */
static int alloc_and_copy(AVPacket *out,
                          const uint8_t *sps_pps, uint32_t sps_pps_size,
                          const uint8_t *in, uint32_t in_size) {
    
    uint32_t offset         = out->size;   /* where this NAL starts in `out` */
    uint8_t nal_header_size = offset ? 3 : 4;   /* start-code length */
    int err;
    
    /* Make room for extradata + start code + payload. */
    err = av_grow_packet(out, sps_pps_size + in_size + nal_header_size);
    if (err < 0) return err;
    
    /* Extradata (SPS/PPS, with its own start codes) goes first. */
    if (sps_pps)
        memcpy(out->data + offset, sps_pps, sps_pps_size);
    /* NAL payload goes after the extradata and the start code. */
    memcpy(out->data + sps_pps_size + nal_header_size + offset, in, in_size);
    
    if (!offset) {
        /* First NAL of the packet: 4-byte big-endian start code 0x00000001. */
        AV_WB32(out->data + sps_pps_size, 1);
    } else {
        /* Later NALs: 3-byte start code 00 00 01. */
        (out->data + offset + sps_pps_size)[0] =
        (out->data + offset + sps_pps_size)[1] = 0;
        (out->data + offset + sps_pps_size)[2] = 1;
    }
    
    return 0;
}

/*
 MPEG-4 Part 15 "Advanced Video Coding (AVC) file format" section 5.2.4.1 specifies the record layout as follows:
 aligned(8) class AVCDecoderConfigurationRecord {
     unsigned int(8) configurationVersion = 1;
     unsigned int(8) AVCProfileIndication;
     unsigned int(8) profile_compatibility;
     unsigned int(8) AVCLevelIndication;
     bit(6) reserved = ‘111111’b;
     unsigned int(2) lengthSizeMinusOne;
     bit(3) reserved = ‘111’b;
     unsigned int(5) numOfSequenceParameterSets;
     for (i=0; i< numOfSequenceParameterSets;  i++) {
         unsigned int(16) sequenceParameterSetLength ;
         bit(8*sequenceParameterSetLength) sequenceParameterSetNALUnit;
     }
     unsigned int(8) numOfPictureParameterSets;
     for (i=0; i< numOfPictureParameterSets;  i++) {
         unsigned int(16) pictureParameterSetLength;
         bit(8*pictureParameterSetLength) pictureParameterSetNALUnit;
     }
 }
 */

/* AV_RB16: read a 16-bit big-endian value from `x`.  Used below for the
 * sequenceParameterSetLength / pictureParameterSetLength fields of the AVCC
 * record.  Only defined here if libavutil did not already provide it. */
#ifndef AV_RB16
#   define AV_RB16(x)                           \
    ((((const uint8_t*)(x))[0] << 8) |          \
      ((const uint8_t*)(x))[1])
#endif
/**
 * Convert H.264 extradata from the MP4/AVCC layout
 * (AVCDecoderConfigurationRecord, see the comment above) to Annex-B: every
 * SPS and PPS NAL unit is copied into a freshly allocated buffer, each
 * prefixed with a 00 00 00 01 start code, followed by `padding` zero bytes.
 *
 * On success out_extradata->data/size describe the new buffer — ownership
 * transfers to the caller, who must release it with av_free() — and the
 * return value is the NALU length-field size (1..4 bytes) declared by the
 * AVCC record.  Returns a negative AVERROR on malformed input.
 */
int h264_extradata_to_annexb(const uint8_t *codec_extradata, const int codec_extradata_size, AVPacket *out_extradata, int padding) {
    const uint8_t *extradata            = codec_extradata + 4;// skip version/profile/compat/level (first 4 bytes)
    static const uint8_t nalu_header[4] = { 0, 0, 0, 1 };
    int length_size = (*extradata++ & 0x3) + 1; // lengthSizeMinusOne + 1: bytes used to encode each NALU length
    /* NOTE(review): everything in this declaration except `out` is a plain
     * uint8_t, so `sps_offset = pps_offset = -1` below wraps to 255; only the
     * *_seen flags are read afterwards, the offsets are effectively unused. */
    uint8_t *out = NULL, unit_nb, sps_done = 0,
    
    sps_seen = 0, pps_seen = 0, sps_offset = 0, pps_offset = 0;
    
    int total_size = 0;   /* bytes accumulated in `out` (excluding padding) */
    
    sps_offset = pps_offset = -1;
    
    unit_nb = *extradata++ & 0x1f; /* number of sps unit(s) */
    
    if (!unit_nb) {
        /* No SPS units: jump straight to the `pps:` label inside the loop
         * body to read the PPS count (jumping into a loop body is legal C;
         * the `while (unit_nb--)` condition governs further iterations). */
        goto pps;
    } else {
        sps_offset = 0;
        sps_seen = 1;
    }
    
    while (unit_nb--) {
        
        uint16_t unit_size   = AV_RB16(extradata);
        // 4 = size of the start code we prepend to each unit
        total_size += (unit_size + 4);
        
        if (total_size > INT_MAX - padding) {
            av_log(NULL, AV_LOG_ERROR,
                   "Too big extradata size, corrupted stream or invalid MP4/AVCC bitstream\n");
            av_free(out);
            return AVERROR(EINVAL);
        }
        
        /* The unit (2-byte length + payload) must lie inside the extradata. */
        if (extradata + 2 + unit_size > codec_extradata + codec_extradata_size) {
            av_log(NULL, AV_LOG_ERROR, "Packet header is not contained in global extradata, "
                   "corrupted stream or invalid MP4/AVCC bitstream\n");
            av_free(out);
            return AVERROR(EINVAL);
        }
        
        int err;
        if ((err = av_reallocp(&out, total_size + padding)) < 0) return err;
        // write the 4-byte start code in front of the unit
        memcpy(out + total_size - unit_size - 4, nalu_header, 4);
        // copy the SPS/PPS payload itself
        memcpy(out + total_size - unit_size, extradata + 2, unit_size);
        // advance past this unit to the next SPS or PPS
        extradata += 2 + unit_size;
        
    pps:
        /* After the last SPS unit, switch (exactly once, guarded by
         * sps_done) to reading the PPS unit count. */
        if (!unit_nb && !sps_done++) {
            unit_nb = *extradata++; /* number of pps unit(s) */
            if (unit_nb) {
                pps_offset = total_size;
                pps_seen = 1;
            }
        }
    }
    
    if (out)
        memset(out + total_size, 0, padding);   /* zero the padding tail */
    
    if (!sps_seen)
        av_log(NULL, AV_LOG_WARNING,
               "Warning: SPS NALU missing or invalid. "
               "The resulting stream may not play.\n");
    
    if (!pps_seen)
        av_log(NULL, AV_LOG_WARNING,
               "Warning: PPS NALU missing or invalid. "
               "The resulting stream may not play.\n");
    
    /* Hand the buffer to the caller (owner must av_free out_extradata->data). */
    out_extradata->data      = out;
    out_extradata->size      = total_size;
    
    return length_size;
}

/**
 * Convert the AVCC-framed H.264 packet `in` (each NAL prefixed with a 4-byte
 * big-endian length) to Annex-B (start-code prefixed) and append the result
 * to `dst_fd`.  For IDR NALs (type 5) the stream's SPS/PPS extradata is
 * emitted first so the output is decodable from any keyframe.
 *
 * Always returns 0 (original contract; malformed packets are truncated).
 */
int h264_mp4toannexb(AVFormatContext *fmt_ctx, AVPacket *in, FILE *dst_fd) {
    const uint8_t *buf      = in->data;
    int            buf_size = in->size;
    const uint8_t *buf_end  = in->data + in->size;
    uint32_t cumul_size     = 0;   /* bytes of `in` consumed so far */

    AVPacket *out = av_packet_alloc();
    if (out == NULL)
        return 0;   /* keep the original "always 0" contract */

    do {
        /*
           size type
         ---------------------
         |     |  |
         |  4  | 1| ......
         |     |  |
         ---------------------
         */
        if (buf + 4 > buf_end)
            goto fail;

        /* Read the 4-byte big-endian NAL size. */
        int32_t nal_size = 0;
        for (int i = 0; i < 4; i++)
            nal_size = (nal_size << 8) | buf[i];
        buf += 4;

        uint8_t unit_type = *buf & 0x1f;   /* NAL unit type */

        if (nal_size > buf_end - buf || nal_size < 0)
            goto fail;

        /* `out` accumulates NALs across iterations; remember where this one
         * starts so only the NEW bytes are written below.
         * BUG FIX: the old code wrote the whole accumulated buffer every
         * iteration, duplicating earlier NALs of multi-NAL packets. */
        int prev_size = out->size;

        if (unit_type == 5) {
            /* IDR frame: prepend SPS/PPS taken from the stream parameters
             * (codecpar replaces the deprecated AVStream.codec access). */
            AVPacket spspps_pkt = { 0 };
            AVCodecParameters *par = fmt_ctx->streams[in->stream_index]->codecpar;
            if (h264_extradata_to_annexb(par->extradata,
                                         par->extradata_size,
                                         &spspps_pkt,
                                         AV_INPUT_BUFFER_PADDING_SIZE) < 0)
                goto fail;   /* BUG FIX: return value was ignored before */

            int err = alloc_and_copy(out,
                                     spspps_pkt.data, spspps_pkt.size,
                                     buf, nal_size);
            av_free(spspps_pkt.data);   /* BUG FIX: was leaked on every IDR */
            if (err < 0)
                goto fail;
        } else {
            if (alloc_and_copy(out, NULL, 0, buf, nal_size) < 0)
                goto fail;
        }

        /* Write only the bytes appended by this iteration. */
        size_t to_write = (size_t)(out->size - prev_size);
        size_t len = fwrite(out->data + prev_size, 1, to_write, dst_fd);
        if (len != to_write) {
            av_log(NULL, AV_LOG_DEBUG, "warning, length of writed data isn't equal pkt.size(%zu, %zu)\n",
                   len,
                   to_write);
        }
        fflush(dst_fd);

        buf        += nal_size;
        cumul_size += nal_size + 4;

    } while (cumul_size < (uint32_t)buf_size);   /* cast: avoid signed/unsigned mix */

fail:
    av_packet_free(&out);
    return 0;
}


/**
 * Extract the H.264 video stream of `src_path` into the raw Annex-B file
 * `dst_path` (each packet converted via h264_mp4toannexb).
 *
 * Returns 0 on success, -1 or AVERROR(EINVAL) on failure.
 */
int fetch_vedio_ffmpeg_api(char *src_path, char *dst_path) {

    int err_code;
    char errors[1024];
    int ret = -1;

    av_register_all();
    AVFormatContext *fmt_ctx = NULL;

    /* Open the destination file first. */
    FILE *dst_fd = fopen(dst_path, "wb");
    if (!dst_fd) {
        av_log(NULL, AV_LOG_DEBUG, "Could not open destination file %s\n", dst_path);
        return -1;
    }

    /* Open the source media file. */
    if ((err_code = avformat_open_input(&fmt_ctx, src_path, NULL, NULL)) < 0){
        av_strerror(err_code, errors, 1024);
        av_log(NULL, AV_LOG_DEBUG, "Could not open source file: %s, %d(%s)\n",
               src_path,
               err_code,
               errors);
        goto cleanup;   /* BUG FIX: dst_fd used to leak on this path */
    }

    av_dump_format(fmt_ctx, 0, src_path, 0);

    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    /* Find the video stream index. */
    int video_stream_index = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (video_stream_index < 0) {
        av_log(NULL, AV_LOG_DEBUG, "Could not find %s stream in input file %s\n",
               av_get_media_type_string(AVMEDIA_TYPE_VIDEO),
               src_path);
        ret = AVERROR(EINVAL);
        goto cleanup;   /* BUG FIX: fmt_ctx and dst_fd used to leak here */
    }

    /* Convert and dump every video packet. */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        if (pkt.stream_index == video_stream_index) {
            h264_mp4toannexb(fmt_ctx, &pkt, dst_fd);
        }
        av_packet_unref(&pkt);   /* release pkt.data */
    }
    ret = 0;

cleanup:
    if (fmt_ctx) {
        avformat_close_input(&fmt_ctx);
    }
    fclose(dst_fd);
    return ret;
}
#pragma mark - Convert a video file to FLV format

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag) {
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
    
    printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           tag,
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}

int transfrom_vedio_to_flv(char *in_filename, char *out_filename) {
    AVOutputFormat *ofmt = NULL;// 输出文件格式
    AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;// AV 上下文
    AVPacket pkt;// 包
    int ret, i;
    int stream_index = 0;
    int *stream_mapping = NULL;
    int stream_mapping_size = 0;
    
    
    av_register_all();
    // 打开输入文件
    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
        fprintf(stderr, "Could not open input file '%s'", in_filename);
        goto end;
    }
    // 查找输入文件的流信息
    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto end;
    }
    
    av_dump_format(ifmt_ctx, 0, in_filename, 0);
    // 创建输出文件的上下文，对应的格式由文件的后缀名决定
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    
    // 输入文件有多少个流
    stream_mapping_size = ifmt_ctx->nb_streams;
    // 创建一个 数组
    stream_mapping = av_mallocz_array(stream_mapping_size, sizeof(*stream_mapping));
    if (!stream_mapping) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    
    ofmt = ofmt_ctx->oformat;

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *out_stream;
        // 取出输入流
        AVStream *in_stream = ifmt_ctx->streams[i];
        // 取出输入流的编码参数
        AVCodecParameters *in_codecpar = in_stream->codecpar;
        // 这只取输入流的: 音频流、视频流、字幕流
        if (in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO &&
            in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
            in_codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
            stream_mapping[i] = -1;
            continue;
        }
        // 这里重新排了新的流索引
        stream_mapping[i] = stream_index++;
        // 创建一个输出流
        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        // 将输入流的编码参数拷贝至输出流
        ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy codec parameters\n");
            goto end;
        }
        //TODO: 暂时不知道 codec_tag 是干啥的
        out_stream->codecpar->codec_tag = 0;
    }
    
    av_dump_format(ofmt_ctx, 0, out_filename, 1);
    //TODO: 暂时不知道 这个 flags 是干啥的
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        // 打开文件 IO
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto end;
        }
    }
    // 写入对应的header 信息
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto end;
    }
    
    while (1) {
        AVStream *in_stream, *out_stream;
        // 读输入数据包
        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0) break;
        
        in_stream  = ifmt_ctx->streams[pkt.stream_index];
        // 判断是否超出流的个数或者是忽略的流(不属于音频流、视频流和字幕流)
        if (pkt.stream_index >= stream_mapping_size ||
            stream_mapping[pkt.stream_index] < 0) {
            av_packet_unref(&pkt);
            continue;
        }
        // 给新的流赋值上新的流索引
        pkt.stream_index = stream_mapping[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];
        log_packet(ifmt_ctx, &pkt, "in");
        
        /* copy packet */
        // int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding rnd);是计算 "a * b / c" 的值并分五种方式来取整.
        // 用在FFmpeg中, 则是将以 "时钟基c" 表示的 数值a 转换成以 "时钟基b" 来表示。
        // 显示时间基转换
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        // 解码的时间基转换
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        // 时长转换
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        log_packet(ofmt_ctx, &pkt, "out");
        
        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        av_packet_unref(&pkt);
    }
    
    av_write_trailer(ofmt_ctx);
end:
    
    avformat_close_input(&ifmt_ctx);
    
    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    
    av_freep(&stream_mapping);
    
    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        return 1;
    }
    
    
    
    return 0;
}

#pragma mark - Cut (trim) a section out of a video

int cut_vedio(double from_seconds, double end_seconds, const char* in_filename, const char* out_filename) {
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    int ret, i;
    
    av_register_all();
    
    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
        fprintf(stderr, "Could not open input file '%s'", in_filename);
        goto end;
    }
    
    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto end;
    }
    
    av_dump_format(ifmt_ctx, 0, in_filename, 0);
    // 创建输出的 AVFormatContext
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    
    ofmt = ofmt_ctx->oformat;
    
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *in_stream = ifmt_ctx->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
        
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        
        ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
    av_dump_format(ofmt_ctx, 0, out_filename, 1);
    // 输出 IO 在avformat_write_header 之前设置
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto end;
        }
    }
    
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto end;
    }
    // seek 到对应的时间
    ret = av_seek_frame(ifmt_ctx, -1, from_seconds*AV_TIME_BASE, AVSEEK_FLAG_ANY);
    if (ret < 0) {
        fprintf(stderr, "Error seek\n");
        goto end;
    }
    
    int64_t *dts_start_from = malloc(sizeof(int64_t) * ifmt_ctx->nb_streams);
    memset(dts_start_from, 0, sizeof(int64_t) * ifmt_ctx->nb_streams);
    int64_t *pts_start_from = malloc(sizeof(int64_t) * ifmt_ctx->nb_streams);
    memset(pts_start_from, 0, sizeof(int64_t) * ifmt_ctx->nb_streams);
    
    while (1) {
        AVStream *in_stream, *out_stream;
        
        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
            break;
        
        in_stream  = ifmt_ctx->streams[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];
        
        log_packet(ifmt_ctx, &pkt, "in");
        
        if (av_q2d(in_stream->time_base) * pkt.pts > end_seconds) {
            av_free_packet(&pkt);
            break;
        }
        
        if (dts_start_from[pkt.stream_index] == 0) {
            dts_start_from[pkt.stream_index] = pkt.dts;
            printf("dts_start_from: %s\n", av_ts2str(dts_start_from[pkt.stream_index]));
        }
        if (pts_start_from[pkt.stream_index] == 0) {
            pts_start_from[pkt.stream_index] = pkt.pts;
            printf("pts_start_from: %s\n", av_ts2str(pts_start_from[pkt.stream_index]));
        }
        
        
        
        /* copy packet */
        /**
         * pts(视频的同步和输出)时间的转换
         * pts_start_from[pkt.stream_index] 是以 from_seconds 开始的
         */
        pkt.pts = av_rescale_q_rnd(pkt.pts - pts_start_from[pkt.stream_index], in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        /**
         * dts(视频的解码)时间的转换
         * dts_start_from[pkt.stream_index] 是以 from_seconds 开始的
         */
        pkt.dts = av_rescale_q_rnd(pkt.dts - dts_start_from[pkt.stream_index], in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        if (pkt.pts < 0) {
            pkt.pts = 0;
        }
        if (pkt.dts < 0) {
            pkt.dts = 0;
        }
        pkt.duration = (int)av_rescale_q((int64_t)pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        log_packet(ofmt_ctx, &pkt, "out");
        printf("\n");
        
        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        av_free_packet(&pkt);
    }
    free(dts_start_from);
    free(pts_start_from);
    
    av_write_trailer(ofmt_ctx);
end:
    avformat_close_input(&ifmt_ctx);
    
    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    
    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        return 1;
    }
    
    
    return 0;
}

/**
 * Mux the video stream of `video_video_file_name` together with the audio
 * stream of `video_audio_file_name` into `video_output_file_name`,
 * stream-copying packets and interleaving them by timestamp.
 *
 * Returns 0 on success, -1 on failure.
 *
 * NOTE(review): the audio input is only used when it contains exactly two
 * streams (the non-video one is taken as audio) — confirm against callers.
 */
int compose_vedio(const char *video_video_file_name, const char *video_audio_file_name, const char *video_output_file_name) {
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *video_ifmt_ctx = NULL,*audio_ifmt_ctx = NULL, *ofmt_ctx = NULL;
    AVStream *in_video_stream = NULL, *in_audio_stream = NULL, *out_video_stream = NULL, *out_audio_stream = NULL;
    AVPacket pkt;
    int ret = -1, i;
    int video_stream = 0, audio_stream = 0;   /* "still has packets" flags */

    const char *audio_file_name = video_audio_file_name;

    av_register_all();

    /* ---- open the video input ---- */
    if ((ret = avformat_open_input(&video_ifmt_ctx, video_video_file_name, 0, 0)) < 0) {
        fprintf(stderr, "Could not open input file '%s'", video_video_file_name);
        goto end;
    }
    if ((ret = avformat_find_stream_info(video_ifmt_ctx, 0)) < 0) {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto end;
    }
    av_dump_format(video_ifmt_ctx, 0, video_video_file_name, 0);

    /* ---- open the audio input ---- */
    if ((ret = avformat_open_input(&audio_ifmt_ctx, audio_file_name, 0, 0)) < 0) {
        fprintf(stderr, "Could not open input file '%s'", audio_file_name);
        goto end;
    }
    if ((ret = avformat_find_stream_info(audio_ifmt_ctx, 0)) < 0) {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto end;
    }
    av_dump_format(audio_ifmt_ctx, 0, audio_file_name, 0);

    /* ---- output context, format guessed from the output file name ---- */
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, video_output_file_name);
    if (!ofmt_ctx) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    ofmt = ofmt_ctx->oformat;

    /* Copy the first video stream of the video input to output stream 0. */
    for (i = 0; i < (int)video_ifmt_ctx->nb_streams; i++) {
        in_video_stream = video_ifmt_ctx->streams[i];
        AVCodecParameters *in_codecpar = in_video_stream->codecpar;
        if (in_codecpar->codec_type == AVMEDIA_TYPE_VIDEO ) {
            out_video_stream = avformat_new_stream(ofmt_ctx, NULL);
            if (!out_video_stream) {
                fprintf(stderr, "Failed allocating output stream\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            if(avcodec_parameters_copy(out_video_stream->codecpar, in_codecpar) < 0 ){
                fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
                goto end;
            }
            out_video_stream->codecpar->codec_tag = 0;
            video_stream = 1;
            break;
        }
    }

    /* Copy the audio stream of the audio input to output stream 1 (the
     * audio input is expected to hold two streams; the non-video one is
     * treated as audio). */
    if (audio_ifmt_ctx->nb_streams == 2) {
        in_audio_stream = audio_ifmt_ctx->streams[0];
        AVCodecParameters *in_codecpar = in_audio_stream->codecpar;
        if (in_codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            in_audio_stream = audio_ifmt_ctx->streams[1];
            in_codecpar = in_audio_stream->codecpar;
        }
        out_audio_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_audio_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        if(avcodec_parameters_copy(out_audio_stream->codecpar, in_codecpar) < 0 ){
            fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_audio_stream->codecpar->codec_tag = 0;
        audio_stream = 1;
    }

    av_dump_format(ofmt_ctx, 0, video_output_file_name, 1);

    /* The output AVIO handle must exist before avformat_write_header. */
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, video_output_file_name, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", video_output_file_name);
            goto end;
        }
    }

    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto end;
    }

    av_init_packet(&pkt);
    long long video_pts = 0, audio_pts = 0;
    int64_t last_video_dts = 0, last_audio_dts = 0;
    int packets = 0;

    /* Interleave: always read from the stream whose last pts is behind. */
    while (video_stream || audio_stream) {
        if (video_stream && (!audio_stream || av_compare_ts(video_pts, in_video_stream->time_base, audio_pts, in_audio_stream->time_base) < 0)) {
            ret = av_read_frame(video_ifmt_ctx, &pkt);
            if (ret < 0) {
                video_stream = 0;
                av_packet_unref(&pkt);
                /* BUG FIX: `break` here aborted the whole loop and left the
                 * remaining audio packets unwritten; keep draining audio. */
                continue;
            }
            if (pkt.pts == AV_NOPTS_VALUE) {
                /* Synthesize pts/dts/duration from the frame rate. */
                AVRational time_base1 = in_video_stream->time_base;
                av_log(NULL, AV_LOG_DEBUG, "AV_TIME_BASE=%d,av_q2d=%f(num=%d, den=%d)\n",
                       AV_TIME_BASE,
                       av_q2d(in_video_stream->r_frame_rate),
                       in_video_stream->r_frame_rate.num,
                       in_video_stream->r_frame_rate.den);

                int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(in_video_stream->r_frame_rate);
                pkt.pts=(double)(packets*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
                pkt.dts=pkt.pts;
                video_pts = pkt.pts;
                pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
                packets++;
            }
            pkt.pts = av_rescale_q_rnd(pkt.pts, in_video_stream->time_base, out_video_stream->time_base, (AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            pkt.duration = av_rescale_q(pkt.duration, in_video_stream->time_base, out_video_stream->time_base);
            pkt.pos = -1;
            pkt.stream_index=0;
            av_log(NULL, AV_LOG_DEBUG, "xxxxxxxxx%d, dts=%lld, pts=%lld\n", packets, pkt.dts, pkt.pts);
            if (last_video_dts <= pkt.dts) {   /* drop non-monotonic dts packets */
                video_pts = pkt.pts;
                last_video_dts = pkt.dts;
                video_stream = !av_interleaved_write_frame(ofmt_ctx, &pkt);
            }
        } else if (audio_stream) {
            ret = av_read_frame(audio_ifmt_ctx, &pkt);
            if (ret < 0) {
                audio_stream = 0;
                av_packet_unref(&pkt);
                /* BUG FIX: `break` here stopped draining the video input. */
                continue;
            }
            pkt.pts = av_rescale_q_rnd(pkt.pts, in_audio_stream->time_base, out_audio_stream->time_base, (AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            pkt.duration = av_rescale_q(pkt.duration, in_audio_stream->time_base, out_audio_stream->time_base);
            pkt.pos = -1;
            pkt.stream_index=1;

            av_log(NULL, AV_LOG_DEBUG, "Write stream2 Packet. size:%5d\tpts:%lld\tdts:%lld\n",pkt.size,pkt.pts, pkt.dts);
            if (last_audio_dts <= pkt.dts) {   /* drop non-monotonic dts packets */
                audio_pts = pkt.pts;
                last_audio_dts = pkt.dts;
                audio_stream = !av_interleaved_write_frame(ofmt_ctx, &pkt);
            }
        }

        av_packet_unref(&pkt);
    }

    if(av_write_trailer(ofmt_ctx) < 0){
        av_log(NULL, AV_LOG_ERROR,
               "Error occurred when writing media tailer!\n");
        ret = -1;
        goto end;
    }

    ret = 0;

end:
    if (video_ifmt_ctx) {
        avformat_close_input(&video_ifmt_ctx);
    }
    if (audio_ifmt_ctx) {
        avformat_close_input(&audio_ifmt_ctx);
    }

    /* close output */
    if (ofmt_ctx) {
        if (ofmt && !(ofmt->flags & AVFMT_NOFILE)) avio_closep(&ofmt_ctx->pb);
        avformat_free_context(ofmt_ctx);
    }

    /* BUG FIX: report failure instead of always returning 0. */
    return (ret < 0 && ret != AVERROR_EOF) ? -1 : 0;
}

#define ERROR_STR_SIZE 1024

/**
 * Merge two single-stream input files (typically a video elementary stream
 * and an audio elementary stream) into one output container, copying the
 * packets without re-encoding.
 *
 * Packets are interleaved by comparing the last timestamp read from each
 * input with av_compare_ts(). Inputs that carry no PTS (e.g. raw H.264)
 * get timestamps synthesized from the stream's frame rate.
 *
 * @param src_file1 path of the first input file (must contain exactly one stream)
 * @param src_file2 path of the second input file (must contain exactly one stream)
 * @param out_file  path of the output file; the muxer is guessed from the name
 * @return 0 on success, -1 on failure
 */
int merge_video(const char *src_file1,const char *src_file2,const char *out_file) {
    
    int ret = -1;
    
    int err_code;
    char errors[ERROR_STR_SIZE];
    
    AVFormatContext *ifmt_ctx1 = NULL;
    AVFormatContext *ifmt_ctx2 = NULL;
    
    AVFormatContext *ofmt_ctx = NULL;
    AVOutputFormat *ofmt = NULL;
    
    AVStream *in_stream1 = NULL;
    AVStream *in_stream2 = NULL;
    
    AVStream *out_stream1 = NULL;
    AVStream *out_stream2 = NULL;
    
    //last timestamp read from each input; drives the interleaving choice
    int64_t cur_pts1 = 0, cur_pts2 = 0;
    
    //when 0, video packets are renumbered sequentially instead of using input timestamps
    int b_use_video_ts = 1;
    uint32_t packets = 0;
    AVPacket pkt;
    
    int stream1 = 0, stream2 = 0;
    
    av_log_set_level(AV_LOG_DEBUG);
    //register avformat, codec (required before FFmpeg 4.0)
    av_register_all();
    
    //open first file
    if ((err_code = avformat_open_input(&ifmt_ctx1, src_file1, 0, 0)) < 0) {
        av_strerror(err_code, errors, ERROR_STR_SIZE);
        av_log(NULL, AV_LOG_ERROR,
               "Could not open src file, %s, %d(%s)\n",
               src_file1, err_code, errors);
        goto fail;
    }
    
    if ((err_code = avformat_find_stream_info(ifmt_ctx1, 0)) < 0) {
        av_strerror(err_code, errors, ERROR_STR_SIZE);
        av_log(NULL, AV_LOG_ERROR,
               "Failed to retrieve input stream info, %s, %d(%s) \n",
               src_file1, err_code, errors);
        goto fail;
    }
    
    av_dump_format(ifmt_ctx1, 0, src_file1, 0);
    
    //open second file
    if ((err_code = avformat_open_input(&ifmt_ctx2, src_file2, 0, 0)) < 0) {
        av_strerror(err_code, errors, ERROR_STR_SIZE);
        av_log(NULL, AV_LOG_ERROR,
               "Could not open the second src file, %s, %d(%s)\n",
               src_file2, err_code, errors);
        goto fail;
    }
    
    if ((err_code = avformat_find_stream_info(ifmt_ctx2, 0)) < 0) {
        av_strerror(err_code, errors, ERROR_STR_SIZE);
        av_log(NULL, AV_LOG_ERROR,
               "Failed to retrieve input stream info, %s, %d(%s) \n",
               src_file2, err_code, errors);
        goto fail;
    }
    
    av_dump_format(ifmt_ctx2, 0, src_file2, 0);
    
    //create out context
    if ((err_code = avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_file)) < 0) {
        av_strerror(err_code, errors, ERROR_STR_SIZE);
        av_log(NULL, AV_LOG_ERROR,
               "Failed to create an context of outfile , %d(%s) \n",
               err_code, errors);
        //BUGFIX: previously fell through and dereferenced a NULL ofmt_ctx below
        goto fail;
    }
    
    ofmt = ofmt_ctx->oformat;
    
    //create out stream according to input stream
    if (ifmt_ctx1->nb_streams == 1) {
        in_stream1 = ifmt_ctx1->streams[0];
        stream1 = 1;
        
        AVCodecParameters *in_codecpar = in_stream1->codecpar;
        
        if (in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO &&
            in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
            in_codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
            av_log(NULL, AV_LOG_ERROR, "The Codec type is invalid!\n");
            goto fail;
        }
        
        out_stream1 = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream1) {
            av_log(NULL, AV_LOG_ERROR, "Failed to alloc out stream!\n");
            goto fail;
        }
        
        if ((err_code = avcodec_parameters_copy(out_stream1->codecpar, in_codecpar)) < 0) {
            av_strerror(err_code, errors, ERROR_STR_SIZE);
            av_log(NULL, AV_LOG_ERROR,
                   "Failed to copy codec parameter, %d(%s)\n",
                   err_code, errors);
            //BUGFIX: abort on copy failure, consistent with the second stream
            goto fail;
        }
        
        //let the muxer choose its own codec tag for the new container
        out_stream1->codecpar->codec_tag = 0;
    }
    
    if (ifmt_ctx2->nb_streams == 1) {
        in_stream2 = ifmt_ctx2->streams[0];
        stream2 = 1;
        
        AVCodecParameters *in_codecpar = in_stream2->codecpar;
        
        if (in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO &&
            in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
            in_codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
            av_log(NULL, AV_LOG_ERROR, "The Codec type is invalid!\n");
            goto fail;
        }
        
        out_stream2 = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream2) {
            av_log(NULL, AV_LOG_ERROR, "Failed to alloc out stream!\n");
            goto fail;
        }
        
        if ((err_code = avcodec_parameters_copy(out_stream2->codecpar, in_codecpar)) < 0) {
            av_strerror(err_code, errors, ERROR_STR_SIZE);
            av_log(NULL, AV_LOG_ERROR,
                   "Failed to copy codec parameter, %d(%s)\n",
                   err_code, errors);
            goto fail;
        }
        
        //let the muxer choose its own codec tag for the new container
        out_stream2->codecpar->codec_tag = 0;
    }
    
    av_dump_format(ofmt_ctx, 0, out_file, 1);
    
    //open out file
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        if ((err_code = avio_open(&ofmt_ctx->pb, out_file, AVIO_FLAG_WRITE)) < 0) {
            av_strerror(err_code, errors, ERROR_STR_SIZE);
            av_log(NULL, AV_LOG_ERROR,
                   "Could not open output file, %s, %d(%s)\n",
                   out_file, err_code, errors);
            goto fail;
        }
    }
    
    //write media header
    if ((err_code = avformat_write_header(ofmt_ctx, NULL)) < 0) {
        av_strerror(err_code, errors, ERROR_STR_SIZE);
        av_log(NULL, AV_LOG_ERROR,
               "Error occurred when writing media header!\n");
        goto fail;
    }
    
    av_init_packet(&pkt);
    
    while (stream1 || stream2) {
        /* pick the input whose last timestamp is earlier */
        if (stream1 && (!stream2 || av_compare_ts(cur_pts1, in_stream1->time_base, cur_pts2, in_stream2->time_base) <= 0)) {
            ret = av_read_frame(ifmt_ctx1, &pkt);
            if (ret < 0) {
                stream1 = 0;
                av_packet_unref(&pkt);
                break;
            }
            
            if (!b_use_video_ts &&
                (in_stream1->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)) {
                //ignore input timestamps: number the video packets ourselves
                pkt.pts = ++packets;
                in_stream1->time_base = (AVRational){in_stream1->r_frame_rate.den, in_stream1->r_frame_rate.num};
                pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream1->time_base, out_stream1->time_base, (AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
                pkt.dts = pkt.pts;
                av_log(NULL, AV_LOG_DEBUG, "xxxxxxxxx%u, dts=%lld, pts=%lld\n", packets, (long long)pkt.dts, (long long)pkt.pts);
            }
            
            //FIX: no PTS in the input (e.g. raw H.264) — synthesize one
            //from the stream frame rate
            if (pkt.pts == AV_NOPTS_VALUE) {
                AVRational time_base1 = in_stream1->time_base;
                //duration between 2 frames (us)
                av_log(NULL, AV_LOG_DEBUG, "AV_TIME_BASE=%d,av_q2d=%f(num=%d, den=%d)\n",
                       AV_TIME_BASE,
                       av_q2d(in_stream1->r_frame_rate),
                       in_stream1->r_frame_rate.num,
                       in_stream1->r_frame_rate.den);
                
                int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream1->r_frame_rate);
                pkt.pts = (double)(packets * calc_duration) / (double)(av_q2d(time_base1) * AV_TIME_BASE);
                pkt.dts = pkt.pts;
                pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1) * AV_TIME_BASE);
                packets++;
            }
            //BUGFIX: track the stream-1 clock for every packet, not only the
            //synthesized-PTS case; otherwise av_compare_ts() kept cur_pts1 at 0
            //and always favored stream 1 when the input carried valid timestamps
            cur_pts1 = pkt.pts;
            
            //convert PTS/DTS into the output stream's time base
            pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream1->time_base, out_stream1->time_base, (AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            pkt.dts = pkt.pts;
            pkt.duration = av_rescale_q(pkt.duration, in_stream1->time_base, out_stream1->time_base);
            pkt.pos = -1;
            pkt.stream_index = 0;
            av_log(NULL, AV_LOG_DEBUG, "xxxxxxxxx%u, dts=%lld, pts=%lld\n", packets, (long long)pkt.dts, (long long)pkt.pts);
            
            stream1 = !av_interleaved_write_frame(ofmt_ctx, &pkt);
        } else if (stream2) {
            ret = av_read_frame(ifmt_ctx2, &pkt);
            if (ret < 0) {
                stream2 = 0;
                av_packet_unref(&pkt);
                break;
            }
            
            if (!b_use_video_ts &&
                (in_stream2->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)) {
                pkt.pts = packets++;
                pkt.dts = pkt.pts;
            }
            
            cur_pts2 = pkt.pts;
            //convert PTS/DTS into the output stream's time base
            pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream2->time_base, out_stream2->time_base, (AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            pkt.dts = pkt.pts;
            pkt.duration = av_rescale_q(pkt.duration, in_stream2->time_base, out_stream2->time_base);
            pkt.pos = -1;
            pkt.stream_index = 1;
            
            av_log(NULL, AV_LOG_DEBUG, "Write stream2 Packet. size:%5d\tpts:%lld\tdts:%lld\n", pkt.size, (long long)pkt.pts, (long long)pkt.dts);
            
            stream2 = !av_interleaved_write_frame(ofmt_ctx, &pkt);
        }
        
        av_packet_unref(&pkt);
    }
    
    //write media trailer
    if ((err_code = av_write_trailer(ofmt_ctx)) < 0) {
        av_strerror(err_code, errors, ERROR_STR_SIZE);
        av_log(NULL, AV_LOG_ERROR,
               "Error occurred when writing media trailer!\n");
        goto fail;
    }
    
    ret = 0;
    
fail:
    if (ifmt_ctx1) {
        avformat_close_input(&ifmt_ctx1);
    }
    
    if (ifmt_ctx2) {
        avformat_close_input(&ifmt_ctx2);
    }
    
    if (ofmt_ctx) {
        //read the flags from the context itself: ofmt may still be NULL when
        //we bailed out before output-context allocation completed
        if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
            avio_closep(&ofmt_ctx->pb);
        }
        avformat_free_context(ofmt_ctx);
    }
    
    return ret;
}
