package com.byron.media.server.ffmpeg;

import com.byron.media.server.config.MediaServerConfig;
import com.byron.media.server.utils.FrameInfo;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.FilenameUtils;
import org.bytedeco.javacpp.avcodec;

import java.io.File;
import java.io.IOException;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;

import static org.bytedeco.javacpp.avcodec.*;
import static org.bytedeco.javacpp.avformat.*;
import static org.bytedeco.javacpp.avutil.*;

@Slf4j
/**
 * Demuxes an arbitrary input stream (e.g. an RTSP url), transcodes its video
 * to H.264 via {@link AnyToH264} (audio to AAC via {@link AnyToAAC}), and is
 * intended to remux the result into an MP4 file. The MP4 muxer itself is not
 * wired up yet: transcoded frames are staged into {@code out_packet} but
 * never written (see the TODOs below).
 *
 * <p>Lifecycle: {@link #start()} spawns a demux thread running {@link #run()};
 * {@link #close()} asks that thread to stop. Native resources are released
 * when the demux loop ends.
 */
@Slf4j
public class AnyToMp4 implements Runnable {

    /** Size in bytes of the reusable output staging buffer. */
    private static final int OUT_PACKET_BUF_SIZE = 1024 * 1024;

    /** Input URL (e.g. an RTSP address) to demux. */
    private final String url;

    /** Destination MP4 file path (used once the muxer is implemented). */
    private final String outFile;

    /**
     * Demux-loop flag. Volatile because {@link #close()} is called from a
     * different thread than {@link #run()}; without it the reader loop might
     * never observe the stop request.
     */
    private volatile boolean running;

    /** Reusable packet for frames read from the input. */
    private AVPacket packet;

    /** Demuxer context of the input url. */
    private AVFormatContext format_ctx;

    /** Muxer context for the MP4 output — not initialized yet (TODO). */
    private AVFormatContext out_format_ctx;

    /** Reusable packet staging transcoded frames for the MP4 muxer. */
    private AVPacket out_packet;

    /** Index of the video stream inside format_ctx, -1 if absent. */
    private int video_stream_index;

    /** Index of the audio stream inside format_ctx, -1 if absent. */
    private int audio_stream_index;

    /** Index of the video stream inside out_format_ctx. */
    private int out_video_steam_index;

    private int width = 1280;      // encoded video width in pixels

    private int height = 720;      // encoded video height in pixels

    private int dpi;               // bit-rate multiplier, see initOutCodecContext

    /** Transcoder: any input video codec -> H.264. */
    private final AnyToH264 anyToH264;

    /** Transcoder: any input audio codec -> AAC. */
    private final AnyToAAC anyToAAC;

    /** Reserved for pausing/resuming the transcode. */
    private final Object playLock = new Object();

    private long firstPts = -1;    // pts of the first transcoded frame, -1 = none yet

    private long firstDts = -1;    // dts of the first transcoded frame, -1 = none yet

    /**
     * @param width   target encoded video width in pixels
     * @param height  target encoded video height in pixels
     * @param url     input stream URL
     * @param outFile destination MP4 file path
     */
    public AnyToMp4(int width, int height, String url, String outFile) {
        this.url = url;
        this.width = width;
        this.height = height;
        this.anyToH264 = new AnyToH264();
        this.anyToAAC = new AnyToAAC();
        this.outFile = outFile;
    }

    /**
     * Demux loop: reads packets from the input and feeds them to the
     * H.264/AAC transcoders until end of stream or {@link #close()}.
     * Releases native resources on exit.
     */
    @Override
    public void run() {
        try {
            // Bail out on setup failure instead of continuing with a
            // half-initialized context (the original ignored the return code).
            if (initFfmpeg() != 0) {
                return;
            }
            anyToH264.initFfmpeg(format_ctx, width, height);

            // Transcoded video frames arrive here and are staged into
            // out_packet for the (not yet enabled) MP4 muxer.
            anyToH264.setFrameListener(new FrameListener() {
                @Override
                public void onFrame(FrameInfo frameInfo) {

                    if (firstPts == -1) {
                        firstPts = frameInfo.getPts();
                    }

                    if (firstDts == -1) {
                        firstDts = frameInfo.getDts();
                    }

                    // Video is normalized to H.264 by AnyToH264.
                    if (frameInfo.isVideo()) {
                        if (frameInfo.getLength() > OUT_PACKET_BUF_SIZE) {
                            // Guard against overflowing the fixed-size staging buffer.
                            log.warn("frame of {} bytes exceeds output buffer, dropped", frameInfo.getLength());
                            return;
                        }
                        out_packet.pts(frameInfo.getPts());
                        // Use the frame's own dts; the original copied the pts here.
                        out_packet.dts(frameInfo.getDts());
                        out_packet.duration(frameInfo.getDuration());
                        out_packet.stream_index(out_video_steam_index);
                        out_packet.data().position(0).put(frameInfo.getFrameData(), frameInfo.getOffset(), frameInfo.getLength());
                        out_packet.size(frameInfo.getLength());
                        out_packet.pos(-1);

                        // TODO enable once the MP4 muxer (out_format_ctx) is initialized:
                        // int res = av_interleaved_write_frame(out_format_ctx, out_packet);
                        // if (res < 0) { log.warn("Error muxing packet"); }
                    }
                }
            });

            // TODO wire up the AAC output (anyToAAC listener) the same way.

            final long startTime = System.currentTimeMillis();

            while (running) {

                int ret = av_read_frame(format_ctx, packet);
                if (ret < 0) {
                    // End of stream or read error: leave the loop.
                    log.info(String.format("error or end of file: %d\n", ret));
                    running = false;
                    log.info("finished at " + (System.currentTimeMillis() - startTime));
                    continue;
                }

                if (packet.stream_index() == video_stream_index) {
                    int length = packet.size();
                    byte[] video = new byte[length];
                    // Read from the payload buffer. The original called
                    // position(0) on the AVPacket struct itself, which moves
                    // the struct pointer rather than the data cursor.
                    packet.data().position(0).get(video, 0, length);
                    anyToH264.handle(video, packet.pts(), packet.dts(), packet.duration());
                }

                if (packet.stream_index() == audio_stream_index) {
                    int length = packet.size();
                    byte[] audio = new byte[length];
                    packet.data().position(0).get(audio, 0, length);
                    anyToAAC.handle(audio, packet.pts(), packet.dts());
                }

                // TODO subtitle stream handling.

                av_packet_unref(packet);
            }
        } catch (Exception ex) {
            log.error(ex.toString(), ex);
        } finally {
            // Always release native resources, even after an exception
            // (the original never invoked releaseFfmpeg at all).
            anyToH264.releaseFfmpeg();
            releaseFfmpeg();
        }
    }

    /**
     * Starts the demux loop on a new thread.
     */
    public void start() {
        running = true;
        new Thread(this).start();
    }

    /**
     * Asks the demux thread to stop after the packet it is processing.
     */
    public void close() {
        running = false;
    }

    /**
     * Frees the native output packet and closes the input context.
     *
     * @return always 0
     */
    private int releaseFfmpeg() {
        if (out_packet != null) {
            // Allocated with av_packet_alloc, so it must be freed natively.
            av_packet_free(out_packet);
            out_packet = null;
        }
        if (format_ctx != null) {
            // avformat_close_input (not avformat_free_context) is the correct
            // teardown for a context opened with avformat_open_input: it also
            // releases the demuxer's internal buffers and the AVIO context.
            avformat_close_input(format_ctx);
            format_ctx = null;
        }
        return 0;
    }

    /**
     * Opens the input url, locates its audio/video streams and allocates the
     * reusable packets.
     *
     * @return 0 on success, -1 on failure (native resources opened so far are
     *         released before returning)
     */
    private int initFfmpeg() {
        avformat_network_init();
        // Allocate an AVFormatContext
        format_ctx = avformat_alloc_context();

        // Open an input stream and read the header; the codecs are not opened.
        int ret = avformat_open_input(format_ctx, url, null, null);
        if (ret != 0) {
            log.info(String.format("fail to open url: %s, return value: %d\n", url, ret));
            // avformat_open_input frees a user-supplied context on failure;
            // drop our reference so releaseFfmpeg does not touch it again.
            format_ctx = null;
            return -1;
        }

        // Read packets of a media file to get stream information
        ret = avformat_find_stream_info(format_ctx, (AVDictionary) null);
        if (ret < 0) {
            log.info(String.format("fail to get stream information: %d\n", ret));
            avformat_close_input(format_ctx);   // don't leak the opened input
            format_ctx = null;
            return -1;
        }

        // Locate the audio/video stream indices.
        video_stream_index = -1;
        audio_stream_index = -1;
        log.info(String.format("Number of elements in AVFormatContext.streams: %d\n", format_ctx.nb_streams()));
        for (int i = 0; i < format_ctx.nb_streams(); ++i) {
            AVStream stream = format_ctx.streams(i);
            log.info(String.format("type of the encoded data: %d\n", stream.codecpar().codec_id()));
            if (stream.codecpar().codec_type() == AVMEDIA_TYPE_VIDEO) {
                video_stream_index = i;
                log.info(String.format("dimensions of the video frame in pixels: width: %d, height: %d, pixel format: %d\n",
                        stream.codecpar().width(), stream.codecpar().height(), stream.codecpar().format()));
            } else if (stream.codecpar().codec_type() == AVMEDIA_TYPE_AUDIO) {
                audio_stream_index = i;
                log.info(String.format("audio sample format: %d\n", stream.codecpar().format()));
            }
        }

        if (video_stream_index == -1) {
            log.info("no video stream\n");
            avformat_close_input(format_ctx);   // don't leak the opened input
            format_ctx = null;
            return -1;
        }

        if (audio_stream_index == -1) {
            // Audio is optional; keep going with video only.
            log.info("no audio stream\n");
        }

        packet = new AVPacket();

        // TODO initialize the MP4 muxer: create the output file, allocate
        // out_format_ctx via avformat_alloc_output_context2, add an H.264
        // stream configured with initOutCodecContext, open the AVIO context
        // (avio_open) and write the header (avformat_write_header). Until
        // then, frames staged in out_packet are never written to outFile.

        // Reusable staging buffer for transcoded frames.
        out_packet = av_packet_alloc();
        av_new_packet(out_packet, OUT_PACKET_BUF_SIZE);

        return 0;
    }

    /**
     * Configures an encoder context for the MP4 output stream. Currently only
     * referenced by the pending muxer initialization (see TODO in initFfmpeg).
     *
     * @param codecContext context to configure
     * @param width        frame width in pixels
     * @param height       frame height in pixels
     * @param dpi          bit-rate multiplier: target rate is width*height*dpi
     */
    private void initOutCodecContext(AVCodecContext codecContext, int width, int height, int dpi) {
        codecContext
                // Target/min/max bit rate; a higher rate means a larger file.
                .bit_rate(width * height * dpi)
                .rc_max_rate(width * height * dpi)
                .rc_min_rate(width * height * dpi)

                // Encoded frame size in pixels.
                .width(width)
                .height(height)

                // Time base expressed as a fraction: 10 units per second here.
                .time_base(new AVRational().num(1).den(10))

                // Pixel format (color space) of the encoded frames.
                .pix_fmt(AV_PIX_FMT_YUV420P)

                // One I-frame every 20 frames; fewer I-frames = smaller file.
                .gop_size(20)

                // Max B-frames between two non-B frames; 0 disables B-frames.
                .max_b_frames(0)

                // Quantizer bounds.
                .qmin(10)
                .qmax(50)

                // How much the quantizer may fluctuate between qmin and qmax
                // (0.0-1.0); 0 means no smoothing.
                .qblur(0)

                // Motion-estimation comparison strength; larger values give
                // better prediction at the cost of encode time.
                .me_pre_cmp(2)

                // DCT algorithm selection (0-7), tuned per CPU instruction set.
                .dct_algo(0)

                // Masking strength for overly bright/dark scenes; 0 disables.
                .lumi_masking(0)
                .dark_masking(0);
    }

}
