//
// Created by haoy on 2017/3/17.
//
#include "rtp_writer.hpp"

#include <ios>
#include <iostream>
#include <map>
#include <fstream>
#include <h264/h264_nal.hpp>
#include <string>

#include "mod/vp8/vp8depacketizer.h"
#include "mod/h264/h264depacketizer.h"

extern "C" {
    #include "libavcodec/avcodec.h"
}

#define WRITER_DEBUG(event, fmt) LOG4CPLUS_DEBUG("", "writer: " << event << fmt)
#define WRITER_INFO(event, fmt) LOG4CPLUS_INFO("", "writer: " << event << fmt)
#define WRITER_WARN(event, fmt) LOG4CPLUS_WARN("", "writer: " << event << fmt)
#define WRITER_ERROR(event, fmt) LOG4CPLUS_ERROR("", "writer: " << event << fmt)

#define AUDIO_SAMPLE_DURATIN 20

// Maps a media codec id onto a short human-readable name used in log output.
// Unknown / none ids fall through to "none".
static
const char* getCodecNameByType(mediacodec_id_t type) {
    if (type == MCODEC_ID_YUV420P) {
        return "yuv420p";
    }
    if (type == MCODEC_ID_VP8) {
        return "vp8";
    }
    if (type == MCODEC_ID_VP9) {
        return "vp9";
    }
    if (type == MCODEC_ID_H264) {
        return "H264";
    }
    if (type == MCODEC_ID_PCM) {
        return "PCM";
    }
    if (type == MCODEC_ID_OPUS) {
        return "OPUS";
    }
    if (type == MCODEC_ID_ILBC) {
        return "ILBC";
    }
    if (type == MCODEC_ID_ISAC) {
        return "ISAC";
    }
    if (type == MCODEC_ID_G722) {
        return "G722";
    }
    if (type == MCODEC_ID_AAC) {
        return "AAC";
    }
    if (type == MCODEC_ID_MAX) {
        return "MAX";
    }
    // MCODEC_ID_NONE, MCODEC_ID_UNKNOWN and anything else.
    return "none";
}
class SilenceUtil {
public:
private:
    AVCodecContext* _codecCtx = nullptr;
    std::map<int, AVPacket*> _slience_map;

public:
    ~SilenceUtil() {
        flush();

        if (_codecCtx) {
            avcodec_close(_codecCtx);
            _codecCtx = nullptr;
        }

        for(auto i : _slience_map) {
            av_packet_unref(i.second);
        }
        _slience_map.clear();

    }

    int open(enum AVCodecID codecID, int sample_rate, int channels, int bitrate) {
        if(_codecCtx) {
            return -(__LINE__);
        }

        AVCodec* pCodec = avcodec_find_encoder(codecID);
        if (!pCodec) {
            av_log(NULL, AV_LOG_ERROR, "%s", "Not found this codec!");
            return -(__LINE__);
        }

        _codecCtx = avcodec_alloc_context3(pCodec);
        if (!_codecCtx) {
            av_log(NULL, AV_LOG_ERROR, "%s", "Not init codec!");
            return -(__LINE__);
        }
        
        _codecCtx->codec_id = pCodec->id;
        _codecCtx->codec_type = AVMEDIA_TYPE_AUDIO;
        _codecCtx->sample_fmt = AV_SAMPLE_FMT_S16;
        _codecCtx->sample_rate = sample_rate;
        _codecCtx->channel_layout = av_get_default_channel_layout(channels);
        _codecCtx->channels = channels;
        _codecCtx->bit_rate = bitrate;

        int ret = avcodec_open2(_codecCtx, pCodec, NULL);
        if(ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "%s", "Failed to open encoder!");
            return -(__LINE__);
        }

        return 0;
    }

    AVPacket* ff_get_silence_pkt(int frame_size) {
        if(!_codecCtx) {
            return nullptr;
        }

        auto search = _slience_map.find(frame_size);
        if(_slience_map.end() != search) {
            return search->second;
        }

        AVFrame* frame = ff_get_silence_pcm(frame_size);
        
        AVPacket* pkt = ff_encode_opus(frame);
        if(!pkt) {
            return nullptr;
        }

        _slience_map[frame_size] = pkt;

        return pkt;
    }

private:
    AVFrame* ff_get_silence_pcm(int frame_size) {
        if(!frame_size) {
            if(_codecCtx) {
                frame_size = AUDIO_SAMPLE_DURATIN * 1.0 / 1000 * _codecCtx->sample_rate;
            } else {
                frame_size = 960;
            }
        }

        AVFrame* frame        = av_frame_alloc();
        frame->nb_samples     = frame_size;
        frame->channel_layout = av_get_default_channel_layout(_codecCtx->channels);  
        frame->channels       =  _codecCtx->channels;
        frame->format         = (enum AVSampleFormat)_codecCtx->sample_fmt;  
        frame->sample_rate    = _codecCtx->sample_rate;

        av_frame_get_buffer(frame, 0);

        //根据参数填充静音帧数据，PCM data
        av_samples_set_silence(frame->data, 0, frame->nb_samples, frame->channels, (enum AVSampleFormat)frame->format);

        return frame;
    }

    AVPacket* ff_encode_opus(AVFrame* frame) {
        AVPacket* pkt = av_packet_alloc();

        int ret = avcodec_send_frame(_codecCtx, frame);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "%s", "avcodec_send_frame error\n");
        }

        ret = avcodec_receive_packet(_codecCtx, pkt);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "%s", "avcodec_receive_packet！error \n");
            // av_strerror();
            return nullptr;
        }

        av_frame_free(&frame);
        return pkt;
    }

    void flush() {
        if(!_codecCtx) {
            return ;
        }

        AVPacket* pkt = av_packet_alloc();

        int ret = avcodec_send_frame(_codecCtx, nullptr);
        if (ret < 0) {
            return ;
        }

        while(1) {
            ret = avcodec_receive_packet(_codecCtx, pkt);
            if (ret){
                return ;
            }
            av_packet_unref(pkt);
        }
    }
};

// File-local shared silence-packet generator; opened once in RTPWriter::audio().
static SilenceUtil silenceUtil;

// Default-constructs the writer; media parameters are supplied later via
// video()/audio() and the recorder is created by output().
RTPWriter::RTPWriter() :
        _video_depacketizer(NULL),
        _audio_reorder(32),   // reorder depth — presumably packets; verify against header
        _video_reorder(32),
        _has_video(false),
        _video_codec(MCODEC_ID_UNKNOWN),
        _video_width(0),
        _video_height(0),
        _video_fps(0),
        _has_audio(false),
        _audio_codec(MCODEC_ID_UNKNOWN),
        _audio_sample_rate(0),
        _audio_channels(0),
        _recorder(NULL),
        _rtp_map()
{

}

// Releases the video depacketizer. `delete` on a null pointer is a no-op,
// so no guard is required.
RTPWriter::~RTPWriter() {
    delete _video_depacketizer;
    _video_depacketizer = NULL;

    // if(fp_) {
    //     fclose(fp_);
    //     fp_ = nullptr;
    // }
}

// Configures the video track (codec, dimensions, frame rate) and creates the
// matching depacketizer. Exits the process on an unsupported codec.
// Returns *this for chaining.
RTPWriter &
RTPWriter::video(const mediacodec_id_t codec, const uint32_t width, const uint32_t height, const uint32_t fps) {
    _has_video = true;
    _video_codec = codec;
    _video_width = width;
    _video_height = height;
    _video_fps = fps;

    // Calling video() a second time previously leaked the old depacketizer.
    if (_video_depacketizer) {
        delete _video_depacketizer;
        _video_depacketizer = NULL;
    }

    if (codec == MCODEC_ID_VP8) {
        _video_depacketizer = new VP8Depacketizer();
    } else if (codec == MCODEC_ID_H264) {
        _video_depacketizer = new H264Depacketizer();
    } else {
        WRITER_ERROR("RTPWriter: unsupported video codec", FMT().kv("codec", codec));
        exit(-1);
    }

    return *this;
}

// Configures the audio track and opens the shared silence generator with the
// matching FFmpeg encoder id. Returns *this for chaining.
RTPWriter &
RTPWriter::audio(const mediacodec_id_t codec, const uint32_t sample_rate, const uint32_t channels) {
    _has_audio = true;
    _audio_codec = codec;
    _audio_sample_rate = sample_rate;
    _audio_channels = channels;

    // Map the media codec onto the FFmpeg encoder used to synthesize silence.
    enum AVCodecID ff_id;
    switch (codec) {
    case MCODEC_ID_AAC:
        ff_id = AV_CODEC_ID_AAC;
        break;
    case MCODEC_ID_G722:
        ff_id = AV_CODEC_ID_ADPCM_G722;
        break;
    case MCODEC_ID_ILBC:
        ff_id = AV_CODEC_ID_ILBC;
        break;
    default:
        ff_id = AV_CODEC_ID_OPUS;  // every other codec falls back to Opus
        break;
    }

    const int bitrate = 128000;
    silenceUtil.open(ff_id, sample_rate, channels, bitrate);
    //_audio_depacketizer = new DummyAudioDepacketizer(codec);
    return *this;
}

void RTPWriter::output(const std::string &filename) {
    if(_has_video) {
        WRITER_INFO("output video parameters",
                FMT().kv("video_codec", getCodecNameByType(_video_codec))
                        .kv("video_width", _video_width)
                        .kv("video_height", _video_height)
                        .kv("video_fps", _video_fps));
    } else {
        WRITER_INFO("output no video",FMT());
    }
    if(_has_audio) {
        WRITER_INFO("output parameters",
                FMT().kv("audio_codec", getCodecNameByType(_audio_codec))
                        .kv("audio_samplerate", _audio_sample_rate)
                        .kv("audio_channels", _audio_channels)
        );
    } else {
        WRITER_INFO("output no audio", FMT());
    }

    std::string ext = "mkv";
    // 1. a+v or a or v
    if (_has_video && _has_audio) {

        if (_video_codec == MCODEC_ID_VP8) {
            ext = "webm";
        } else if (_video_codec == MCODEC_ID_H264) {
            if (_audio_codec == MCODEC_ID_AAC) {
                ext = "mkv";
            } else if (_audio_codec == MCODEC_ID_OPUS) {
                ext = "mkv";
            } else {
                WRITER_ERROR("unexpected video/audio codec", FMT().kv("video", _video_codec).kv("audio", _audio_codec));
                exit(-1);
            }
        } else {
            WRITER_ERROR("video codec not supported", FMT().kv("codec", _video_codec));
            exit(-1);
        }
    } else if (_has_audio) {
        if (_audio_codec == MCODEC_ID_OPUS) {
            ext = "webm";
        } else if (_audio_codec == MCODEC_ID_AAC) {
            ext = "mkv";
        } else {
            WRITER_ERROR("audio codec not supported", FMT().kv("codec", _audio_codec));
            exit(-1);
        }
    } else if (_has_video) {
        if (_video_codec == MCODEC_ID_VP8) {
            ext = "webm";
        } else if (_video_codec == MCODEC_ID_H264) {
            ext = "mkv";
        } else {
            WRITER_ERROR("video codec not supported", FMT().kv("codec", _video_codec));
            exit(-1);
        }
    } else {
        WRITER_ERROR("no video or audio to output", FMT());
        exit(-1);
    }

    {
        if(_buffer) {
        delete _buffer;
        _buffer = nullptr; 
        }
        _buffer = new XVBuffer();

        XVBuffer::Listener listen;
        listen.video = [this](uint8_t* data, size_t len, const int64_t& pts, bool key_frame)->int {
            // WRITER_ERROR("Video ", FMT().kv("pts", pts));
            if(rr_process_video(_recorder, pts, data, len, key_frame) != 0) {
                return -1;
            }
            ++_video_frame_count;
            if(_begin_ts > pts || -1 == _begin_ts) {
                _begin_ts = pts;
            }
            return 0;
        };
        listen.audio = [this](uint8_t* data, size_t len, const int64_t& pts)->int {
            // WRITER_ERROR("AAAAAAAAudio ", FMT().kv("pts", pts));
            if(rr_process_audio(_recorder, pts, data, len) != 0)  {
                return -1;
            }
            ++_audio_frame_count;
            if(_begin_ts > pts || -1 == _begin_ts) {
                _begin_ts = pts;
            }
            return 0;
        };
        _buffer->setListener(listen);

        if(_buffer->init(102400, _has_video, _video_width, _video_height, 180000, _video_fps, _video_codec) < 0) {
            WRITER_ERROR("failed to init media buffer.", FMT());
            exit(-1);
        }
    }

    _output_file = filename;
    _media_file = _output_file + "." + ext;
    // recorder 通过output filename字段查找生成的tmp media
    std::cerr << "output filename = " << _media_file.c_str() << std::endl;
    _ts_file_name = _output_file + ".ts";
    // 2. open rav_record_t
    _recorder = rr_open(_media_file.c_str(), _video_codec, _video_width, _video_height, _video_fps,
                        _audio_codec, _audio_sample_rate, _audio_channels, _audio_sample_rate, _audio_channels,
                        NULL, 128);
    if (_recorder == NULL) {
        WRITER_ERROR("rr_open failed", FMT().kv("file", _media_file));
        exit(-1);
    }
    return;
}

// Finalizes the recording: flushes and destroys the media buffer, writes the
// "<output>.ts" file with the first media timestamp, closes the recorder and
// emits the ffprobe.json side file.
void RTPWriter::close() {
    if (_buffer) {
        _buffer->flush();
        delete _buffer;
        _buffer = nullptr;
    }

    // Persist the earliest timestamp so downstream tooling can align tracks.
    std::ofstream ts_file(_ts_file_name, std::ios::out|std::ios::trunc);
    if (!ts_file.is_open()) {
        WRITER_ERROR("can't open ts file", FMT().kv("ts", _ts_file_name));
    } else {
        ts_file << "begin=" << _begin_ts;
        ts_file << std::endl;
        ts_file.close();
    }

    if (_recorder) {
        rr_close(_recorder);
        _recorder = NULL;  // guard against a double close() closing twice
    }

    // Write the ffprobe.json file describing the produced media.
    writeMediaInfo();

    WRITER_INFO("write video frame : ", FMT().kv("count", _video_frame_count));
    WRITER_INFO("write audio frame : ", FMT().kv("count", _audio_frame_count));
}

// Splits the output path into directory + base name and asks the media prober
// to write "<dir>/<base>.ffprobe.json". A path without any '/' is fatal
// (process exit), matching the previous behavior.
void RTPWriter::writeMediaInfo() {
    if (_output_file.empty()) {
        return;
    }

    const std::string::size_type slash = _output_file.find_last_of('/');
    if (slash == std::string::npos) {
        exit(-1);
    }
    const std::string rtcName = _output_file.substr(slash + 1);
    const std::string writePath = _output_file.substr(0, slash + 1);
    _media_pb.writeMediaInfoToFile(_media_file, rtcName, writePath);
    WRITER_INFO("ffprobe.json file = ", FMT().v(writePath + rtcName + ".ffprobe.json"));
}

// Codec-registry callback — intentionally a no-op here: codecs are configured
// up front through video()/audio() rather than from the RTP codec map.
void RTPWriter::onCodecRegistry(CodecRegistry &codecs, uint32_t pt, const std::string& codec_name) {

}

// Handles one incoming audio RTP packet: corrects backwards PTS jumps,
// synthesizes silence packets across DTX gaps, and feeds the payload into
// the media buffer for the recorder.
void RTPWriter::onAudioPacket(RTPPacket *packet, uint64_t pts) {
    if (!_has_audio) {
        return;
    }

    // Ignore packets whose codec doesn't match the configured audio codec.
    if(packet->GetMCodec() != _audio_codec) {
        return ;
    }

    uint64_t pts2 = _use_ntp_as_pts? pts : packet->GetTime();
    if (_audio_last_pts) {
        if (pts2 < _audio_last_pts) {
            // PTS went backwards: re-derive it from the RTP timestamp delta.
            uint32_t d = packet->GetTimestamp() - _audio_last_ts;
            d = d / packet->GetClockRate();  // delta in whole seconds
            if (d > 60) {
                WRITER_WARN("audio pts screw detected, ts jumped over 60 secs", FMT().kv("gap", d));
            }
            uint64_t pts3 = _audio_last_pts + (packet->GetTimestamp() - _audio_last_ts) / (packet->GetClockRate() / 1000);
            WRITER_WARN("audio pts screw corrected", FMT().kv("from", pts2).kv("to", pts3));
            pts2 = pts3;
            //pts2 = _audio_last_pts;
        }
    }

    WRITER_DEBUG("AAAAAAUDIO", FMT().kv("packet", packet->GetExtSeqNum()).kv("pts", pts2).kv("pts-delta", (DWORD)(pts2 - _audio_last_pts)));
    // Generate matching silence packets across RTP DTX gaps.
    uint32_t ts_diff = packet->GetTimestamp() - _audio_last_ts;
    // Samples per AUDIO_SAMPLE_DURATIN-ms frame at this packet's clock rate.
    uint32_t step_sample = (AUDIO_SAMPLE_DURATIN * 1.0 / 1000) * packet->GetClockRate();
    uint64_t tmp_pts = _audio_last_pts;
    if(0 == _audio_last_ts) {
        // Very first packet: pretend exactly one frame elapsed.
        ts_diff = step_sample;
    }

    // A 1-byte payload is treated as a DTX/comfort-noise marker — TODO confirm
    // against the depacketizer; a timestamp gap wider than one frame means
    // frames were skipped. Either case is filled with synthesized silence.
    if(1 == packet->GetMediaLength() || (ts_diff / step_sample) > 1) {
        for(uint32_t i = 0; i < ts_diff; i+=step_sample) {
            AVPacket* pkt = silenceUtil.ff_get_silence_pkt(step_sample);
            if(!pkt) {
                continue;
            }

            tmp_pts += AUDIO_SAMPLE_DURATIN;

            // Clamp so the final synthesized frame lands on the real packet's PTS.
            if(tmp_pts > pts2 || i + step_sample >= ts_diff) {
                tmp_pts = pts2;
            }

            _buffer->add(pkt->data, pkt->size, tmp_pts, MediaFrame::Type::Audio, true);
        }
    } else {
        _buffer->add(packet->GetMediaData(), packet->GetMediaLength(), pts2, MediaFrame::Type::Audio, true);
    }

    _audio_last_pts = pts2;
    _audio_last_ts = packet->GetTimestamp();
}

// Handles one incoming video RTP packet: detects sequence gaps (packet loss),
// drops data until the stream is decodable again (caching/replaying SPS+PPS
// for H.264, scanning the VP8 payload descriptor for a resumable frame),
// corrects backwards PTS jumps, then depacketizes and pushes complete frames
// into the media buffer.
void RTPWriter::onVideoPacket(RTPPacket *packet, uint64_t pts) {
    if (!_has_video) {
        return;
    }

    // Ignore packets whose codec doesn't match the configured video codec.
    if(packet->GetMCodec() != _video_codec) {
        return ;
    }

    uint64_t pts2 = _use_ntp_as_pts? pts : packet->GetTime();
    if (_video_last_pts) {
        if (pts2 < _video_last_pts) {
            // PTS went backwards: re-derive it from the RTP timestamp delta.
            uint32_t d = packet->GetTimestamp() - _video_last_ts;
            d = d / packet->GetClockRate();  // delta in whole seconds
            if (d > 60) {
                WRITER_WARN("video pts screw detected, ts jumped over 60 secs", FMT().kv("gap", d));
            }
            uint64_t pts3 = _video_last_pts + (packet->GetTimestamp() - _video_last_ts) / (packet->GetClockRate()/1000);
            WRITER_WARN("video pts screw corrected", FMT().kv("from", pts2).kv("to", pts3));
            pts2 = pts3;
            //pts2 = _video_last_pts;
        }
    }

    // Loss detection: a non-consecutive seq within the same cycle, or a cycle
    // increment that is not the exact 65535 -> 0 rollover, means packets were
    // lost. Reset the partial frame and start dropping.
    if((_video_last_cycles == packet->GetSeqCycles() && _video_last_seq + 1 != packet->GetSeqNum())
    || (_video_last_cycles + 1 == packet->GetSeqCycles() && (_video_last_seq != 65535 || 0 != packet->GetSeqNum()))) {
        _video_depacketizer->ResetFrame();
        _drop_mod = true;
    }

    if(_drop_mod) {
        if (packet->GetCodec() == VideoCodec::H264) {
            h264::nal_packet nalp;
            int nalp_read = nalp.read(packet->GetMediaData(), packet->GetMediaLength());
            if (nalp_read && nalp.header.type == h264::STAP_A) {
                // Cache STAP-A packets that carry SPS/PPS so they can be
                // replayed ahead of the next decodable frame.
                for (auto& unit : nalp.stap_a.units) {
                    if (unit.header.type == h264::SPS || unit.header.type == h264::PPS) {
                        _sps_pps_cache.push_back(packet->Clone());
                        WRITER_DEBUG("cached sps pps", FMT().kv("seq", packet->GetSeqNum()));
                        return;
                    }
                }
            }
            // Replay the cached SPS/PPS; producing a frame from them ends
            // drop mode.
            for (auto& sps_pps : _sps_pps_cache) {
                WRITER_DEBUG("writing cached sps pps", FMT().kv("seq", sps_pps->GetSeqNum()));
                VideoFrame* frame = (VideoFrame*)_video_depacketizer->AddPacket(sps_pps);
                delete sps_pps;
                if (!frame) {
                    continue;
                }
                _buffer->add(frame->GetData(), frame->GetLength(), pts2, MediaFrame::Type::Video, true);
                _drop_mod = false;
                // rr_process_video(_recorder, pts, frame->GetData(), frame->GetLength(), true);
            }
            _sps_pps_cache.clear();
        }else if(packet->GetCodec() == VideoCodec::VP8) {
            // After packet loss, drop non-key frames and wait for the next
            // key frame (or golden/altref frame) so the converted video does
            // not show corruption. Walk the VP8 payload descriptor
            // (RFC 7741 layout) to reach the payload header's P bit.
            const uint8_t* buf = packet->GetMediaData();
            int offset = 0;
            bool x = buf[offset] & 0x80;			// X: extension block present
            bool s = buf[offset] & 0x10;			// S: start of VP8 partition
            // printf("|sn=%5d|m=%d|x=%d|s=%d|", packet->GetSeqNum(), packet->GetMark(), x, s);
            offset += 1;
            if(x) {
                bool i		= buf[offset] & 0x80;		// I: PictureID present
                bool l		= buf[offset] & 0x40;		// L: TL0PICIDX present
                bool t		= buf[offset] & 0x20;		// T: TID present
                // printf("i=%d|l=%d|t=%d|", i, l, t);
                // bool k		= buf[offset] & 0x10;		// K: KEYIDX present
                offset += 1;
                if(i) {
                    bool m = buf[offset] & 0x80;				// M: 15-bit PictureID
                    if(m) {
                        // 15-bit PictureID spans two bytes; skip the extra
                        // byte here (a 7-bit PictureID is a single byte).
                        offset += 1;
                    } 
                    offset += 1;
                }
                if(l) {
                    offset += 1;
                }

                if(t) {
                    offset += 1;
                }
                // P bit of the VP8 payload header: 0 => key frame.
                bool p = (buf[offset] & 0x01) == 0;
                // printf("p=%d|", p);
                if(!l && !t) {
                    if(p && s) {
                        // Start of a key frame — safe to resume.
                        _drop_mod = false;
                    } else {
                        // keep dropping
                        // _drop_mod = true;
                    }
                } else {
                    // Layered stream (TL0PICIDX/TID present): resumed here —
                    // NOTE(review): looks intentional for golden/altref
                    // recovery, confirm against the encoder configuration.
                    _drop_mod = false;
                }
            } else {
                // No extension block: the payload header follows immediately.
                bool p = (buf[offset] & 0x01) == 0;
                // printf("p=%d|", p);
                if(p) {
                    _drop_mod = false;
                } else {
                    // keep dropping
                    // _drop_mod = true;
                }
            }
        }
        if(_drop_mod) {
            return ;
        }
    }

// this check duplicates with depacketizer's timestamp check, and it cannot handle seq gap caused by fec
//    if (packet->GetSeqNum() - _video_last_seq != 1 && !_video_last_seq_set) {
//        _video_depacketizer->ResetFrame();
//    }

//    if (packet->GetCodec() == VideoCodec::VP8) {
//        uint8_t* buf = packet->GetMediaData();
//        if (buf[0] >> 4 & 0x1) {
//            _video_depacketizer->ResetFrame();
//        }
//    } else if (packet->GetCodec() == VideoCodec::H264) {
//
//    }

    _video_last_seq = packet->GetSeqNum();
    // _video_last_seq_set = true;
    _video_last_cycles = packet->GetSeqCycles();

    WRITER_DEBUG("VIDEO", FMT()
        .kv("packet", packet->GetExtSeqNum())
        .kv("ts",  packet->GetTimestamp()).kv("d-ts", packet->GetTimestamp() - _video_last_ts)
        .kv("pts", pts2).kv("d-pts", pts2 - _video_last_pts));

    // 1. merge packets into frame
    VideoFrame* frame = (VideoFrame*)_video_depacketizer->AddPacket(packet);
    if (!frame) {
        return;  // frame not yet complete
    }

    bool is_key_frame = frame->IsIntra();

    if (packet->GetCodec() == VideoCodec::H264) {
        // Override using the first NAL type after the 4-byte start code:
        // IDR (5), SPS (7) or PPS (8) count as a sync point.
        int naltype = frame->GetData()[4] & 0x1f;
        if ((naltype == 0x07) || (naltype==0x05) || (naltype == 0x08)) {
            is_key_frame = true;
        } else {
            is_key_frame = false;
        }
    }
    std::string frame_type("unknown");
    if (is_key_frame) {
        frame_type = "I";
    } else {
        frame_type = "non-I";
    }

    WRITER_DEBUG("Video Frame", FMT().kv("Seq", packet->GetSeqNum()).kv("frame", frame_type));

    _video_last_pts = pts2;
    _video_last_ts = packet->GetTimestamp();
    _buffer->add(frame->GetData(), frame->GetLength(), pts2, MediaFrame::Type::Video, is_key_frame);
}

