#include "CameraTask.hpp"
#include <atomic>
#include <arpa/inet.h>


namespace wd{

void CameraTask::sendMetaData(AVFormatContext *fmtCtx,int videoStreamIdx){
    // Collect the video stream's codec parameters into a VideoMetadata record,
    // serialize it, and send it to the client as a TASK_META_DATA message
    // (fragmented by fragmentAndSend if it exceeds one chunk).
    AVStream *stream=fmtCtx->streams[videoStreamIdx];
    AVCodecParameters *codecPar=stream->codecpar;

    VideoMetadata meta;
    // Codec identifier (FFmpeg AVCodecID value).
    meta.codecId=codecPar->codec_id;
    printf("meta.codecId is %d \n",meta.codecId);
    // Video resolution.
    meta.width=codecPar->width;
    meta.height=codecPar->height;
    printf("meta.width is %d \nmeta.height is %d \n",meta.width,meta.height);
    // Stream time base: numerator...
    meta.timeBaseNum=stream->time_base.num;
    printf("meta.timeBaseNum is %d \n",stream->time_base.num);
    // ...and denominator.
    meta.timeBaseDen=stream->time_base.den;
    printf("meta.timeBaseDen is %d \n",stream->time_base.den);

    // Codec extradata (e.g. H.264 SPS/PPS). Guard the size as well as the
    // pointer; extradata_size can legitimately be 0.
    if(codecPar->extradata && codecPar->extradata_size>0){
        meta.spsSPS.assign((char *)codecPar->extradata,codecPar->extradata_size);
    }
    // BUGFIX: extradata is raw binary, not a NUL-terminated string — printing
    // it with %s read past the buffer and emitted garbage. Log the size instead.
    printf("meta.spsSPS size is %zu bytes\n",meta.spsSPS.size());

    // meta is fully assembled — serialize and send.
    vector<char> metaData=serializeMetaData(meta);
    printf("元数据总大小:%zu 字节\n",metaData.size());

    // Fragment the serialized metadata and push it to the connection.
    fragmentAndSend(metaData.data(),metaData.size(),TASK_META_DATA);
}


void CameraTask::sendPacketToClient(AVPacket *pkt){
    // Serialize one compressed packet as [size:4][pts:8][dts:8][payload]
    // and hand the buffer to fragmentAndSend as a TASK_VIDEO_FRAME message.
    // NOTE(review): these header fields are written in HOST byte order while
    // serializeMetaData uses network order (htonl) — confirm the receiver
    // really expects this mix before changing either side.
    VideoFrame frame;
    frame.size=pkt->size;
    frame.pts=pkt->pts;
    frame.dts=pkt->dts;
    frame.data=pkt->data;

    uint32_t wireSize=frame.size;
    uint64_t wirePts=frame.pts;
    uint64_t wireDts=frame.dts;

    vector<char> payload;
    payload.reserve(4+8+8+frame.size);

    // Fixed-width header fields, copied byte-for-byte.
    const char *p=reinterpret_cast<const char*>(&wireSize);
    payload.insert(payload.end(),p,p+4);
    p=reinterpret_cast<const char*>(&wirePts);
    payload.insert(payload.end(),p,p+8);
    p=reinterpret_cast<const char*>(&wireDts);
    payload.insert(payload.end(),p,p+8);
    // Raw compressed frame bytes.
    payload.insert(payload.end(),(char*)frame.data,(char*)frame.data+frame.size);

    printf("frameData has already been seralized!\n");
    // Serialization done — fragment and send over the connection.
    fragmentAndSend(payload.data(),payload.size(),TASK_VIDEO_FRAME);
    printf("fragmentAVPacket has already been sent!\n");
}

void CameraTask::fragmentAndSend(const char *data,size_t data_len,uint8_t type){
    // Split a serialized message into MTU-sized DataFragment chunks, wrap each
    // chunk in a TLV envelope and send it over the connection.
    //   data / data_len : the serialized payload
    //   type            : logical message type stored in every fragment header
    //                     so the receiver can reassemble by kind.

    // Maximum payload bytes per fragment.
    const size_t MAX_DATA_LENGTH=1380;
    // One id per logical message so the receiver can group its fragments.
    uint32_t packet_id=generatePacketId();
    // Ceiling division: number of fragments needed (0 when data_len == 0).
    size_t total_fragments=(data_len+MAX_DATA_LENGTH-1)/MAX_DATA_LENGTH;
    for(size_t i=0;i<total_fragments;++i){
        DataFragment frag{};
        frag.type=type;
        // Mark as fragmented only when the message spans more than one chunk.
        frag.isFragmented=(total_fragments>1)?1:0;
        // Fill in the reassembly header.
        frag.totalFragments=total_fragments;
        frag.currentFragment=i;
        frag.packerId=packet_id;
        // Last fragment may be shorter than MAX_DATA_LENGTH.
        frag.dataLen=std::min(MAX_DATA_LENGTH,data_len-i*MAX_DATA_LENGTH);
        // NOTE(review): header fields are sent in host byte order despite the
        // original comment hinting at conversion — confirm the peer agrees.
        size_t copy_len=frag.dataLen;
        memcpy(frag.data,data+i*MAX_DATA_LENGTH,copy_len);
        // Wrap the finished fragment in a TLV envelope for the transport layer.
        wd::TLV tlv;
        tlv._type=TASK_STREAM_DATA;
        tlv._length=sizeof(frag);
        memcpy(tlv.data,&frag,sizeof(frag));
        _conn->sendInLoop(tlv);
        // BUGFIX: i is size_t — %ld was a format-specifier mismatch (undefined
        // behavior where size_t and long differ); %zu is the portable specifier.
        printf("发送了第%zu个分片\n",i);
    }
}

// 序列化meta数据
// Serialize a VideoMetadata record into a flat byte buffer.
// Wire layout (all integers in network byte order, 32-bit each):
//   cameraId, codecId, width, height, timeBaseNum, timeBaseDen,
//   spsSize, then spsSize raw extradata bytes (may be absent when empty).
vector<char> CameraTask::serializeMetaData(VideoMetadata meta) {
    vector<char> serializedData;
    // 7 fixed u32 fields + variable extradata: reserve once, no reallocations.
    serializedData.reserve(7 * sizeof(uint32_t) + meta.spsSPS.size());

    // Append one 32-bit value in network byte order. Factoring this out
    // replaces seven copy-pasted insert() stanzas from the original.
    auto putU32 = [&serializedData](uint32_t hostValue) {
        uint32_t net = htonl(hostValue);
        const char *p = reinterpret_cast<const char*>(&net);
        serializedData.insert(serializedData.end(), p, p + sizeof(net));
    };

    putU32(meta.cameraId);
    putU32(meta.codecId);
    putU32(meta.width);
    putU32(meta.height);
    putU32(meta.timeBaseNum);
    putU32(meta.timeBaseDen);

    // Extradata: length prefix then raw bytes.
    putU32(meta.spsSPS.size());
    if (!meta.spsSPS.empty()) {
        serializedData.insert(serializedData.end(),
                              meta.spsSPS.begin(),
                              meta.spsSPS.end());
    }

    return serializedData;
}

// 生成唯一id
// Produce a process-wide unique, monotonically increasing packet id.
// Relaxed ordering suffices: only uniqueness matters, not ordering with
// respect to other memory operations.
uint32_t CameraTask::generatePacketId() {
    static std::atomic<uint32_t> nextId{0};
    uint32_t id = nextId.fetch_add(1, std::memory_order_relaxed);
    return id;
}

void CameraTask::process(){
    //音视频封装格式上下文结构体
    AVFormatContext *pFormatCtx=nullptr;
    //音视频编码器上下文结构体
    AVCodecContext *pCodecCtx=nullptr;
    //音视频编码器结构体
    AVCodec *pCodec=nullptr;
    //存储一帧压缩编码数据
    AVPacket *pPacket=nullptr;

    //初始化FFmpeg网络模块
    avformat_network_init();

    //分配并初始化一个AVFormatContext结构体
    pFormatCtx=avformat_alloc_context();

    //配置输入流的参数选项(可选)
    //AVDictionary *avdic=nullptr;
    
    //打开媒体是否成功
    if(avformat_open_input(&pFormatCtx,"rtmp://192.168.105.222:1935/hlsram/live1",nullptr,nullptr)!=0){
        printf("can't open the file. \n");
        return;
    }

    //是否可以解析流信息
    if(avformat_find_stream_info(pFormatCtx,nullptr)<0){
        printf("Could't find stream information.\n");
        return;
    }

    //查找视频中包含的流信息，音频流先不处理
    int videoStreamIdx=-1;
    printf("apFormatCtx->nb_stream:%d\n",pFormatCtx->nb_streams);
    videoStreamIdx=av_find_best_stream(pFormatCtx,AVMEDIA_TYPE_VIDEO,-1,-1,nullptr,0);
    if(videoStreamIdx<0){
        cout << "av_find_best_stream error:" << av_get_media_type_string(AVMEDIA_TYPE_VIDEO) << endl;
        return;
    }
    //打印流的个数
    printf("video stream idx:%d\n",videoStreamIdx);

    //查找解码器
    cout << "avcodec_find_decoder..." << endl;
    cout << "AVCodecID:" << pFormatCtx->streams[videoStreamIdx]->codecpar->codec_id << endl;
    pCodec=avcodec_find_decoder(pFormatCtx->streams[videoStreamIdx]->codecpar->codec_id);

    if(pCodec==nullptr){
        cout << "Codec not found.\n" << endl;
        return;
    }

    //开辟解码器空间
    pCodecCtx=avcodec_alloc_context3(pCodec);
    //拷贝解码器参数
    avcodec_parameters_to_context(pCodecCtx,pFormatCtx->streams[videoStreamIdx]->codecpar);
    cout << "pCodecCtx->codec_id:" << pCodecCtx->codec_id << endl;

    //初始化解码器参数
    pCodecCtx->bit_rate=0; //初始化为0
    pCodecCtx->time_base.num=1;
    pCodecCtx->time_base.den=25; //这两行表示一秒钟25帧
    pCodecCtx->frame_number=1; //每包一个视频帧

    //分配AVPacket结构体
    pPacket=(AVPacket*)malloc(sizeof(AVPacket));
    int y_size=pCodecCtx->width*pCodecCtx->height;
    //分配数据包内部的数据缓冲区
    av_new_packet(pPacket,y_size);

    //!!!!!发送编码器参数
    sendMetaData(pFormatCtx,videoStreamIdx);
    cout << "MetaData send successful!" << endl;

    //循环读取AVPacket包
    while(1){
        if(av_read_frame(pFormatCtx,pPacket)<0){
            break; //这里认为视频读取完了
        }
        
        //!!!!!发送AVPacket包
        sendPacketToClient(pPacket);
        cout << "AVPacket send successful!" << endl;
    }
}


}//end of namespace wd
