#include "dummy_sink.h"

//#include <Qimage>
//#include <QImageWriter>
#include <chrono>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

#include "NalParser.h"

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/mathematics.h>
#include <libswscale/swscale.h>
}

// Implementation of "DummySink":

// Even though we're not going to be doing anything with the incoming data, we still need to receive it.
// Define the size of the buffer that we'll use:
#define DUMMY_SINK_RECEIVE_BUFFER_SIZE 1024000

// Factory following the live555 convention: sinks are heap-allocated and
// later torn down through the MediaSink machinery, so construction goes
// through this static helper rather than a public constructor call.
DummySink* DummySink::createNew(RtspSession* session, UsageEnvironment& env, MediaSubsession& subsession, char const* streamId) {
    return new DummySink(session, env, subsession, streamId);
}

// Constructor.
//   session   - owning RTSP session; NOT owned by the sink (kept as a back
//               pointer, presumably for frame delivery — confirm lifetime
//               is managed by the caller).
//   env       - live555 usage environment, forwarded to MediaSink.
//   subsession- the media subsession this sink consumes (video/audio track).
//   streamId  - human-readable stream identifier; copied via strDup().
// Allocates the raw receive buffer plus a second buffer that carries a
// 4-byte Annex-B start code (00 00 00 01) in front of each NAL unit, which
// is the framing FFmpeg decoders expect.
DummySink::DummySink(RtspSession* session,
                     UsageEnvironment& env, MediaSubsession& subsession, char const* streamId)
    : MediaSink(env),
      session(session),
      frameIndex(0),
      fSubsession(subsession) {
    // strDup() (live555) allocates with new[]; freed with delete[] in ~DummySink.
    fStreamId = strDup(streamId);
    fReceiveBuffer = new u_int8_t[DUMMY_SINK_RECEIVE_BUFFER_SIZE];
    // +4 leaves room for the Annex-B start code written below.
    fReceiveBufferAV = new u_int8_t[DUMMY_SINK_RECEIVE_BUFFER_SIZE + 4];

    // Pre-write the 00 00 00 01 start code; frame payloads are copied in
    // at offset 4.
    fReceiveBufferAV[0] = 0;
    fReceiveBufferAV[1] = 0;
    fReceiveBufferAV[2] = 0;
    fReceiveBufferAV[3] = 1;

    //av_init_packet(&avpkt);
    //avpkt.flags |= AV_PKT_FLAG_KEY;
    //avpkt.pts = avpkt.dts = 0;

    // Zero the FFmpeg padding region past the end of `inbuf` (member/macro
    // declared in the header) so over-reading bitstream parsers see zeros,
    // as required by the libavcodec input-buffer contract.
    memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);

    // Legacy in-sink FFmpeg decode setup, kept for reference.  Decoding now
    // happens in HFFDecodeThread (see afterGettingFrame()).
    // codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    // if (!codec) {
    //     envir() << "codec not found!";
    //     exit(4);
    // }

    // codecContext = avcodec_alloc_context3(codec);
    // frame = av_frame_alloc();
    // rgbFrame = av_frame_alloc();
    // avpicture_alloc( ( AVPicture *) rgbFrame, AV_PIX_FMT_RGB24, 1920, 1080);

    // if (codec->capabilities & CODEC_CAP_TRUNCATED) {
    //     codecContext->flags |= CODEC_FLAG_TRUNCATED; // we do not send complete frames
    // }

    // codecContext->width = 1920;
    // codecContext->height = 1080;
    // codecContext->pix_fmt = AV_PIX_FMT_YUV420P;

    // if (avcodec_open2(codecContext,codec,NULL) < 0) {
    //     envir() << "could not open codec";
    //     exit(5);
    // }

    // this->swsContext = sws_getContext( codecContext->width, codecContext->height, codecContext->pix_fmt, 1920, 1080,
    //                                    AV_PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);
}

// Destructor: releases all buffers owned by the sink.
DummySink::~DummySink() {
    delete[] fReceiveBuffer;
    delete[] fReceiveBufferAV;
    delete[] fStreamId;  // strDup() (live555) allocates with new[]

    // Fix: the cached SPS/PPS copies are malloc()'d in afterGettingFrame()
    // and were previously leaked on teardown.  free(nullptr) is a no-op, so
    // this is safe even if no parameter sets ever arrived — the same
    // nullptr convention is already relied on when they are replaced
    // (assumes the members are null-initialized in the header; TODO confirm).
    free(sps_buf);
    free(pps_buf);
}

void DummySink::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes,
                                  struct timeval presentationTime, unsigned durationInMicroseconds) {
    DummySink* sink = (DummySink*)clientData;
    sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds);
}

// void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
//                                   struct timeval presentationTime, unsigned /*durationInMicroseconds*/) {
//     // We've just received a frame of data.  (Optionally) print out information about it:
//     if (strcmp(fSubsession.codecName(),"H264") == 0) {
//         avpkt.data = fReceiveBufferAV;
//         avpkt.size = frameSize + 4;
//         memcpy (fReceiveBufferAV + 4, fReceiveBuffer, frameSize);
//         avpkt.data = fReceiveBufferAV;
//         len = avcodec_decode_video2 (codecContext, frame, &got_picture, &avpkt);
//         if (len < 0) {
//             envir() << "Error while decoding frame " << frameIndex << "\n";
//         }
//         if (got_picture) {
//             envir() << "->Picture decoded :" << frameIndex << "\n";
//             sws_scale(this->swsContext, this->frame->data,this->frame->linesize,
//                       0, this->codecContext->height,
//                       this->rgbFrame->data, this->rgbFrame->linesize);
//             QImage *image = new QImage(this->rgbFrame->data[0],
//                     this->codecContext->width,
//                     this->codecContext->height,
//                     QImage::Format_RGB888);
//             QString name = QString("/Users/hfli/Movies/%1.jpg").arg(frameIndex);

//             emit this->session->gotFrame(*image);
//             frameIndex ++;
//         }
//     }

//     // Then continue, to request the next frame of data:
//     continuePlaying();
// }

// Convert a Unix timestamp in *milliseconds* to a local-time string of the
// form "YYYY-mm-dd HH:MM:SS mmm".  Returns "Invalid timestamp" when the
// std::tm conversion fails.
// NOTE: std::localtime() returns a pointer to shared static storage and is
// not thread-safe; fine for the current single-consumer logging use.
static std::string timestampToDateTimeString(long long timestamp) {
    // Split into whole seconds and the millisecond remainder.  Use
    // floor-style adjustment so pre-epoch (negative) inputs still yield a
    // remainder in [0, 999] instead of a negative value (fixes the old
    // truncating-division behavior).
    long long seconds = timestamp / 1000;
    int ms = static_cast<int>(timestamp % 1000);
    if (ms < 0) {
        ms += 1000;
        --seconds;
    }

    std::time_t timestamp_t = static_cast<std::time_t>(seconds);
    std::tm* tmPtr = std::localtime(&timestamp_t);
    if (!tmPtr) {
        return "Invalid timestamp";
    }

    // Format date/time, then the milliseconds zero-padded to 3 digits
    // ("7" -> "007") so the output width is fixed and unambiguous
    // (previously the raw value was appended unpadded).
    std::stringstream ss;
    ss << std::put_time(tmPtr, "%Y-%m-%d %H:%M:%S");
    ss << ' ' << std::setw(3) << std::setfill('0') << ms;

    return ss.str();
}


// Per-frame delivery callback (member overload).  For video subsessions it
// classifies the incoming NAL unit, caches SPS/PPS out of band, prepends
// Annex-B start codes, and hands complete I/P access units to the decode
// thread.  Always re-arms the source via continuePlaying() at the end.
//   frameSize         - payload bytes in fReceiveBuffer (no start code).
//   numTruncatedBytes - bytes dropped because the buffer was too small
//                       (currently unchecked — NOTE(review): a truncated
//                       frame is still forwarded to the decoder).
//   presentationTime  - RTP-derived presentation time (unused here).
void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
                                  struct timeval presentationTime, unsigned /*durationInMicroseconds*/) {
  // We've just received a frame of data.  (Optionally) print out information about it:
  if(strcmp(fSubsession.mediumName(), "video") == 0)
  {
    // struct tm *timep;
    // struct timespec start_spec;
    // clock_gettime(CLOCK_REALTIME, &start_spec);
    // timep = localtime(&start_spec.tv_sec);  // convert timespec to tm
    // printf("%d-%d-%d %d-%d-%d %ld\n", 1900 + timep->tm_year, 1 + timep->tm_mon, timep->tm_mday,
    //           timep->tm_hour, timep->tm_min, timep->tm_sec, start_spec.tv_nsec / 1000000);

    // Classify the NAL unit (NalParser.h helpers).  The case values below
    // suggest 1=I, 2=P, 3=SPS, 4=PPS, 5=SEI, 6=VPS — confirm against the
    // h264_frame_type/h265_frame_type implementations.
    int frame_type = 0;
    if (strcmp(fSubsession.codecName(),"H264") == 0) {
      frame_type = h264_frame_type((unsigned char *)fReceiveBuffer, frameSize);
    } else if (strcmp(fSubsession.codecName(), "H265") == 0){
      frame_type = h265_frame_type((unsigned char *)fReceiveBuffer, frameSize);
    }

    // Lazily start the decode thread on the first video frame, choosing the
    // decoder from the subsession codec name.  NOTE(review): for any other
    // codec the thread is started without Open() having been called.
    if (m_HFFDecodeThread == nullptr) {
      m_HFFDecodeThread = std::make_shared<HFFDecodeThread>();
      if (strcmp(fSubsession.codecName(),"H264") == 0) m_HFFDecodeThread->Open("h264") ;
      else if (strcmp(fSubsession.codecName(), "H265") == 0) m_HFFDecodeThread->Open("h265");
      m_HFFDecodeThread->start();
    }
    
    // Wall-clock receive time in milliseconds (gethrtime_us() returns µs).
    unsigned long long recv_time =  gethrtime_us() / 1000;
    std::vector<int8_t> sei_payload;
    char sei_time[14] = {0};  // 13-digit ms timestamp + NUL terminator
    std::string frame_type_str;
    AVPacket* pkt = NULL;
    UserData* user_data = NULL;
    frameIndex ++;
    switch (frame_type) {
      case 0x01 :
        // I-frame: build a self-contained access unit by prepending the
        // cached SPS+PPS (which already carry their own start codes) and a
        // fresh start code before the slice data.
        // NOTE(review): if an I-frame arrives before any SPS/PPS, sps_buf /
        // pps_buf are null and the memcpy below dereferences null — the
        // stream is assumed to send parameter sets first; confirm.
        frame_type_str = "I Frame";
        pkt = av_packet_alloc();
        av_new_packet(pkt, frameSize + 4 + sps_len + pps_len);
        pkt->pts = frameIndex;  // frame counter used as pts, not media time
        memcpy(pkt->data, sps_buf, sps_len);
        memcpy(pkt->data + sps_len, pps_buf, pps_len);
        pkt->data[sps_len + pps_len] = 0;
        pkt->data[sps_len + pps_len + 1] = 0;
        pkt->data[sps_len + pps_len + 2] = 0;
        pkt->data[sps_len + pps_len + 3] = 1;
        memcpy(pkt->data + sps_len + pps_len + 4, fReceiveBuffer, frameSize);

        // Side-channel metadata; ownership of both pkt and user_data is
        // presumably transferred to the decode thread via Push() — confirm
        // that HFFDecodeThread frees them.
        user_data = (UserData*)malloc(sizeof(UserData));
        user_data->id = pkt->pts;
        user_data->sei_time = _sei_time_ll;  // last timestamp seen in an SEI NAL
        user_data->recv_frame_time = recv_time;
        m_HFFDecodeThread->Push(pkt, user_data);
        break;
      case 0x02:
        // P-frame: just a start code plus the slice payload.
        frame_type_str = "P Frame";
        pkt = av_packet_alloc();
        av_new_packet(pkt, frameSize + 4);
        pkt->pts = frameIndex;
        pkt->data[0] = 0;
        pkt->data[1] = 0;
        pkt->data[2] = 0;
        pkt->data[3] = 1;
        memcpy(pkt->data + 4, fReceiveBuffer, frameSize);

        std::cout << "pkt->pts:" << pkt->pts << std::endl;
        user_data = (UserData*)malloc(sizeof(UserData));
        user_data->id = pkt->pts;
        user_data->sei_time = _sei_time_ll;
        user_data->recv_frame_time = recv_time;
        m_HFFDecodeThread->Push(pkt, user_data);
        break;
      case 0x03:
        // SPS: cache a start-code-prefixed copy for later I-frame assembly.
        frame_type_str = "SPS Frame";
        if(sps_buf != nullptr) free(sps_buf);
        sps_len = frameSize + 4;
        sps_buf = (uint8_t*)malloc(sps_len);
        sps_buf[0] = 0;
        sps_buf[1] = 0;
        sps_buf[2] = 0;
        sps_buf[3] = 1;
        memcpy(sps_buf + 4, fReceiveBuffer, frameSize);
        break;
      case 0x04:
        // PPS: cached the same way as SPS.
        frame_type_str = "PPS Frame";
        if(pps_buf != nullptr) free(pps_buf);
        pps_len = frameSize + 4;
        pps_buf = (uint8_t*)malloc(pps_len);
        pps_buf[0] = 0;
        pps_buf[1] = 0;
        pps_buf[2] = 0;
        pps_buf[3] = 1;
        memcpy(pps_buf + 4, fReceiveBuffer, frameSize);
        break;
      case 0x05:
        // SEI: extract an embedded 13-digit millisecond timestamp from the
        // payload.  NOTE(review): the codec name is hard-coded to "H264"
        // even when the subsession is H265, and the memcpy assumes the
        // payload holds at least 29 bytes (timestamp at offset 16) — both
        // should be verified against GetSEIPayload() and the camera's SEI
        // format.
        frame_type_str = "SEI Frame";
        if (GetSEIPayload ("H264", fReceiveBuffer, frameSize, sei_payload) ) {
          memcpy(sei_time, sei_payload.data()+16, 13);
          printf("##sei_time :%s\n", timestampToDateTimeString(atoll(sei_time)).c_str());
        }
        // On extraction failure sei_time stays all-zero, so _sei_time_ll
        // is reset to 0 rather than keeping the previous value.
        _sei_time_ll = atoll(sei_time);
        break;
      case 0x06:
        // VPS (H265) is recognized but not cached — unlike SPS/PPS it is
        // never prepended to I-frames.  NOTE(review): H265 decode may need it.
        frame_type_str = "VPS Frame";
        break;
      default:
        frame_type_str = "Other Frame";
        break;
    }

    // printf("recv frame: time = %lld, type = %s, size=%d\n", recv_time, frame_type_str.c_str(), frameSize);
    // printf("file:%s function:%s line:%d sei_time = %lld\n", __FILE__, __FUNCTION__, __LINE__, atoll(sei_time));
    // printf("##recv time:%lld\n", recv_time);
    // printf("##sei_time :%lld\n", atoll(sei_time));

    //int n = 0;
    //if (frameSize > 100) n = 100;
    //else n = frameSize;
    //memcpy (fReceiveBufferAV + 4, fReceiveBuffer, frameSize);
    

    //m_HFFDecodeThread->Push(pkt, user_data);

      // for (int i = 0; i < n; i++) {
      //   printf("%d", fReceiveBufferAV[i]);
      // }
      // printf("\n");

      // avpkt.data = fReceiveBufferAV;
      // avpkt.size = frameSize + 4;
      // memcpy (fReceiveBufferAV + 4, fReceiveBuffer, frameSize);
      // avpkt.data = fReceiveBufferAV;
      // len = avcodec_decode_video2 (codecContext, frame, &got_picture, &avpkt);
      // if (len < 0) {
      //     envir() << "Error while decoding frame " << frameIndex << "\n";
      // }
      // if (got_picture) {
      //     envir() << "->Picture decoded :" << frameIndex << "\n";
      //     sws_scale(this->swsContext, this->frame->data,this->frame->linesize,
      //               0, this->codecContext->height,
      //               this->rgbFrame->data, this->rgbFrame->linesize);
      //     QImage *image = new QImage(this->rgbFrame->data[0],
      //             this->codecContext->width,
      //             this->codecContext->height,
      //             QImage::Format_RGB888);
      //     QString name = QString("/Users/hfli/Movies/%1.jpg").arg(frameIndex);
      //     emit this->session->gotFrame(*image);
      //     frameIndex ++;
      // }
  }
  
  // Then continue, to request the next frame of data:
  continuePlaying();
}



// Re-arm the sink: request the next frame from the upstream source.
// Delivery is asynchronous — the static afterGettingFrame() trampoline is
// invoked once the frame has been written into fReceiveBuffer.
Boolean DummySink::continuePlaying() {
    // Guard: without a source there is nothing to schedule (should not happen).
    if (fSource == NULL) {
        return False;
    }

    fSource->getNextFrame(fReceiveBuffer, DUMMY_SINK_RECEIVE_BUFFER_SIZE,
                          afterGettingFrame, this, onSourceClosure, this);
    return True;
}

// Receives the out-of-band SPS/PPS ("sprop-parameter-sets" from the SDP).
// The original in-sink FFmpeg decode of these parameter sets is kept below
// for reference but disabled; currently the method only logs.
// NOTE(review): `prop` and `size` are unused while the block below stays
// commented out — parameter sets are instead captured in-band by
// afterGettingFrame().
void DummySink::setSprop(u_int8_t const* prop, unsigned size) {
    // uint8_t *buf;
    // uint8_t *buf_start;
    // buf = (uint8_t *)malloc(1000);
    // buf_start = buf + 4;

    // avpkt.data = buf;
    // avpkt.data[0]   = 0;
    // avpkt.data[1]   = 0;
    // avpkt.data[2]   = 0;
    // avpkt.data[3]   = 1;
    // memcpy (buf_start, prop, size);
    // avpkt.size = size + 4;
    // len = avcodec_decode_video2 (codecContext, frame, &got_picture, &avpkt);
    // if (len < 0) {
    //     envir() << "Error while decoding frame" << frame;
    //     //		exit(6);
    // }


    envir() << "after setSprop\n";
    //	exit (111);
}
