#include "libav.hpp"

#include <boost/endian/conversion.hpp>
#include <iostream>
#include <regex>

#ifdef USE_ROS
// Construct against a caller-owned ROS node handle and advertise the
// decoded-image topic on it.
LibAV::LibAV(ros::NodeHandle* nh, int flag, std::string topic) : mode_(flag) {
  nh_ = nh;
  // Queue depth 1: stale frames are dropped rather than queued.
  it_ = new image_transport::ImageTransport(*nh);
  img_pub_ = it_->advertise(topic, 1);
}
#endif

// Construct without an external node handle; when built with ROS, the object
// creates and owns its own handle and image transport.
LibAV::LibAV(int flag, std::string topic) : mode_(flag) {
#ifdef USE_ROS
  // ROS init — NOTE(review): nh_ and it_ are heap-allocated and never freed.
  auto* handle = new ros::NodeHandle();
  nh_ = handle;
  it_ = new image_transport::ImageTransport(*handle);
  img_pub_ = it_->advertise(topic, 1);
#endif
}

// Release decoder resources.
//
// NOTE(review): the worker threads (stream_th_, udp_t_) run infinite loops and
// are never signalled or joined, so they may still be using these resources
// when the object is destroyed; the ROS handles (nh_/it_) and the custom AVIO
// context/buffer are also not released here. TODO: add a shutdown path.
LibAV::~LibAV() {
  av_frame_free(&decframe_);
  av_frame_free(&frame_);
  sws_freeContext(swsctx_);  // fix: the scaler context was previously leaked
  avcodec_close(vstrm_->codec);
  avformat_close_input(&inctx_);
}

// Dispatch initialisation according to the configured stream mode.
//
// src:              input URL (udp://host:port for UDP_STREAM, any libav URL
//                   otherwise; unused for MANUAL_IO).
// min_frame_buffer: byte threshold the IO-mode decode loop waits for.
// buffer_size:      libav IO / socket buffer size.
//
// NOTE(review): the error codes returned by initAV()/initIOAV() are
// discarded, so this always reports success.
bool LibAV::init(std::string src, int min_frame_buffer, int buffer_size) {
  if (mode_ == UDP_STREAM) {
    groupUDP(src);
    end_of_stream_ = true;
    initIOAV(min_frame_buffer, buffer_size);
  } else if (mode_ == MANUAL_IO) {
    end_of_stream_ = true;
    initIOAV(min_frame_buffer, buffer_size);
  } else {
    // NORMAL_STREAM and any unknown mode fall back to a plain libav input.
    initAV(src, buffer_size);
  }
  return true;
}

// Parse a "udp://host:port" URI, join the multicast group, and spawn a
// receiver thread that appends RGB 716-image payloads to frame_buffer_.
//
// uri: must match udp://<host>:<port>, e.g. udp://127.0.0.1:8000.
// Returns 0 on success; exits the process on a malformed URI (legacy
// behaviour preserved for existing callers).
int LibAV::groupUDP(std::string& uri) {
  std::smatch result;
  const std::regex pattern("udp://(.+):(\\d+)");

  if (!std::regex_match(uri, result, pattern)) {
    std::cerr << "Only support UDP protocol now. Please check uri input like "
                 "udp://127.0.0.1:8000"
              << std::endl;
    exit(-1);
  }

  // Bind on any local address at the parsed port, then join the group host.
  std::string tmp = "";
  udp_.bindServer(tmp, std::stoi(result[2]));
  tmp = result[1];
  udp_.joinGroup(tmp);

  // Receiver thread: runs forever (no stop flag — see destructor note).
  udp_t_ = new std::thread([this] {
    std::vector<uint8_t> buf;
    while (1) {
      udp_.recvFromGroup(buf);
      // Fix: require at least 4 bytes before indexing buf[0..3]; the old
      // code read buf[1] even for 1-byte datagrams (out-of-bounds read).
      if (buf.size() < 4) continue;
      // Filter for 716 image: header 0xee 0x16; subtype 0xd6 == RGB image.
      if (buf[0] == 0xee && buf[1] == 0x16 && buf[3] == 0xd6) {
        // Append the payload (skipping the 6-byte header) to the shared
        // frame buffer consumed by readPacket().
        saveToBuffer(buf, 6);
      }
    }
  });

  return 0;  // fix: declared int but previously fell off the end (UB)
}

int LibAV::initIOAV(int min_frame_buffer, int buffer_size) {
  av_register_all();
  io_buffer_ = NULL;

  AVInputFormat* in_fmt = av_find_input_format("h264");

  frame_buffer_.clear();

  io_buffer_ = (unsigned char*)av_malloc(buffer_size);

  /* Read data */
  avio_ctx_ = avio_alloc_context(io_buffer_, buffer_size, 0, this,
                                 LibAV::readPacket, NULL, NULL);

  inctx_ = avformat_alloc_context();

  inctx_->pb = avio_ctx_;
  inctx_->flags = AVFMT_FLAG_CUSTOM_IO;
  waitForBuffer(500000, 100);

  /* Allocate format context */
  int ret = avformat_open_input(&inctx_, NULL, in_fmt, NULL);
  if (ret < 0) {
    std::cerr << "Could not open input: " << std::oct << ret << std::endl;
    return -1;
  }
  waitForBuffer(500000);
  // Retrive input stream information
  ret = avformat_find_stream_info(inctx_, nullptr);
  if (ret < 0) {
    std::cerr << "Failed to find stream info. ret=" << ret;
    return 2;
  }

  waitForBuffer(500000);
  // Find primary Video stream
  AVCodec* vcodec = nullptr;
  ret = av_find_best_stream(inctx_, AVMEDIA_TYPE_VIDEO, -1, -1, &vcodec, 0);
  if (ret < 0) {
    std::cerr << "Fail to find best stream. ret=" << ret;
    return 3;
  }
  vstrm_idx_ = ret;
  vstrm_ = inctx_->streams[vstrm_idx_];

  waitForBuffer(500000);
  // Open video decoder context
  ret = avcodec_open2(vstrm_->codec, vcodec, nullptr);
  if (ret < 0) {
    std::cerr << "Fail to open decoder. ret=" << ret;
    return 4;
  }

  // Print input video stream informataion
  std::cout << "format: " << inctx_->iformat->name << "\n"
            << "vcodec: " << vcodec->name << "\n"
            << "size:   " << vstrm_->codec->width << 'x'
            << vstrm_->codec->height << "\n"
            << "fps:    " << av_q2d(vstrm_->codec->framerate) << " [fps]\n"
            << "length: "
            << av_rescale_q(vstrm_->duration, vstrm_->time_base, {1, 1000}) /
                   1000.
            << " [sec]\n"
            << "pixfmt: " << av_get_pix_fmt_name(vstrm_->codec->pix_fmt) << "\n"
            << "frame:  " << vstrm_->nb_frames << "\n"
            << std::flush;

  // Initialize sample scaler
  dst_width_ = vstrm_->codec->width;
  dst_height_ = vstrm_->codec->height;
  const AVPixelFormat dst_pix_fmt = AV_PIX_FMT_BGR24;
  swsctx_ =
      sws_getCachedContext(nullptr, vstrm_->codec->width, vstrm_->codec->height,
                           vstrm_->codec->pix_fmt, dst_width_, dst_height_,
                           dst_pix_fmt, SWS_BICUBIC, nullptr, nullptr, nullptr);
  if (!swsctx_) {
    std::cerr << "Fail to get sws cached context.";
    exit(5);
  }
  std::cout << "output: " << dst_width_ << 'x' << dst_height_ << ','
            << av_get_pix_fmt_name(dst_pix_fmt) << std::endl;
  // Allocate frame buffer for output
  frame_ = av_frame_alloc();
  framebuf_.resize(avpicture_get_size(dst_pix_fmt, dst_width_, dst_height_));
  avpicture_fill(reinterpret_cast<AVPicture*>(frame_), framebuf_.data(),
                 dst_pix_fmt, dst_width_, dst_height_);

  decframe_ = av_frame_alloc();
  end_of_stream_ = false;

  frame_buffer_.clear();
  stream_th_ = new std::thread(&LibAV::openThread, this);
}

// Open a regular libav input (file / network URL), select the best video
// stream, open its decoder, build the BGR24 scaler and start the decode
// thread. Used for NORMAL_STREAM mode.
//
// src:         input URL or path; rtsp:// sources get "buffer_size" applied.
// buffer_size: socket buffer size option, applied to RTSP inputs only.
// Returns 0 on success, non-zero on failure.
int LibAV::initAV(std::string& src, int buffer_size) {
  // Initialize FFmpeg library
  av_register_all();
  int ret;

  avformat_network_init();
  if (!(inctx_ = avformat_alloc_context())) {
    return -1;  // out of memory (fix: removed dead store to ret)
  }
  // inctx_->flags|=AVFMT_FLAG_NOBUFFER;
  // Flush packets immediately to keep latency low on live streams.
  inctx_->flags |= AVFMT_FLAG_FLUSH_PACKETS;

  // Per-protocol options: enlarge the receive buffer for RTSP sources.
  AVDictionary* options = NULL;
  if (std::regex_match(src, std::regex("^rtsp://.*"))) {
    av_dict_set_int(&options, "buffer_size", buffer_size, 0);
  }

  // Open input and read its header
  ret = avformat_open_input(&inctx_, src.c_str(), nullptr, &options);
  if (ret < 0) {
    std::cerr << "Failed to open " << src << ". ret=" << ret;
    return 1;
  }
  av_dict_free(&options);
  // Retrieve input stream information
  ret = avformat_find_stream_info(inctx_, nullptr);
  if (ret < 0) {
    std::cerr << "Failed to find stream info. ret=" << ret;
    return 2;
  }
  av_dump_format(inctx_, 0, "", 0);
  // Find primary video stream
  AVCodec* vcodec = nullptr;
  ret = av_find_best_stream(inctx_, AVMEDIA_TYPE_VIDEO, -1, -1, &vcodec, 0);
  if (ret < 0) {
    std::cerr << "Fail to find best stream. ret=" << ret;
    return 3;
  }
  vstrm_idx_ = ret;  // av_find_best_stream returns the stream index
  vstrm_ = inctx_->streams[vstrm_idx_];
  // Open video decoder context
  ret = avcodec_open2(vstrm_->codec, vcodec, nullptr);
  if (ret < 0) {
    std::cerr << "Fail to open decoder. ret=" << ret;
    return 4;
  }

  // Print input video stream information
  std::cout << "infile: " << src << "\n"
            << "format: " << inctx_->iformat->name << "\n"
            << "vcodec: " << vcodec->name << "\n"
            << "size:   " << vstrm_->codec->width << 'x'
            << vstrm_->codec->height << "\n"
            << "fps:    " << av_q2d(vstrm_->codec->framerate) << " [fps]\n"
            << "length: "
            << av_rescale_q(vstrm_->duration, vstrm_->time_base, {1, 1000}) /
                   1000.
            << " [sec]\n"
            << "pixfmt: " << av_get_pix_fmt_name(vstrm_->codec->pix_fmt) << "\n"
            << "frame:  " << vstrm_->nb_frames << "\n"
            << std::flush;

  // Initialize the sample scaler (decoder pixel format -> BGR24 for OpenCV)
  dst_width_ = vstrm_->codec->width;
  dst_height_ = vstrm_->codec->height;
  const AVPixelFormat dst_pix_fmt = AV_PIX_FMT_BGR24;
  swsctx_ =
      sws_getCachedContext(nullptr, vstrm_->codec->width, vstrm_->codec->height,
                           vstrm_->codec->pix_fmt, dst_width_, dst_height_,
                           dst_pix_fmt, SWS_BICUBIC, nullptr, nullptr, nullptr);
  if (!swsctx_) {
    std::cerr << "Fail to get sws cached context.";
    exit(5);
  }
  std::cout << "output: " << dst_width_ << 'x' << dst_height_ << ','
            << av_get_pix_fmt_name(dst_pix_fmt) << std::endl;
  // Allocate the output frame backed by framebuf_ (shared with getMat()).
  frame_ = av_frame_alloc();
  framebuf_.resize(avpicture_get_size(dst_pix_fmt, dst_width_, dst_height_));
  avpicture_fill(reinterpret_cast<AVPicture*>(frame_), framebuf_.data(),
                 dst_pix_fmt, dst_width_, dst_height_);

  decframe_ = av_frame_alloc();
  end_of_stream_ = false;

  stream_th_ = new std::thread(&LibAV::openThread, this);
  return 0;  // fix: declared int but previously fell off the end (UB)
}

// Thread-safe append of buf[offset..end) to the shared frame buffer.
void LibAV::saveToBuffer(std::vector<uint8_t>& buf, int offset) {
  const std::lock_guard<std::mutex> guard(buf_lock_);
  // Drop everything if the consumer has fallen too far behind (~5 MB cap);
  // keeps memory and latency bounded at the cost of losing old data.
  if (frame_buffer_.size() > 5000000) {
    frame_buffer_.clear();
  }
  auto first = buf.begin() + offset;
  frame_buffer_.insert(frame_buffer_.end(), first, buf.end());
}

// Thread-safe append of buf[offset..length) to the shared frame buffer.
// NOTE(review): 'length' is treated as a one-past-the-end index into buf,
// not as a byte count starting at offset — confirm against callers.
void LibAV::saveToBuffer(uint8_t* buf, int length, int offset) {
  const std::lock_guard<std::mutex> guard(buf_lock_);
  // Same ~5 MB overflow reset as the vector overload.
  if (frame_buffer_.size() > 5000000) {
    frame_buffer_.clear();
  }
  const uint8_t* first = buf + offset;
  const uint8_t* last = buf + length;
  frame_buffer_.insert(frame_buffer_.end(), first, last);
}

int LibAV::readPacket(void* opaque, uint8_t* buf, int buf_size) {
  LibAV* p = (LibAV*)opaque;
  if (p == NULL) {
    return -1;
  }

  std::lock_guard<std::mutex> lock(p->buf_lock_);
  buf_size = std::min(buf_size, int(p->frame_buffer_.size()));

  if (p->frame_buffer_.size() == 0) return 0;
  if (!buf_size) {
    return AVERROR_UNKNOWN;
  }

  // std::cout << "\033[36mbuffer size: " << p->frame_buffer_.size() << "
  // \033[0m"
  //           << std::endl;
  /* copy internal buffer data to buf */
  memcpy(buf, p->frame_buffer_.data(), buf_size);
  p->frame_buffer_.erase(p->frame_buffer_.begin(),
                         p->frame_buffer_.begin() + buf_size);

  return buf_size;
}

// Decode loop executed on stream_th_: reads a packet, decodes the video
// frame, scales it to BGR24 into frame_/framebuf_, and (when built with ROS)
// publishes it. Runs forever — there is no stop flag (see destructor note).
void LibAV::openThread() {
  int ret;
  // Target per-iteration sleep in ms: one frame period minus 20 ms of slack.
  // NOTE(review): becomes negative when fps > 50; sleep_for on a negative
  // duration returns immediately, so pacing silently disappears there.
  int sleep = 1000 / av_q2d(vstrm_->codec->framerate) - 20;
  int curr_sleep = sleep;
  while (1) {
    if (mode_ == UDP_STREAM || mode_ == MANUAL_IO) {
      // Adaptive pacing: drain faster when the buffer is large, slower when
      // nearly empty. frame_buffer_.size() is read without holding
      // buf_lock_ here — tolerable for pacing, but formally a data race.
      if (frame_buffer_.size() > 500000)
        curr_sleep = sleep - 5;
      else if (frame_buffer_.size() < 50000)
        curr_sleep = sleep + 5;
      std::this_thread::sleep_for(std::chrono::milliseconds(curr_sleep));

      waitForBuffer(min_frame_buffer_, 20);
    }
    // printBufferSize();
    // Read packet from input
    ret = av_read_frame(inctx_, &pkt_);
    if (ret < 0 && ret != AVERROR_EOF) {
      // NOTE(review): "%X" is a leftover printf token printed literally.
      std::cerr << "Faill to read frame. ret=%X" << -ret << std::endl;
      av_packet_unref(&pkt_);
      continue;
    }
    // Drop packets that belong to a stream other than the selected video one.
    if (ret == 0 && pkt_.stream_index != vstrm_idx_) {
      av_packet_unref(&pkt_);
      continue;
    }
    if (ret == AVERROF_EOF) {
      av_packet_unref(&pkt_);
    }

    // Decode video frame; at EOF the unreffed (empty) packet drains frames
    // still buffered inside the decoder.
    avcodec_decode_video2(vstrm_->codec, decframe_, &got_pic_, &pkt_);
    if (!got_pic_) {
      av_packet_unref(&pkt_);
      continue;
    }

    // Convert frame to OpenCV matrix (framebuf_ backs frame_, so sws_scale
    // writes the BGR pixels straight into the buffer getMat() reads).
    {
      std::lock_guard<std::mutex> lock(image_lock_);
      sws_scale(swsctx_, decframe_->data, decframe_->linesize, 0,
                decframe_->height, frame_->data, frame_->linesize);

#ifdef USE_ROS
      // Write to msgs and pub
      sensor_msgs::ImagePtr ptr = boost::make_shared<sensor_msgs::Image>();

      cv::Mat image(dst_height_, dst_width_, CV_8UC3, framebuf_.data(),
                    frame_->linesize[0]);
      ptr = cv_bridge::CvImage(std_msgs::Header(), "bgr8", image).toImageMsg();
      img_pub_.publish(ptr);
#endif
    }
    av_packet_unref(&pkt_);
  }
}

// Return the most recently decoded frame as a BGR24 cv::Mat.
//
// Fix: the frame is deep-copied (clone) while image_lock_ is held. The
// previous code returned a Mat header aliasing framebuf_, so after the lock
// was released the caller kept reading memory that the decode thread
// (openThread's sws_scale) overwrites — a data race producing torn frames.
cv::Mat LibAV::getMat() {
  std::lock_guard<std::mutex> lock(image_lock_);
  cv::Mat image(dst_height_, dst_width_, CV_8UC3, framebuf_.data(),
                frame_->linesize[0]);
  return image.clone();
}
