/**
 * @file main.cpp
 * @author SDK_TEAM
 * @brief
 * @version 0.1
 * @date 2022-10-26
 *
 * Copyright:
 * © 2018 北京灵汐科技有限公司 版权所有。
 * 注意：以下内容均为北京灵汐科技有限公司原创，未经本公司允许，不得转载，否则将视为侵权；对于不遵守此声明或者其他违法使用以下内容者，本公司依法保留追究权。
 * © 2018 Lynxi Technologies Co., Ltd. All rights reserved.
 * NOTICE: All information contained here is, and remains the property of Lynxi.
 * This file can not be copied or distributed without the permission of Lynxi
 * Technologies Co., Ltd.
 *
 */

#include <lyn_api.h>
#include <lyn_plugin.h>
#include <unistd.h>
#include <iostream>
#include "Channel.hpp"
#include "argsParser.h"
#include "blockQueue.hpp"
#include "boxInfoEncode.h"
#include "bufferPool.hpp"
#include "framePool.hpp"
#include "frameRater.h"
#include "ipeParamModel.h"
#include "opencvWindow.hpp"
#include "sys/time.h"
#include "util.hpp"
#include "videoDecoder.h"
#include "videoEncoder.h"

using namespace std;

// Per-iteration payload handed to the lynStreamAddCallback lambda in
// MergeAndDisplay(); the callback deletes it after the merged frame is shown.
struct CALLBACKDATA_T {
  std::vector<Channel *> *channels;  // all decode/inference channels (not owned)
  lynFrame_t *result;                // merged canvas frame popped from recvPool
  FramePool *recvPool;               // pool the result frame is pushed back to
  std::vector<lynFrame_t *> frames;  // one frame per channel; may contain nullptr on timeout
  std::vector<ImageMergeInfo> *mergeInfos;  // grid-cell placement for each channel
  FrameRater *frameRater;  // NOTE(review): never assigned in this file — confirm before use
  std::string windowName;  // OpenCV window the canvas is displayed on
};

// Size of the merged display canvas (NV12).
#define WINDOW_WIDTH 1920
#define WINDOW_HEIGH 1080
// Total frame-rate budget per chip; divided evenly among its channels.
#define CAPACITY_LIMIT 190

// Input kind of the (last-parsed) channel; File when the path exists on disk.
InputType g_inputType = File;

// key: chip (device id), value: channel count
// Records how many channels are running on each chip.
static std::map<uint16_t, uint16_t> g_chipChannlMap;

/**
 * @brief Composite every channel's latest processed frame onto one canvas and
 *        display it in a single OpenCV window. Loops until any channel
 *        reports end-of-stream.
 *
 * @param context    device context of the chip the channels run on
 * @param windowName name of the OpenCV display window
 * @param channels   channels whose frames are merged (not owned)
 */
void MergeAndDisplay(lynContext_t *context, std::string windowName,
                     std::vector<Channel *> &channels) {
  if (channels.empty()) {
    return;  // guard: "window" below would otherwise be 0 and divide by zero
  }
  // 1. Bind the context and create the stream used for all async work.
  CHECK_ERR(lynSetCurrentContext(*context));
  lynStream_t stream;
  // BUGFIX: lynCreateStream was unchecked; wrap in CHECK_ERR like every other
  // SDK call in this file.
  CHECK_ERR(lynCreateStream(&stream));
  IpeParamModel ipeMerge(0, 0);
  ipeMerge.SetImgInfo(WINDOW_WIDTH, WINDOW_HEIGH, LYN_PIX_FMT_NV12);

  OpencvWindowManager::GetInstance()->SetWindowSize(windowName, WINDOW_HEIGH,
                                                    WINDOW_WIDTH);
  // 2. Choose the smallest N x N grid that fits all channels, e.g.
  //    9 channels -> 3x3 grid, 10 channels -> 4x4 grid.
  int channelNumber = channels.size();
  int window = sqrt(channelNumber);
  window += pow(window, 2) < channelNumber ? 1 : 0;
  std::cout << "window: " << window << std::endl;
  std::vector<ImageMergeInfo> mergeInfos;
  for (int h = 0; h < window; ++h) {
    for (int w = 0; w < window; ++w) {
      ImageMergeInfo mergeInfo;
      // Position of this grid cell on the canvas.
      mergeInfo.x = w * (WINDOW_WIDTH / window);
      mergeInfo.y = h * (WINDOW_HEIGH / window);
      mergeInfo.width = WINDOW_WIDTH / window;
      mergeInfo.heigh = WINDOW_HEIGH / window;
      mergeInfo.frame = nullptr;
      mergeInfos.emplace_back(mergeInfo);
    }
  }
  // 3. Initialize every canvas in recvPool to a black background and draw the
  //    grid separator lines via the OSD plugin.
  lynPlugin_t plugin;
  CHECK_ERR(lynPluginRegister(&plugin, ArgsParse::argOsdPluginPath.c_str()));
  lynDrawBoxAndTextPara drawPara;
  drawPara.imgFmt = LYN_PIX_FMT_NV12;
  drawPara.imgW = WINDOW_WIDTH;
  drawPara.imgH = WINDOW_HEIGH;
  lynBoxesInfo boxesInfo;
  boxesInfo.boxesNum = mergeInfos.size();
  for (uint32_t i = 0; i < boxesInfo.boxesNum; ++i) {
    boxesInfo.boxes[i].xmin = mergeInfos[i].x;
    boxesInfo.boxes[i].xmax = mergeInfos[i].x + mergeInfos[i].width;
    boxesInfo.boxes[i].ymin = mergeInfos[i].y;
    boxesInfo.boxes[i].ymax = mergeInfos[i].y + mergeInfos[i].heigh;
    // A blank label: only the box borders (the grid lines) are drawn.
    snprintf(boxesInfo.boxes[i].label, 2, " ");
  }
  void *drawDev;
  CHECK_ERR(lynMalloc((void **)&drawDev, sizeof(lynBoxesInfo)));
  CHECK_ERR(lynMemcpy(drawDev, &boxesInfo, sizeof(lynBoxesInfo),
                      ClientToServer));
  drawPara.boxesInfo = (lynBoxesInfo *)drawDev;
  FramePool recvPool;
  int poolSize = 1;
  // NV12 buffer: full-size Y plane + half-size interleaved UV plane.
  recvPool.init(WINDOW_HEIGH * WINDOW_WIDTH * 3 / 2, poolSize);
  std::vector<lynCodecBuf_t *> tmpVec;
  for (int i = 0; i < poolSize; ++i) {
    auto tmp = recvPool.Pop();
    // Black background: Y plane = 0, UV plane = 128.
    CHECK_ERR(lynMemset(tmp->data, 0, WINDOW_WIDTH * WINDOW_HEIGH));
    CHECK_ERR(lynMemset(tmp->data + WINDOW_WIDTH * WINDOW_HEIGH, 128,
                        WINDOW_WIDTH * WINDOW_HEIGH / 2));
    drawPara.imgData = tmp->data;
    // Draw the grid separator lines.
    CHECK_ERR(lynPluginRunAsync(stream, plugin, "lynDrawBoxAndText", &drawPara,
                                sizeof(drawPara)));
    CHECK_ERR(lynSynchronizeStream(stream));
    // Keep popped buffers aside until all have been initialized, then return
    // them to the pool together.
    tmpVec.emplace_back(tmp);
  }
  for (auto &tmp : tmpVec) {
    recvPool.Push(tmp);
  }
  CHECK_ERR(lynPluginUnregister(plugin));
  CHECK_ERR(lynFree(drawDev));

  bool eos = false;
  // Live streams use a 1 ms timeout so one disconnected channel cannot stall
  // the others; file input can afford to wait.
  int getFrameTimeout = (g_inputType == File) ? 60000 : 1;
  while (!eos) {
    // Freed by the stream callback after the merged frame has been shown.
    CALLBACKDATA_T *callbackData = new CALLBACKDATA_T;
    callbackData->channels = &channels;
    callbackData->recvPool = &recvPool;
    // 4. Collect one processed frame per channel (nullptr on timeout).
    for (size_t i = 0; i < channels.size(); i++) {
      lynFrame_t *frame = nullptr;
      bool ret = channels[i]->getFrame(&frame, getFrameTimeout);
      channels[i]->getVideoInfo(mergeInfos[i].videoInfo);
      mergeInfos[i].frame = frame;
      callbackData->frames.emplace_back(frame);
      eos = ret ? frame->eos : eos;
    }
    callbackData->mergeInfos = &mergeInfos;
    callbackData->result = recvPool.Pop();
    callbackData->windowName = windowName;
    // 5. Composite all frames onto one canvas; the stream callback then shows
    //    the canvas and returns the per-channel frames.
    ipeMerge.MergeImage(stream, callbackData->result->data, mergeInfos);
    CHECK_ERR(lynStreamAddCallback(
        stream,
        [](void *userData) -> lynError_t {
          CALLBACKDATA_T *data = (CALLBACKDATA_T *)userData;
          for (size_t i = 0; i < data->channels->size(); ++i) {
            if (data->frames[i] != nullptr) {
              // Return the frame to the channel it came from.
              data->channels->at(i)->putFrame(data->frames[i]);
            }
          }
          // Display the merged canvas, then recycle it into the pool.
          OpencvWindowManager::GetInstance()->ShowDevicesData(
              data->result, data->windowName);
          data->recvPool->Push(data->result);
          delete data;
          return 0;
        },
        callbackData));
    // NOTE: with the 1 ms live-stream timeout, polling too fast may yield no
    // frame; a short sleep could be inserted here for Stream input, e.g.:
    // std::this_thread::sleep_for(std::chrono::milliseconds(60));
  }
  // Wait for all queued tasks before tearing the stream down.
  CHECK_ERR(lynSynchronizeStream(stream));
  CHECK_ERR(lynDestroyStream(stream));
  OpencvWindowManager::GetInstance()->Close(windowName);
}

/**
 * @brief Derive the model type from the model file path by a case-insensitive
 *        substring match ("yolov5" / "resnet50").
 *
 * @param modelPath path to the model file
 * @return the detected ModelType; logs an error and returns
 *         ModelType::UnknownModel when neither keyword is present.
 */
ModelType getModelType(std::string &modelPath) {
  const std::string lowered = to_lower(modelPath);

  const bool looksLikeYolo = lowered.find("yolov5") != std::string::npos;
  if (looksLikeYolo) return ModelType::Yolov5;

  const bool looksLikeResnet = lowered.find("resnet50") != std::string::npos;
  if (looksLikeResnet) return ModelType::ResNet50;

  loge("get modelType fail!!!\n");
  return ModelType::UnknownModel;
}

/**
 * @brief Parse the JSON config file given by ArgsParse::argJsonPath and build
 *        one device context per "config" entry plus one ChannelInfo per
 *        configured channel.
 *
 * @param[out] contextVec     one heap-allocated context per device entry
 * @param[out] channelInfoVec one entry per channel to be created
 * @return true on success, false on any parse or validation failure.
 *
 * NOTE(review): contexts created before a mid-parse failure are neither
 * destroyed nor removed from contextVec; the caller quick_exits on false, so
 * this is currently harmless — confirm if that ever changes.
 */
bool parseConfigJson(std::vector<lynContext_t *> &contextVec,
                     std::vector<ChannelInfo> &channelInfoVec) {
  std::ifstream f(ArgsParse::argJsonPath);
  try {
    // BUGFIX: json::parse() previously ran outside the try block, so a
    // malformed config file threw an uncaught exception and aborted the
    // process instead of returning false.
    json j = json::parse(f);
    int showType = j["showType"].get<int>();
    ArgsParse::argShowType = ShowType(showType);
    std::string postpluginPath = j["postpluginPath"].get<std::string>();
    std::string osdpluginPath = j["osdpluginPath"].get<std::string>();
    ArgsParse::argPostPluginPath = postpluginPath;
    ArgsParse::argOsdPluginPath = osdpluginPath;
    auto configJson = j["config"];
    for (auto &config : configJson) {
      int devicesID = config["devicesID"].get<int>();
      if (ArgsParse::checkChip(devicesID) == false) {
        return false;
      }
      uint16_t channelSize = 0;  // total channels on this device
      lynContext_t *context = new lynContext_t;
      CHECK_ERR(lynCreateContext(context, devicesID));
      CHECK_ERR(lynRegisterErrorHandler(StreamErrorHandler, nullptr));
      auto channelJson = config["channels"];
      for (auto &channel : channelJson) {
        int channelNumber = channel["channel"].get<int>();
        channelSize += channelNumber;
        // Expand each entry into `channelNumber` identical channels that
        // differ only in their name/output suffix.
        for (int number = 0; number < channelNumber; ++number) {
          ChannelInfo channelInfo;
          channelInfo.context = context;
          channelInfo.deviceID = devicesID;
          std::string s = channel["output"].get<std::string>();
          channelInfo.channelName = s + "_device" + std::to_string(devicesID) +
                                    "_channel" + std::to_string(number);
          channelInfo.inputPath = channel["input"].get<std::string>();
          channelInfo.outputPath = channelInfo.channelName;
          channelInfo.modelPath = channel["modelPath"].get<std::string>();
          channelInfo.osdpluginPath = osdpluginPath;
          channelInfo.postpluginPath = postpluginPath;
          channelInfo.showType = ShowType(showType);
          channelInfo.modelType = getModelType(channelInfo.modelPath);
          // A path that exists on disk is a file; anything else (e.g. an
          // rtsp:// URL) is treated as a live stream.
          channelInfo.inputType =
              (access(channelInfo.inputPath.c_str(), F_OK) == 0) ? File
                                                                 : Stream;
          g_inputType = channelInfo.inputType;
          channelInfoVec.emplace_back(std::move(channelInfo));
        }
      }
      contextVec.emplace_back(context);
      g_chipChannlMap.emplace(devicesID, channelSize);
    }
  } catch (...) {
    loge("parse json fail!!!\n");
    return false;
  }
  return true;
}

/**
 * @brief Entry point: parse configuration (CLI or JSON), create one context
 *        per chip and one worker Channel per configured channel, optionally
 *        run the merged on-screen display, then tear everything down.
 *
 * @return 0 on success, -1 on argument-parse failure.
 */
int main(int argc, char *argv[]) {
  // 1. Parse the command line.
  if (ArgsParse::yolov5ArgsParser(argc, argv) == false) {
    return -1;
  }

  // 2. Create the device contexts and generate the ChannelInfo entries.
  std::vector<lynContext_t *> contextVec;
  std::vector<ChannelInfo> channelInfoVec;
  if (ArgsParse::argJsonPath.empty()) {  // configure directly from CLI args
    std::string strVideoPath = ArgsParse::argInputFilePath;
    std::string strVideoOutTmp =
        strVideoPath.substr(0, strVideoPath.rfind('.'));
    // A path that exists on disk is a file; anything else (e.g. an rtsp://
    // URL) is treated as a live stream.
    auto inputType = (access(strVideoPath.c_str(), F_OK) == 0) ? File : Stream;
    if (inputType == Stream) {  // sanitize URL characters for the output path
      ReplacePath(strVideoOutTmp, '/', '_');
      ReplacePath(strVideoOutTmp, ':', '_');
    }
    for (uint32_t i = 0; i < ArgsParse::argChipsVec.size(); ++i) {
      lynContext_t *context = new lynContext_t;
      CHECK_ERR(lynCreateContext(context, ArgsParse::argChipsVec[i]));
      CHECK_ERR(lynRegisterErrorHandler(StreamErrorHandler, nullptr));
      for (uint32_t j = 0; j < ArgsParse::argChannel; ++j) {
        ChannelInfo channelInfo;
        channelInfo.context = context;
        channelInfo.deviceID = ArgsParse::argChipsVec[i];
        channelInfo.channelName = "device" +
                                  std::to_string(ArgsParse::argChipsVec[i]) +
                                  "_channel" + std::to_string(j);
        channelInfo.inputPath = strVideoPath;
        channelInfo.outputPath =
            strVideoOutTmp + "_Plugin_" + channelInfo.channelName;
        channelInfo.modelPath = ArgsParse::argModelPath;
        channelInfo.postpluginPath = ArgsParse::argPostPluginPath;
        channelInfo.osdpluginPath = ArgsParse::argOsdPluginPath;
        channelInfo.showType = ArgsParse::argShowType;
        channelInfo.modelType = getModelType(ArgsParse::argModelPath);
        channelInfo.inputType = inputType;
        g_inputType = channelInfo.inputType;
        channelInfoVec.emplace_back(std::move(channelInfo));
      }
      contextVec.emplace_back(context);
      g_chipChannlMap.emplace(ArgsParse::argChipsVec[i], ArgsParse::argChannel);
    }
  } else {  // configure from the JSON file
    if (parseConfigJson(contextVec, channelInfoVec) == false) {
      quick_exit(-1);
    }
  }

  // Direct on-screen display only supports a single chip.
  std::string windowName("model-plugin");
  if (ArgsParse::argShowType == ShowType::DirectShow) {
    // Exactly one context is created per chip, so the context count equals
    // the chip count.
    if (contextVec.size() > 1) {
      std::cout << "DirectShow only support single chip!" << std::endl;
      quick_exit(0);
    }
    OpencvWindowManager::GetInstance()->SetWindowName(windowName);
  }

  // 3. Create one worker Channel (thread) per configured channel.
  std::vector<Channel *> vecChannel;
  // PERF: iterate by reference — ChannelInfo carries several std::strings and
  // was copied on every iteration; nothing reads channelInfoVec afterwards.
  for (auto &channelInfo : channelInfoVec) {
    if (ArgsParse::argShowType == ShowType::OnlyApu &&
        channelInfo.modelType == ModelType::ResNet50) {
      std::cout << "ResNet50 not support save json mode!" << std::endl;
      quick_exit(0);
    }

    Channel *channel = new Channel();
    // Split the chip's frame-rate budget evenly among its channels.
    channelInfo.maxFps = CAPACITY_LIMIT / g_chipChannlMap[channelInfo.deviceID];
    channel->Init(channelInfo);
    vecChannel.emplace_back(channel);
  }

  // 4. If displaying directly, run the merge/display loop until EOS.
  if (ArgsParse::argShowType == ShowType::DirectShow) {
    std::thread mergeThread(MergeAndDisplay, contextVec[0], windowName,
                            std::ref(vecChannel));
    OpencvWindowManager::GetInstance()->Process();
    mergeThread.join();
  }

  for (auto channel : vecChannel) {
    channel->Close();
    delete channel;
  }

  // 5. Release the device contexts.
  for (auto &devCtx : contextVec) {
    if (devCtx) {
      CHECK_ERR(lynSetCurrentContext(*devCtx));
      CHECK_ERR(lynDestroyContext(*devCtx));
      delete devCtx;
    }
  }

  return 0;
}
