#include "Server.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#include <algorithm>
#include <cstring>

#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>

#include "CommandParameters.h"
#include "base64.h"
#include "process.h"

class PointSort
{
public:
  bool operator()(cv::Point2f coordinates1, cv::Point2f coordinates2)
  {
    if (std::abs(coordinates1.x - coordinates2.x) > 40)
    {
      return coordinates1.x < coordinates2.x;
    }
    else if (std::abs(coordinates1.y - coordinates2.y) > 40)
    {
      return coordinates1.y < coordinates2.y;
    }
    else
    {
      return coordinates1.x + coordinates1.y < coordinates2.x + coordinates2.y;
    }
  }
};

// Pushes the persisted width/height and X/Y offset settings to the
// camera. Grabbing must be stopped while the geometry registers are
// changed, then restarted.
void SetCameraParam(HikCamera &camera)
{
  camera.StopGrabbing();
  camera.SetWidthAndHeight(getCamWHParam("WidthValue"),
                           getCamWHParam("HeightValue"));
  // Bug fix: the Y offset was previously read from "OffsetX" twice,
  // so the configured vertical offset never took effect.
  camera.SetOffsetXAndOffsetY(getCamXYParam("OffsetX"),
                              getCamXYParam("OffsetY"));
  camera.StartGrabbing();
}

void Display_server()
{
  IsServerStart = true;

  cv::Mat img;
  ImageHandle imagehander;

  std::cout << "Display start!" << std::endl;
  do
  {
    img = camera.GetOneFrame();
    if (img.channels() == 0 || img.empty())
    {
      continue;
    }
    cv::Mat resizedFrame1;
    cv::resize(img, resizedFrame1, IMAGE_SIZE);
    // std::string frametest = imagehander.Image2json(img);
    std::string frametest = imagehander.Image2json(resizedFrame1);
    std::vector<uint8_t> encodedData = EncodeMessage(IMAGE_SEND, frametest);
    send(Upper_Computer_fd, encodedData.data(), encodedData.size(), 0);
  } while (IsServerStart);
  std::cout << "Display finish!" << std::endl;
}

// Streams grayscale-converted camera frames (resized to IMAGE_SIZE,
// JSON-encoded) to the upper computer until IsServerStart is cleared
// by another thread.
void Color2Gray_server()
{
  IsServerStart = true;
  cv::Mat img;
  ImageHandle imagehander;

  std::cout << "Color2Gray start!" << std::endl;
  do
  {
    img = camera.GetOneFrame();
    if (img.channels() == 0 || img.empty())
    {
      logMessage("image is empty !");
      sleep(5); // back off so a missing camera does not spin the CPU
      continue;
    }
    cv::Mat frame = imagehander.Color2Gray(img);
    cv::Mat resizedFrame1;
    cv::resize(frame, resizedFrame1, IMAGE_SIZE); // max_cv::Size 500*500
    std::string frametest = imagehander.Image2json(resizedFrame1);
    std::vector<uint8_t> encodedData = EncodeMessage(IMAGE_SEND, frametest);
    // Fix: removed the unused bytesSent local — the send() result was
    // captured but never checked or used.
    send(Upper_Computer_fd, encodedData.data(), encodedData.size(), 0);
  } while (IsServerStart);
  std::cout << "Color2Gray finish!" << std::endl;
}

void Binarization_server()
{

  IsServerStart = true;
  cv::Mat img;
  ImageHandle imagehander;

  std::cout << "Binarization start!" << std::endl;
  do
  {
    img = camera.GetOneFrame();
    if (img.channels() == 0 || img.empty())
    {
      logMessage("image is empty !");
      sleep(5);
      continue;
    }
    int thresh = getBinarParam("ThresholdValue1");
    int maxval = getBinarParam("ThresholdValue2");

    cv::Mat frame = imagehander.Binarization(img, thresh, maxval);
    cv::Mat resizedFrame1;
    cv::resize(frame, resizedFrame1, IMAGE_SIZE); // max_cv::Size 500*500
    std::string frametest = imagehander.Image2json(resizedFrame1);
    std::vector<uint8_t> encodedData = EncodeMessage(IMAGE_SEND, frametest);
    send(Upper_Computer_fd, encodedData.data(), encodedData.size(), 0);
    // sleep(1);
  } while (IsServerStart);
  std::cout << "Binarization finish!" << std::endl;
}

// Serializes detected point coordinates into a JSON array of
// {PointNumber, PixX, PixY} objects (PointNumber is 1-based).
// Returns the compact JSON text produced by Json::FastWriter
// (includes a trailing newline).
std::string JSONPointLocation(std::vector<cv::Point2f> coordinates)
{
  Json::Value root;
  Json::FastWriter fastWriter;
  // Fix: use size_t for the index to avoid the signed/unsigned
  // comparison against coordinates.size().
  for (std::size_t i = 0; i < coordinates.size(); ++i)
  {
    Json::Value coord;
    coord["PointNumber"] = static_cast<int>(i) + 1;
    coord["PixX"] = coordinates[i].x;
    coord["PixY"] = coordinates[i].y;
    root.append(coord);
  }
  return fastWriter.write(root);
}

// Continuously grabs frames, runs Hough-circle detection (optionally
// restricted to a configured ROI), feeds the detected points into the
// camera-calibration state, and streams the annotated image to the
// upper computer. Runs until IsServerStart is cleared by another
// thread. When IsGetCirclePoint is set (one-shot request), the sorted
// point list is also sent as a SEND_CAMERA_CALIBRATION message.
void CircleRecognition_server()
{
  IsServerStart = true;
  ImageHandle imagehander;
  std::cout << "CircleRecognition Start:  " << std::endl;
  do
  {
    cv::Mat img = camera.GetOneFrame();
    cv::Mat img_raw = img.clone();
    cv::Rect roiRect = cv::Rect(0, 0, img_raw.cols, img_raw.rows);
    if (img.channels() == 0 || img.empty())
    {
      logMessage("image is empty !");
      sleep(5); // back off so a missing camera does not spin the CPU
      continue;
    }

    // Use find() to check whether an ROI has been configured.
    auto it = Circle_ROI.find("ROI");
    if (it != Circle_ROI.end())
    {
      auto ROI = getCircleROI("ROI");

      // Restrict processing to the region of interest (ROI).
      roiRect = cv::Rect(ROI["X"], ROI["Y"], ROI["Width"], ROI["Height"]);
      img = img(roiRect);
    }
    std::vector<cv::Vec3f> circles;
    DetectionResult result;

    // Circle-detection parameters, re-read every frame so UI changes
    // take effect immediately.
    int minDist = getCircleParam("MinDist");        // minimum distance between detected circles, integer > 1
    int Canny_param = getCircleParam("CannyParam"); // Canny edge-detection threshold, 1-255
    int hough_param = getCircleParam("HoughParam"); // Hough accumulator threshold for circle centers, > 1
    int minRadius = getCircleParam("MinRadius");    // minimum radius of detected circles
    int maxRadius = getCircleParam("MaxRadius");    // maximum radius of detected circles

    result = imagehander.CircleRecognition(img_raw, roiRect, img, circles, minDist, Canny_param,
                                           hough_param, minRadius, maxRadius);
    updateCameraCalibrations(result.coordinates);
    if (IsGetCirclePoint)
    {
      // One-shot request from the upper computer: clear the flag, sort
      // the points into grid order, and send them back for calibration.
      IsGetCirclePoint = false;
      std::sort(result.coordinates.begin(), result.coordinates.end(), PointSort());
      std::string JSONPoints = JSONPointLocation(result.coordinates);
      std::vector<uint8_t> encoded_JSONPoints = EncodeMessage(SEND_CAMERA_CALIBRATION, JSONPoints);

      send(Upper_Computer_fd, encoded_JSONPoints.data(), encoded_JSONPoints.size(), 0);
      std::cout << "CircleRecognition 点位信息已发送。" << std::endl;
    }

    // if (!getRobotGrasping())
    // {
    //   updateCameraCalibrations(result.coordinates);
    // }

    cv::Mat resizedFrame1;
    cv::resize(result.img, resizedFrame1, IMAGE_SIZE);
    std::string frametest = imagehander.Image2json(resizedFrame1);
    std::vector<uint8_t> encodedData = EncodeMessage(IMAGE_SEND, frametest);
    send(Upper_Computer_fd, encodedData.data(), encodedData.size(), 0);

  } while (IsServerStart);
  std::cout << "The circle detection has finished running !" << std::endl;
}

// Continuously grabs frames, runs rectangle detection (optionally
// restricted to a configured ROI), maps the results back into the
// full frame, updates the camera-calibration state, and streams the
// annotated image to the upper computer. Runs until IsServerStart is
// cleared by another thread.
void RectangleRecognition_server()
{
  IsServerStart = true;
  ImageHandle imagehander;
  std::cout << "RectangleRecognition开始 Start:  " << std::endl;
  do
  {
    cv::Mat img = camera.GetOneFrame();
    cv::Mat img_raw = img.clone();
    cv::Rect roiRect = cv::Rect(0, 0, img_raw.cols, img_raw.rows);
    if (img.channels() == 0 || img.empty())
    {
      logMessage("image is empty !");
      sleep(5); // back off so a missing camera does not spin the CPU
      continue;
    }

    // Use find() to check whether an ROI has been configured.
    auto it = Rentangle_ROI.find("ROI");
    if (it != Rentangle_ROI.end())
    {
      auto ROI = getRentangleROI("ROI");

      // Restrict processing to the region of interest (ROI).
      roiRect = cv::Rect(ROI["X"], ROI["Y"], ROI["Width"], ROI["Height"]);
      img = img(roiRect);
    }

    DetectionResult result;
    // Detection parameters, re-read every frame so UI changes apply.
    int ThresholdValue1 = getRentangleParam("ThresholdValue1");
    int ThresholdValue2 = getRentangleParam("ThresholdValue2");
    int MinArea = getRentangleParam("MinArea");
    int MaxArea = getRentangleParam("MaxArea");

    result = imagehander.RectangleRecognition(img,
                                              ThresholdValue1,
                                              ThresholdValue2,
                                              MinArea,
                                              MaxArea); // rectangle detection

    // Map ROI-relative coordinates back into full-frame coordinates.
    updateDetectionResult(result, img_raw, roiRect);
    updateCameraCalibrations(result.coordinates);

    cv::Mat resizedFrame1;
    cv::resize(result.img, resizedFrame1, IMAGE_SIZE);
    std::string frametest = imagehander.Image2json(resizedFrame1);
    std::vector<uint8_t> encodedData = EncodeMessage(IMAGE_SEND, frametest);
    // Fix: removed the unused bytesSent local — the send() result was
    // captured but never checked or used.
    send(Upper_Computer_fd, encodedData.data(), encodedData.size(), 0);
  } while (IsServerStart);
  std::cout << "The Rentangle detection has finished running !" << std::endl;
}

// Captures a single frame, sends it (resized to IMAGE_SIZE) to the
// upper computer as a TEMPLATE_ONE_FRAME_RETURN message, and caches
// the full-resolution frame in the global templateImg for later
// cropping by CreateTemplate_server.
// The do { ... } while (0) executes exactly once; `continue` on an
// empty frame jumps to the (false) loop condition, so the function
// exits after the 5 s back-off without sending anything.
void Template_server()
{

  cv::Mat img;
  ImageHandle imagehander;

  do
  {
    img = camera.GetOneFrame();
    if (img.channels() == 0 || img.empty())
    {
      logMessage("image is empty !");
      sleep(5);
      continue;
    }
    cv::Mat resizedFrame1;
    cv::resize(img, resizedFrame1, IMAGE_SIZE);
    std::string frametest = imagehander.Image2json(resizedFrame1);
    std::vector<uint8_t> encodedData = EncodeMessage(TEMPLATE_ONE_FRAME_RETURN, frametest);
    send(Upper_Computer_fd, encodedData.data(), encodedData.size(), 0);
    std::cout << "Template_server 已发送图像帧。" << std::endl;
    // Cache the unscaled frame; CreateTemplate_server crops from it.
    templateImg = img;

  } while (0);
}

void CreateTemplate_server()
{
  TemplateParam template_P = gettemplateP();
  int LUPointX, LUPointY, RDPointX, RDPointY;
  if (template_P.TemplatePoints["Point1"]["PointX"] <
      template_P.TemplatePoints["Point2"]["PointX"])
  {
    LUPointX = template_P.TemplatePoints["Point1"]["PointX"];
    RDPointX = template_P.TemplatePoints["Point2"]["PointX"];
  }
  else
  {
    LUPointX = template_P.TemplatePoints["Point2"]["PointX"];
    RDPointX = template_P.TemplatePoints["Point1"]["PointX"];
  }

  if (template_P.TemplatePoints["Point1"]["PointY"] <
      template_P.TemplatePoints["Point2"]["PointY"])
  {
    LUPointY = template_P.TemplatePoints["Point1"]["PointY"];
    RDPointY = template_P.TemplatePoints["Point2"]["PointY"];
  }
  else
  {
    LUPointY = template_P.TemplatePoints["Point2"]["PointY"];
    RDPointY = template_P.TemplatePoints["Point1"]["PointY"];
  }

  // 计算缩放比例
  double scaleWidth =
      static_cast<double>(getCamWHParam("WidthValue")) / IMAGE_SIZE.width;
  double scaleHeight =
      static_cast<double>(getCamWHParam("HeightValue")) / IMAGE_SIZE.height;

  cv::Rect rect(cvRound(LUPointX * scaleWidth), cvRound(LUPointY * scaleHeight), cvRound((RDPointX - LUPointX) * scaleWidth), cvRound((RDPointY - LUPointY) * scaleHeight));
  // cv::Mat cropped_img_tmp = templateImg(rect).clone();
  // cropped_img = cropped_img_tmp;

  cv::Mat cropped_img_tmp;
  cropped_img = templateImg(rect).clone();
  cv::resize(cropped_img, cropped_img_tmp, IMAGE_SIZE);
  // cropped_img = cropped_img_tmp;

  ImageHandle imagehander;
  std::string frametest = imagehander.Image2json(cropped_img_tmp);
  std::vector<uint8_t> encodedData = EncodeMessage(0x55a2, frametest);
  send(Upper_Computer_fd, encodedData.data(), encodedData.size(), 0);
  std::cout << "CreateTemplate_server image 已发送图像帧。" << std::endl;
}

// Continuously grabs frames, runs template/pattern matching against
// the template from getinputTempl() (optionally restricted to a
// configured ROI), maps results back into the full frame, updates the
// camera-calibration state, and streams the annotated image to the
// upper computer. Runs until IsServerStart is cleared by another
// thread.
void TemplateMatch_Server()
{
  IsServerStart = true;
  ImageHandle imagehander;
  DetectionResult result;
  std::cout << "TemplateMatch Start:  " << std::endl;

  do
  {
    cv::Mat img = camera.GetOneFrame();
    cv::Mat img_raw = img.clone();
    cv::Rect roiRect = cv::Rect(0, 0, img_raw.cols, img_raw.rows);
    if (img.channels() == 0 || img.empty())
    {
      logMessage("image is empty !");
      sleep(5); // back off so a missing camera does not spin the CPU
      continue;
    }

    // Use find() to check whether an ROI has been configured.
    auto it = Template_ROI.find("ROI");
    if (it != Template_ROI.end())
    {
      auto ROI = getTemplateROI("ROI");
      // Restrict processing to the region of interest (ROI).
      roiRect = cv::Rect(ROI["X"], ROI["Y"], ROI["Width"], ROI["Height"]);
      img = img(roiRect);
    }

    result = imagehander.PatternRecognition(img, getinputTempl());
    // Map ROI-relative coordinates back into full-frame coordinates.
    updateDetectionResult(result, img_raw, roiRect);
    updateCameraCalibrations(result.coordinates);

    cv::Mat resizedFrame1;
    cv::resize(result.img, resizedFrame1, IMAGE_SIZE);
    std::string frametest = imagehander.Image2json(resizedFrame1);
    std::vector<uint8_t> encodedData = EncodeMessage(IMAGE_SEND, frametest);
    send(Upper_Computer_fd, encodedData.data(), encodedData.size(), 0);
  } while (IsServerStart);
  std::cout << "The TemplateMatch has finished running !" << std::endl;
}

// Translates ROI-relative detection coordinates back into full-frame
// coordinates and pastes the annotated ROI image back into the raw
// frame so the streamed image shows the whole field of view.
//
// Preconditions (not checked here): result.img has the same size as
// roiRect, and roiRect lies fully inside img_raw.
void updateDetectionResult(DetectionResult &result, cv::Mat &img_raw, cv::Rect &roiRect)
{
  // Shift every detected point by the ROI origin. The offset is
  // loop-invariant, so compute it once; range-for also avoids the
  // previous signed/unsigned index comparison.
  const cv::Point2f offset(roiRect.tl());
  for (auto &point : result.coordinates)
  {
    point += offset;
  }

  // Re-insert the annotated ROI into the full-size frame.
  result.img.copyTo(img_raw(roiRect));
  result.img = img_raw;
}
