const sharp = require("sharp");
const ort = require("onnxruntime-node");

// NEU-DET surface-defect class labels, in the exact order the model was
// trained with (index in this array == class index in the model output).
const classes = [
  "crazing",
  "inclusion",
  "patches",
  "pitted_surface", // fixed typo: was "pitted_suface"
  "rolled-in_scale",
  "scratches",
];
// Derived from the label list so the count can never drift out of sync.
const numClasses = classes.length;

class Yolov5Detection {
  constructor() {
    // ONNX Runtime inference session; populated by createModel().
    this.session = null;
    // Name of the model's first input tensor, cached at load time and used
    // by detect() so we never have to assume the export used "images".
    this.inputName = null;
  }

  /**
   * Load the ONNX model and cache the session plus its input tensor name.
   * @param {string} modelPath - Path to the .onnx model file.
   * @throws Re-throws any ONNX Runtime load failure after logging it.
   */
  async createModel(modelPath) {
    try {
      this.session = await ort.InferenceSession.create(modelPath);
      this.inputName = this.session.inputNames[0];
      console.log("模型加载成功", modelPath);
    } catch (error) {
      console.error("模型加载失败", error);
      throw error;
    }
  }

  /**
   * Read an image, resize it to targetSize x targetSize and convert it into
   * a normalized NCHW float32 tensor with values in [0, 1].
   * @param {string} imagePath - Path to the input image file.
   * @param {number} targetSize - Side length (pixels) the image is resized to.
   * @returns {Promise<ort.Tensor>} Tensor of shape [1, 3, targetSize, targetSize].
   */
  async preprocessImage(imagePath, targetSize) {
    // Strip any alpha channel up front: the original code hard-coded a
    // 3-byte pixel stride, which mis-indexed every pixel for RGBA input.
    const { data, info } = await sharp(imagePath)
      .removeAlpha()
      .resize(targetSize, targetSize)
      .raw()
      .toBuffer({ resolveWithObject: true });

    const channels = 3; // model always receives RGB
    const height = info.height;
    const width = info.width;
    // Real per-pixel stride of the raw buffer (1 for grayscale, 3 for RGB).
    const srcChannels = info.channels;

    // Convert interleaved HWC bytes to planar CHW floats, normalizing by
    // 255 in the same pass (no intermediate full-size array). Grayscale
    // input is replicated across all three output channels.
    const tensorData = new Float32Array(channels * height * width);
    for (let c = 0; c < channels; c++) {
      const srcC = Math.min(c, srcChannels - 1);
      const plane = c * height * width;
      for (let y = 0; y < height; y++) {
        for (let x = 0; x < width; x++) {
          tensorData[plane + y * width + x] =
            data[(y * width + x) * srcChannels + srcC] / 255.0;
        }
      }
    }

    return new ort.Tensor("float32", tensorData, [1, channels, height, width]);
  }

  /**
   * Decode raw YOLOv5 output into per-class, NMS-filtered boxes.
   *
   * Each detection occupies (5 + numClasses) consecutive values:
   * [cx, cy, w, h, objectness, classProb0 .. classProbN-1].
   *
   * @param {Float32Array|number[]} detections - Flat model output values.
   * @param {number} confidenceThreshold - Minimum final score a box must
   *   reach to be kept. The score is objectness * best class probability,
   *   per the YOLOv5 reference decode (the original code scored by
   *   objectness alone and ignored the class probabilities it computed).
   * @param {number} nmsThreshold - IoU above which a lower-scored box of
   *   the same class is suppressed.
   * @returns {{class: string, confidence: number, bbox: number[]}[]}
   *   Kept boxes with bbox as [xmin, ymin, xmax, ymax] in input-tensor pixels.
   */
  parseDetections(detections, confidenceThreshold, nmsThreshold) {
    const stride = 5 + classes.length;
    const numDetections = Math.floor(detections.length / stride);
    const candidates = [];

    // Decode every row and keep only boxes that clear the score threshold.
    for (let i = 0; i < numDetections; i++) {
      const offset = i * stride;
      const objectness = detections[offset + 4];
      const classProbs = detections.slice(offset + 5, offset + stride);
      const maxClassIndex = classProbs.indexOf(Math.max(...classProbs));

      // YOLOv5 final score = objectness confidence x best class probability.
      const score = objectness * classProbs[maxClassIndex];
      if (score < confidenceThreshold) continue;

      const xCenter = detections[offset];
      const yCenter = detections[offset + 1];
      const width = detections[offset + 2];
      const height = detections[offset + 3];

      // Convert center/size to corner coordinates.
      candidates.push({
        class: classes[maxClassIndex],
        confidence: score,
        bbox: [
          xCenter - width / 2, // xmin
          yCenter - height / 2, // ymin
          xCenter + width / 2, // xmax
          yCenter + height / 2, // ymax
        ],
      });
    }

    if (candidates.length === 0) return [];

    // Intersection-over-union of two [xmin, ymin, xmax, ymax] boxes.
    const calculateIoU = (box1, box2) => {
      const [xmin1, ymin1, xmax1, ymax1] = box1;
      const [xmin2, ymin2, xmax2, ymax2] = box2;

      const x1 = Math.max(xmin1, xmin2);
      const y1 = Math.max(ymin1, ymin2);
      const x2 = Math.min(xmax1, xmax2);
      const y2 = Math.min(ymax1, ymax2);

      const intersection = Math.max(0, x2 - x1) * Math.max(0, y2 - y1);
      const area1 = (xmax1 - xmin1) * (ymax1 - ymin1);
      const area2 = (xmax2 - xmin2) * (ymax2 - ymin2);
      const union = area1 + area2 - intersection;

      // Guard against degenerate zero-area boxes.
      return union === 0 ? 0 : intersection / union;
    };

    // Group by class so NMS never suppresses a box of a different class.
    const classGroups = candidates.reduce((acc, box) => {
      (acc[box.class] = acc[box.class] || []).push(box);
      return acc;
    }, {});

    // Greedy per-class NMS: repeatedly keep the highest-scored box and
    // drop everything that overlaps it beyond nmsThreshold.
    const finalData = [];
    Object.values(classGroups).forEach((group) => {
      group.sort((a, b) => b.confidence - a.confidence);

      let remaining = [...group];
      while (remaining.length > 0) {
        const best = remaining[0];
        finalData.push(best);
        remaining = remaining
          .slice(1)
          .filter((box) => calculateIoU(best.bbox, box.bbox) <= nmsThreshold);
      }
    });

    return finalData;
  }

  /**
   * Draw detection boxes and labels onto the resized source image and save it.
   * @param {string} imagePath - Source image path.
   * @param {{class: string, confidence: number, bbox: number[]}[]} detectionData
   *   Detections as returned by parseDetections()/detect().
   * @param {string} outputPath - Where to write the annotated image.
   * @param {number} targetSize - Same size used for inference, so the bbox
   *   coordinates line up with the resized image.
   * @param {string} boxLineColor - Rectangle stroke color.
   * @param {number} boxLineWidth - Rectangle stroke width.
   * @param {string} boxTextColor - Label text color.
   * @param {number} boxTextSize - Label font size.
   */
  async drawBoundingBoxes(
    imagePath,
    detectionData,
    outputPath,
    targetSize,
    boxLineColor,
    boxLineWidth,
    boxTextColor,
    boxTextSize
  ) {
    try {
      // Resize exactly like preprocessImage so coordinates match.
      const image = sharp(imagePath).resize(targetSize, targetSize);

      // One full-canvas transparent SVG overlay per detection, composited
      // at the origin so its internal coordinates are absolute.
      const svgOverlays = detectionData.map((detection) => {
        const [xmin, ymin, xmax, ymax] = detection.bbox.map((v) =>
          Math.round(v)
        );
        const label = `${detection.class} ${detection.confidence.toFixed(2)}`;

        const svg = Buffer.from(
          `<svg width="${targetSize}" height="${targetSize}">` +
            `<rect x="${xmin}" y="${ymin}" width="${xmax - xmin}" height="${
              ymax - ymin
            }" stroke="${boxLineColor}" fill="none" stroke-width="${boxLineWidth}"/>` +
            `<text x="${xmin + 5}" y="${ymin + 20}" fill="${boxTextColor}" ` +
            `font-size="${boxTextSize}" font-family="Arial">${label}</text>` +
            `</svg>`
        );

        return { input: svg, left: 0, top: 0 };
      });

      await image.composite(svgOverlays).toFile(outputPath);
      console.log(`结果已保存到 ${outputPath}`);
    } catch (error) {
      console.error(`绘制边界框失败: ${error}`);
    }
  }

  /**
   * Full pipeline: preprocess the image, run inference, decode + NMS.
   * @param {string} imagePath - Path to the image to analyze.
   * @param {number} targetSize - Model input side length in pixels.
   * @param {number} confidenceThreshold - Minimum detection score to keep.
   * @param {number} nmsThreshold - IoU threshold for non-max suppression.
   * @returns {Promise<Array|null>} Detections, or null on any failure.
   */
  async detect(imagePath, targetSize, confidenceThreshold, nmsThreshold) {
    try {
      const inputTensor = await this.preprocessImage(imagePath, targetSize);
      // Feed by the input name discovered at load time instead of assuming
      // the export literal "images" (the original cached this.inputName in
      // createModel but never used it).
      const feeds = { [this.inputName ?? "images"]: inputTensor };
      const results = await this.session.run(feeds);
      // Likewise resolve the output by the session's reported name, falling
      // back to the conventional YOLOv5 export name "output0".
      const outputTensor =
        results[this.session.outputNames?.[0]] ?? results.output0;
      const detections = outputTensor.data;
      return this.parseDetections(
        detections,
        confidenceThreshold,
        nmsThreshold
      );
    } catch (error) {
      console.error("推理出错", error);
      return null;
    }
  }
}

// CommonJS export: consumers `require()` the detector class directly.
module.exports = Yolov5Detection;
