const sharp = require("sharp");
const ort = require("onnxruntime-node");

// Defect class names, indexed by the integer class id the model emits.
// (Names match the NEU steel surface-defect dataset — presumably NEU-DET;
// confirm against the training config.)
const classes = Object.freeze([
  "crazing",
  "inclusion",
  "patches",
  "pitted_surface",
  "rolled-in_scale",
  "scratches",
]);
// Derived from the list so the count can never drift out of sync with it.
const numClasses = classes.length;

/**
 * YOLOv8 object-detection wrapper around an ONNX Runtime session.
 *
 * Usage: `await det.createModel(path)` once, then `await det.detect(...)`
 * per image. Expects a model whose output is a flat array of boxes laid out
 * as [x1, y1, x2, y2, confidence, classId] — i.e. an export with NMS baked
 * in (presumably; confirm against the export settings, since nmsThreshold
 * is not applied in JS).
 */
class Yolov8Detection {
  constructor() {
    // Populated by createModel(); null until a model is loaded.
    this.session = null;
    this.inputName = null;
  }

  /**
   * Load the ONNX model and remember its first input name.
   * @param {string} modelPath - path to the .onnx file
   * @throws rethrows any session-creation failure after logging it
   */
  async createModel(modelPath) {
    try {
      this.session = await ort.InferenceSession.create(modelPath);
      this.inputName = this.session.inputNames[0];
      console.log("模型加载成功", modelPath);
    } catch (error) {
      console.error("模型加载失败", error);
      throw error;
    }
  }

  /**
   * Read an image, resize it to targetSize x targetSize, and convert it to a
   * normalized (0-1) float32 tensor in NCHW layout.
   * @param {string} imagePath - path of the image to preprocess
   * @param {number} targetSize - square side length expected by the model
   * @returns {Promise<ort.Tensor>} tensor of shape [1, 3, targetSize, targetSize]
   */
  async preprocessImage(imagePath, targetSize) {
    // removeAlpha() guarantees 3-channel raw output even for RGBA/PNG input;
    // the indexing below assumes exactly 3 interleaved channels and would
    // silently mis-read a 4-channel buffer otherwise.
    const { data, info } = await sharp(imagePath)
      .removeAlpha()
      .resize(targetSize, targetSize)
      .raw()
      .toBuffer({ resolveWithObject: true });

    const channels = 3;
    const { height, width } = info;
    const tensorData = new Float32Array(channels * height * width);

    // Convert interleaved HWC bytes to planar CHW floats, scaling each byte
    // to [0, 1] in the same pass (no intermediate normalized copy needed).
    for (let c = 0; c < channels; c++) {
      for (let y = 0; y < height; y++) {
        for (let x = 0; x < width; x++) {
          tensorData[c * height * width + y * width + x] =
            data[(y * width + x) * channels + c] / 255.0;
        }
      }
    }

    return new ort.Tensor("float32", tensorData, [1, channels, height, width]);
  }

  /**
   * Parse the raw model output into detection objects.
   * Layout: 6 floats per box — x1, y1, x2, y2, confidence, classId.
   * @param {Float32Array} detections - flat model output buffer
   * @param {number} confidenceThreshold - boxes below this confidence are dropped
   * @param {number} nmsThreshold - reserved; NMS is assumed to be done by the
   *   model itself and this value is currently not applied
   * @returns {Array<{bbox: number[], confidence: number, class: string}>}
   *   valid detections, sorted by confidence descending
   */
  parseDetections(detections, confidenceThreshold, nmsThreshold) {
    if (!detections || detections.length === 0) {
      return [];
    }

    const STRIDE = 6; // floats per detection box
    // Derive the box count from the buffer instead of assuming 300 boxes:
    // reading past the end yielded undefined coordinates, and because
    // `undefined < threshold` is false and Math.round(undefined) is NaN
    // (which passes both range checks), junk entries were pushed.
    const numBoxes = Math.floor(detections.length / STRIDE);
    const results = [];

    for (let i = 0; i < numBoxes; i++) {
      const base = i * STRIDE;

      const confidence = detections[base + 4];
      if (confidence < confidenceThreshold) continue;

      // Round to tolerate float class ids; reject NaN and out-of-range ids.
      const classId = Math.round(detections[base + 5]);
      if (!Number.isInteger(classId) || classId < 0 || classId >= classes.length) {
        continue;
      }

      results.push({
        bbox: [
          detections[base],
          detections[base + 1],
          detections[base + 2],
          detections[base + 3],
        ],
        confidence,
        class: classes[classId],
      });
    }

    // Highest-confidence detections first.
    return results.sort((a, b) => b.confidence - a.confidence);
  }

  /**
   * Draw detection boxes and labels onto the image and save the result.
   * @param {string} imagePath - input image path
   * @param {Array<{bbox: number[], confidence: number, class: string}>} detectionData
   * @param {string} outputPath - where the annotated image is written
   * @param {number} targetSize - size the image (and box coordinates) refer to
   * @param {string} boxLineColor - stroke color of the box outline
   * @param {number} boxLineWidth - stroke width of the box outline
   * @param {string} boxTextColor - label text color
   * @param {number} boxTextSize - label font size
   */
  async drawBoundingBoxes(
    imagePath,
    detectionData,
    outputPath,
    targetSize,
    boxLineColor,
    boxLineWidth,
    boxTextColor,
    boxTextSize
  ) {
    try {
      const image = sharp(imagePath).resize(targetSize, targetSize);

      // One full-canvas transparent SVG per detection, composited at (0, 0),
      // so box coordinates map 1:1 onto the resized image.
      const svgOverlays = detectionData.map(({ bbox, class: className, confidence }) => {
        const [xmin, ymin, xmax, ymax] = bbox.map((v) => Math.round(v));
        const label = `${className} ${confidence.toFixed(2)}`;

        const svg = Buffer.from(`
          <svg width="${targetSize}" height="${targetSize}">
            <rect x="${xmin}" y="${ymin}" width="${xmax - xmin}" height="${ymax - ymin}"
                  stroke="${boxLineColor}" fill="none" stroke-width="${boxLineWidth}"/>
            <text x="${xmin + 5}" y="${ymin + 20}"
                  fill="${boxTextColor}" font-size="${boxTextSize}" font-family="Arial">
              ${label}
            </text>
          </svg>
        `);

        return { input: svg, left: 0, top: 0 };
      });

      await image.composite(svgOverlays).toFile(outputPath);
      console.log(`结果已保存到 ${outputPath}`);
    } catch (error) {
      console.error(`绘制边界框失败: ${error}`);
    }
  }

  /**
   * Run the full pipeline: preprocess -> inference -> parse.
   * @param {string} imagePath - image to run detection on
   * @param {number} targetSize - model input size (square)
   * @param {number} confidenceThreshold - minimum confidence to keep a box
   * @param {number} nmsThreshold - forwarded to parseDetections (reserved)
   * @returns {Promise<Array|null>} detections, or null on any failure
   */
  async detect(imagePath, targetSize, confidenceThreshold, nmsThreshold) {
    try {
      const inputTensor = await this.preprocessImage(imagePath, targetSize);
      // Use the names discovered at load time instead of hard-coding
      // "images"/"output0", so models with renamed bindings still work.
      const feeds = { [this.inputName ?? "images"]: inputTensor };
      const results = await this.session.run(feeds);
      const outputName = this.session.outputNames?.[0] ?? "output0";
      const detections = results[outputName].data;
      return this.parseDetections(detections, confidenceThreshold, nmsThreshold);
    } catch (error) {
      console.error("推理出错", error);
      return null;
    }
  }
}

// CommonJS export: consumers instantiate the detector class themselves.
module.exports = Yolov8Detection;
