// npm install sharp onnxruntime-node
// Run: node detection.js
const sharp = require("sharp");
const ort = require("onnxruntime-node");
const fs = require("fs");
const path = require("path");
// NOTE: removed accidental `const { console } = require("inspector")` (an IDE
// auto-import). It shadowed the global console with the inspector console, so
// console.log output went to the debugger channel instead of stdout.

// NEU-DET surface-defect class labels, indexed by the model's class id.
// (Fixed typo: "pitted_suface" -> "pitted_surface", the dataset's label.)
const classes = [
  "crazing",
  "inclusion",
  "patches",
  "pitted_surface",
  "rolled-in_scale",
  "scratches",
];

// 异步函数，用于预处理图像
// Preprocess an image into a normalized NCHW float32 tensor for the model.
//
// @param {string} imagePath - path to the input image
// @param {number} targetSize - square side length the image is resized to
// @returns {Promise<ort.Tensor>} tensor of shape [1, 3, targetSize, targetSize]
//   with RGB values scaled to [0, 1]
async function preprocessImage(imagePath, targetSize) {
  // Resize and decode to raw interleaved pixels. removeAlpha() guarantees no
  // 4th channel, so the HWC->CHW indexing below stays correct for images with
  // transparency (previously a PNG with alpha would have scrambled the tensor).
  // NOTE(review): a 1-channel grayscale source would still break the stride
  // assumption below — inputs are assumed to be color images; confirm.
  const { data, info } = await sharp(imagePath)
    .removeAlpha()
    .resize(targetSize, targetSize)
    .raw()
    .toBuffer({ resolveWithObject: true });

  const channels = 3;
  const { height, width } = info;
  const tensorData = new Float32Array(channels * height * width);

  // Convert interleaved HWC bytes to planar CHW floats, normalizing to [0, 1]
  // in the same pass (no intermediate full-size Float32Array needed).
  for (let c = 0; c < channels; c++) {
    for (let y = 0; y < height; y++) {
      for (let x = 0; x < width; x++) {
        tensorData[c * height * width + y * width + x] =
          data[(y * width + x) * channels + c] / 255.0;
      }
    }
  }

  return new ort.Tensor("float32", tensorData, [1, channels, height, width]);
}

// 解析检测框数据，并过滤低置信度的检测框
// Parse raw YOLO-style model output into confidence-filtered,
// per-class NMS-deduplicated detections.
//
// Each detection record is (5 + numClasses) consecutive floats:
// [xCenter, yCenter, width, height, objectness, classProb...].
//
// @param {Float32Array|number[]} detections - flat model output buffer
// @param {number} numClasses - number of class probabilities per record
// @param {number} [confThreshold=confidenceThreshold] - minimum objectness
//   score for a box to be kept
// @param {number} [iouThreshold=nmsThreshold] - IoU above which a
//   lower-confidence box of the same class is suppressed
// @param {string[]} [classNames=classes] - labels indexed by class id
// @returns {{class: string, confidence: number, bbox: number[]}[]}
//   bbox is [xmin, ymin, xmax, ymax] in model-input pixel coordinates
function parseDetections(
  detections,
  numClasses,
  confThreshold = confidenceThreshold,
  iouThreshold = nmsThreshold,
  classNames = classes
) {
  const stride = 5 + numClasses;
  const numDetections = Math.floor(detections.length / stride);
  const detectionData = [];

  for (let i = 0; i < numDetections; i++) {
    const offset = i * stride;
    const confidence = detections[offset + 4];
    // Threshold first: skips the class argmax for boxes we drop anyway.
    if (confidence < confThreshold) continue;

    const [xCenter, yCenter, width, height] = detections.slice(
      offset,
      offset + 4
    );

    // Pick the most likely class for this box.
    const classProbs = Array.from(
      detections.slice(offset + 5, offset + 5 + numClasses)
    );
    const maxClassIndex = classProbs.indexOf(Math.max(...classProbs));

    // Convert center/size to corner coordinates.
    detectionData.push({
      class: classNames[maxClassIndex],
      confidence,
      bbox: [
        xCenter - width / 2, // xmin
        yCenter - height / 2, // ymin
        xCenter + width / 2, // xmax
        yCenter + height / 2, // ymax
      ],
    });
  }

  if (detectionData.length === 0) return [];

  // Intersection-over-union of two [xmin, ymin, xmax, ymax] boxes.
  const calculateIoU = (box1, box2) => {
    const [xmin1, ymin1, xmax1, ymax1] = box1;
    const [xmin2, ymin2, xmax2, ymax2] = box2;

    const x1 = Math.max(xmin1, xmin2);
    const y1 = Math.max(ymin1, ymin2);
    const x2 = Math.min(xmax1, xmax2);
    const y2 = Math.min(ymax1, ymax2);

    const intersection = Math.max(0, x2 - x1) * Math.max(0, y2 - y1);
    const area1 = (xmax1 - xmin1) * (ymax1 - ymin1);
    const area2 = (xmax2 - xmin2) * (ymax2 - ymin2);
    const union = area1 + area2 - intersection;

    return union === 0 ? 0 : intersection / union;
  };

  // Group detections by class so NMS never suppresses across classes.
  const classGroups = detectionData.reduce((acc, box) => {
    acc[box.class] = acc[box.class] || [];
    acc[box.class].push(box);
    return acc;
  }, {});

  // Greedy NMS per class: keep the highest-confidence box, drop boxes
  // overlapping it above iouThreshold, repeat with the remainder.
  const finalData = [];
  for (const group of Object.values(classGroups)) {
    group.sort((a, b) => b.confidence - a.confidence);

    let remaining = group;
    while (remaining.length > 0) {
      const best = remaining[0];
      finalData.push(best);
      remaining = remaining
        .slice(1)
        .filter((box) => calculateIoU(best.bbox, box.bbox) <= iouThreshold);
    }
  }

  return finalData;
}

// Render detection boxes and labels onto the (resized) image and save it.
//
// @param {string} imagePath - source image path
// @param {{class: string, confidence: number, bbox: number[]}[]} detectionData -
//   detections in the resized image's pixel coordinates
// @param {string} outputPath - file path the annotated image is written to
// @param {number} targetSize - square side length the image is resized to;
//   must match the size used during preprocessing so the boxes line up
async function drawBoundingBoxes(
  imagePath,
  detectionData,
  outputPath,
  targetSize
) {
  try {
    const image = sharp(imagePath).resize(targetSize, targetSize);
    // (Removed an unused `await image.metadata()` call — its result was
    // never read and it forced an extra decode of the image header.)

    // One full-size SVG overlay per detection, composited at the origin;
    // the rect and label are positioned inside the SVG itself.
    const svgOverlays = detectionData.map((detection) => {
      const [xmin, ymin, xmax, ymax] = detection.bbox.map((v) => Math.round(v));
      const label = `${detection.class} ${detection.confidence.toFixed(2)}`;

      const svg = Buffer.from(`
                <svg width="${targetSize}" height="${targetSize}">
                    <rect x="${xmin}" y="${ymin}" width="${xmax - xmin}" height="${ymax - ymin}"
                          stroke="${boxLineColor}" fill="none" stroke-width="${boxLineWidth}"/>
                    <text x="${xmin + 5}" y="${ymin + 20}"
                          fill="${boxTextColor}" font-size="${boxTextSize}" font-family="Arial">
                        ${label}
                    </text>
                </svg>
            `);

      return { input: svg, left: 0, top: 0 };
    });

    await image.composite(svgOverlays).toFile(outputPath);
    console.log(`结果已保存到 ${outputPath}`);
  } catch (error) {
    console.error(`绘制边界框失败: ${error}`);
  }
}

// Run YOLOv5 inference on one image: preprocess, infer, parse detections,
// and write an annotated copy to ./output. Errors are logged, not thrown.
//
// @param {string} imagePath - path to the image to analyze
async function runInference(imagePath) {
  try {
    const session = await ort.InferenceSession.create("./onnx/yolov5n.onnx");
    const targetSize = 640; // model input resolution (640x640)
    const inputTensor = await preprocessImage(imagePath, targetSize);

    // "images" / "output0" are the tensor names of the YOLOv5 ONNX export.
    const results = await session.run({ images: inputTensor });
    const detections = results.output0.data;

    // Derive the class count from the label table instead of a magic
    // number, so the two can never drift apart.
    const detectionData = parseDetections(detections, classes.length);
    console.log(detectionData);

    // Ensure the output directory exists, then draw the annotated image.
    const outputDir = "./output";
    if (!fs.existsSync(outputDir)) {
      fs.mkdirSync(outputDir, { recursive: true });
    }
    const outputFileName = `detected_${path.basename(imagePath)}`;
    const outputPath = path.join(outputDir, outputFileName);
    await drawBoundingBoxes(imagePath, detectionData, outputPath, targetSize);
  } catch (e) {
    console.error(`推理失败: ${e}`);
  }
}

// Minimum objectness score for a detection to be kept.
const confidenceThreshold = 0.5;
// Bounding-box rendering style.
const boxLineColor = "red";
const boxLineWidth = 2;
const boxTextColor = "white";
const boxTextSize = 12;
// IoU above which overlapping same-class boxes are suppressed during NMS.
const nmsThreshold = 0.5;

// Entry point. Fire-and-forget: runInference catches and logs its own errors.
runInference("./images/scratches_64.jpg");

module.exports = { preprocessImage };
