const ort = require('onnxruntime-node');
const sharp = require('sharp');
const fs = require('fs');
const path = require('path');

/**
 * PP-OCR inference service backed by onnxruntime-node.
 *
 * Lazily loads three ONNX models from ../../models/pp-ocr:
 *  - detection  (ch_PP-OCRv4_det_infer.onnx)  — required
 *  - recognition (ch_PP-OCRv4_rec_infer.onnx) — required
 *  - direction classifier (ch_ppocr_mobile_v2.0_cls_infer.onnx) — optional
 *
 * NOTE(review): detection post-processing and per-box recognition are still
 * placeholders (see parseDetectionResults / recognizeTextBox).
 */
class PPOCRService {
  constructor() {
    this.detSession = null;   // text-detection InferenceSession
    this.recSession = null;   // text-recognition InferenceSession
    this.clsSession = null;   // optional direction-classifier InferenceSession
    this.initialized = false; // true only when det + rec models are loaded
    this.initPromise = null;  // in-flight initialize() promise (guards concurrent init)
  }

  /**
   * Load the PP-OCR models. Idempotent and safe under concurrent callers:
   * parallel invocations share one in-flight load instead of racing.
   * @returns {Promise<boolean>} true when both mandatory models loaded.
   */
  async initialize() {
    if (this.initialized) {
      return true;
    }
    // Share a single in-flight load; previously two concurrent recognizeText()
    // calls could each create every InferenceSession.
    if (!this.initPromise) {
      this.initPromise = this._loadModels().finally(() => {
        this.initPromise = null;
      });
    }
    return this.initPromise;
  }

  /**
   * Actually load the ONNX sessions from disk.
   * @returns {Promise<boolean>} success flag (never throws).
   */
  async _loadModels() {
    try {
      // Model directory, resolved relative to this file.
      const modelPath = path.join(__dirname, '../../models/pp-ocr');

      // Detection model (required).
      const detModelPath = path.join(modelPath, 'ch_PP-OCRv4_det_infer.onnx');
      if (fs.existsSync(detModelPath)) {
        this.detSession = await ort.InferenceSession.create(detModelPath);
      }

      // Recognition model (required).
      const recModelPath = path.join(modelPath, 'ch_PP-OCRv4_rec_infer.onnx');
      if (fs.existsSync(recModelPath)) {
        this.recSession = await ort.InferenceSession.create(recModelPath);
      }

      // Direction classifier (optional).
      const clsModelPath = path.join(modelPath, 'ch_ppocr_mobile_v2.0_cls_infer.onnx');
      if (fs.existsSync(clsModelPath)) {
        this.clsSession = await ort.InferenceSession.create(clsModelPath);
      }

      // Fix: only report success when the mandatory models actually loaded.
      // Previously `initialized` was set to true even with no model files,
      // and failures surfaced later as '检测模型未加载' inside detectText.
      this.initialized = this.detSession !== null && this.recSession !== null;
      if (this.initialized) {
        console.log('PP-OCR模型初始化成功');
      } else {
        console.error('PP-OCR模型初始化失败: 检测或识别模型文件缺失');
      }
      return this.initialized;
    } catch (error) {
      console.error('PP-OCR模型初始化失败:', error);
      return false;
    }
  }

  /**
   * Decode + resize an image and convert it to a normalized NCHW tensor.
   * @param {Buffer} imageBuffer - encoded image bytes (any format sharp reads).
   * @returns {Promise<{tensor: import('onnxruntime-node').Tensor, width: number, height: number}>}
   *          float32 tensor of shape [1, 3, H, W] with values in [0, 1].
   * @throws on decode failure.
   */
  async preprocessImage(imageBuffer) {
    try {
      const image = sharp(imageBuffer);
      const metadata = await image.metadata();

      // Cap the longer side at 960 px, preserving aspect ratio.
      const maxSize = 960;
      let width = metadata.width;
      let height = metadata.height;

      if (width > maxSize || height > maxSize) {
        const ratio = Math.min(maxSize / width, maxSize / height);
        width = Math.round(width * ratio);
        height = Math.round(height * ratio);
      }

      // Fix: strip any alpha channel before reading raw pixels. The original
      // code hard-coded a 3-byte pixel stride, so RGBA input (stride 4)
      // silently produced a scrambled tensor.
      const { data, info } = await image
        .resize(width, height)
        .removeAlpha()
        .raw()
        .toBuffer({ resolveWithObject: true });

      // Actual per-pixel stride of the raw buffer: 3 for RGB, 1 for grayscale.
      const srcChannels = info.channels;
      const channels = 3;
      const pixels = info.width * info.height;
      const tensorData = new Float32Array(channels * pixels);

      // Interleaved HWC -> planar CHW, normalized to [0, 1]. Grayscale input
      // is replicated across the three planes.
      for (let p = 0; p < pixels; p++) {
        const src = p * srcChannels;
        const r = data[src];
        const g = srcChannels >= 3 ? data[src + 1] : r;
        const b = srcChannels >= 3 ? data[src + 2] : r;
        tensorData[p] = r / 255.0;
        tensorData[pixels + p] = g / 255.0;
        tensorData[2 * pixels + p] = b / 255.0;
      }

      return {
        tensor: new ort.Tensor('float32', tensorData, [1, channels, info.height, info.width]),
        width: info.width,
        height: info.height
      };
    } catch (error) {
      console.error('图像预处理失败:', error);
      throw error;
    }
  }

  /**
   * Run the detection model over a preprocessed image.
   * @param {{tensor: import('onnxruntime-node').Tensor}} imageTensor - result of preprocessImage().
   * @returns {Promise<Array>} detected text-box coordinates (currently always empty — see parseDetectionResults).
   * @throws if the detection model is not loaded or inference fails.
   */
  async detectText(imageTensor) {
    if (!this.detSession) {
      throw new Error('检测模型未加载');
    }

    try {
      // 'x' is the PP-OCR detection model's input name.
      const results = await this.detSession.run({ x: imageTensor.tensor });
      return this.parseDetectionResults(results);
    } catch (error) {
      console.error('文本检测失败:', error);
      throw error;
    }
  }

  /**
   * Parse raw detection-model output into text-box coordinates.
   * TODO: decode the DB probability map (binarize -> contours -> unclip)
   * per PP-OCR post-processing; this placeholder returns no boxes.
   * @param {Record<string, import('onnxruntime-node').Tensor>} results
   * @returns {Array} array of box coordinates (currently always empty).
   */
  parseDetectionResults(results) {
    const boxes = [];

    try {
      const outputName = Object.keys(results)[0];
      if (outputName === undefined) {
        return boxes;
      }
      // results[outputName] holds the probability map; decoding not yet implemented.
    } catch (error) {
      console.error('解析检测结果失败:', error);
    }

    return boxes;
  }

  /**
   * Full OCR pipeline: preprocess -> detect boxes -> recognize each box.
   * Lazily initializes the models on first use.
   * @param {Buffer} imageBuffer - encoded image bytes.
   * @returns {Promise<{success: boolean, text: string, confidence: number, words: Array}>}
   * @throws if initialization fails or any pipeline stage errors.
   */
  async recognizeText(imageBuffer) {
    try {
      if (!this.initialized) {
        await this.initialize();
      }

      if (!this.initialized) {
        throw new Error('PP-OCR模型未初始化');
      }

      const processedImage = await this.preprocessImage(imageBuffer);

      const textBoxes = await this.detectText(processedImage);

      // Recognize each detected box sequentially (order preserved top-to-bottom
      // as returned by detection).
      const recognitionResults = [];
      for (const box of textBoxes) {
        const text = await this.recognizeTextBox(imageBuffer, box);
        recognitionResults.push({
          text: text,
          box: box,
          confidence: 0.9 // placeholder — real score should come from the rec model
        });
      }

      const fullText = recognitionResults.map(r => r.text).join('\n');

      return {
        success: true,
        text: fullText,
        confidence: 0.9, // placeholder aggregate confidence
        words: recognitionResults
      };
    } catch (error) {
      console.error('PP-OCR识别失败:', error);
      throw error;
    }
  }

  /**
   * Recognize the text inside a single detected box.
   * TODO: crop the box region (perspective-rectified), resize to the rec
   * model's input shape, run recSession, and CTC-decode the output.
   * @param {Buffer} imageBuffer - original encoded image bytes.
   * @param {Array} box - box coordinates from parseDetectionResults().
   * @returns {Promise<string>} recognized text (placeholder value for now).
   */
  async recognizeTextBox(imageBuffer, box) {
    return "识别的文字"; // 简化示例
  }
}

// Export one shared instance so every consumer reuses the same loaded ONNX sessions.
const ppocrService = new PPOCRService();
module.exports = ppocrService;