import * as faceapi from 'face-api.js'

/**
 * Plugin configuration options.
 */
export class FaceApiOptions {
  /**
   * Normalize raw user options into grouped, defaulted settings.
   * @param {Object} [options={}] Raw user options.
   * @param {number} [options.inputSize=224] Detector input size; must be a multiple of 32.
   *   Larger values are more accurate but slower.
   * @param {number} [options.scoreThreshold=0.3] Minimum detection confidence.
   * @param {number} [options.padding=60] Extra pixels around the detected box when cropping.
   * @param {boolean} [options.returnBase64=true] Whether to return cropped faces as base64.
   * @param {boolean} [options.landmarks=true] Whether to extract 68-point facial landmarks.
   * @param {boolean} [options.expressions=false] Whether to recognize facial expressions.
   * @param {boolean} [options.ageGender=false] Whether to estimate age and gender.
   * @param {boolean} [options.descriptor=false] Whether to compute face descriptors (for recognition).
   */
  constructor(options = {}) {
    // Face-detection configuration.
    // `??` (not `||`) so an explicit `scoreThreshold: 0` is honored
    // instead of being silently replaced by the default.
    this.detectorOptions = {
      inputSize: options.inputSize ?? 224,
      scoreThreshold: options.scoreThreshold ?? 0.3
    }

    // Face-crop configuration.
    // `padding: 0` is a valid value and must not fall back to 60, hence `??`.
    this.cropOptions = {
      padding: options.padding ?? 60,
      returnBase64: options.returnBase64 !== false
    }

    // Feature-extraction configuration. Flags are normalized to real
    // booleans; landmarks defaults to true, the rest default to false.
    this.featureOptions = {
      landmarks: options.landmarks !== false,
      expressions: Boolean(options.expressions),
      ageGender: Boolean(options.ageGender),
      descriptor: Boolean(options.descriptor)
    }
  }
}

export default class FaceApiPlugin {
  /**
   * @param {Object} [options={}] Plugin configuration (see FaceApiOptions).
   * @param {string} [modelUrl] Model-weights location. Supports:
   * - full URL: https://your-cdn.com/weights
   * - absolute path: /static/face-api/weights
   * - local path: static/weights
   */
  constructor(options = {}, modelUrl) {
    this.modelsLoaded = false
    this.options = new FaceApiOptions(options)

    // Resolve where the model weight files are served from.
    this.MODEL_URL = this.resolveModelUrl(modelUrl)
  }

  /**
   * Resolve the model base URL.
   * @private
   * @param {string} [url]
   * @returns {string}
   */
  resolveModelUrl(url) {
    // No URL provided: fall back to the default CDN.
    if (!url) {
      return 'https://cd.h5.yingzhu.net/static/face-api.js/weights'
    }

    // Full URL: use as-is.
    if (url.startsWith('http')) {
      return url
    }

    // Absolute path: use as-is.
    if (url.startsWith('/')) {
      return url
    }

    // Local path: prefer the project-root static directory when the
    // marker flag is set. Guard the `uni` global so this also works in
    // environments where it does not exist (plain browser builds would
    // otherwise throw a ReferenceError here).
    if (typeof uni !== 'undefined' && uni.getStorageSync('__face_api_local_weights__')) {
      return `/static/face-api/weights`
    }

    // Otherwise fall back to the plugin's own static directory.
    return `/uni_modules/face-api/static/weights`
  }

  /**
   * Initialize the face-api models, retrying up to 3 times on failure.
   * Idempotent: returns immediately once models are loaded.
   * @returns {Promise<void>}
   * @throws {Error} When loading still fails after the final retry; the
   *   underlying load error is attached as `cause`.
   */
  async init() {
    if (this.modelsLoaded) return

    const maxRetries = 3
    let lastError = null

    for (let attempt = 0; attempt < maxRetries; attempt++) {
      try {
        // The model downloads are independent, so collect them and load
        // in parallel instead of awaiting one at a time.
        const loads = [
          faceapi.nets.tinyFaceDetector.load(this.MODEL_URL + '/tiny_face_detector_model-weights_manifest.json'),
          faceapi.nets.faceLandmark68Net.load(this.MODEL_URL + '/face_landmark_68_model-weights_manifest.json')
        ]

        // Optional models, according to the feature configuration.
        if (this.options.featureOptions.expressions) {
          loads.push(faceapi.nets.faceExpressionNet.load(this.MODEL_URL + '/face_expression_model-weights_manifest.json'))
        }
        if (this.options.featureOptions.ageGender) {
          loads.push(faceapi.nets.ageGenderNet.load(this.MODEL_URL + '/age_gender_model-weights_manifest.json'))
        }
        if (this.options.featureOptions.descriptor) {
          loads.push(faceapi.nets.faceRecognitionNet.load(this.MODEL_URL + '/face_recognition_model-weights_manifest.json'))
        }

        await Promise.all(loads)

        this.modelsLoaded = true
        return
      } catch (error) {
        // Remember the failure so the final error can expose its cause
        // instead of swallowing it silently.
        lastError = error
        if (attempt < maxRetries - 1) {
          await new Promise(resolve => setTimeout(resolve, 2000))
        }
      }
    }

    throw new Error('模型加载失败，已达到最大重试次数', { cause: lastError })
  }

  /**
   * Detect all faces in an image.
   * @param {HTMLImageElement} image
   * @returns {Promise<Object[]>} One result per detected face, containing the
   *   detection box plus whichever optional features were configured
   *   (landmarks, expressions, age/gender, descriptor, cropped face image).
   * @throws {Error} If init() has not completed, or detection itself fails.
   */
  async detectFaces(image) {
    if (!this.modelsLoaded) {
      throw new Error('请先调用init()方法初始化模型')
    }

    try {
      // Detector options from configuration.
      const detectorOptions = new faceapi.TinyFaceDetectorOptions({
        inputSize: this.options.detectorOptions.inputSize,
        scoreThreshold: this.options.detectorOptions.scoreThreshold
      })

      const { landmarks, expressions, ageGender, descriptor } = this.options.featureOptions

      // Build the detection task chain.
      let task = faceapi.detectAllFaces(image, detectorOptions)

      // face-api.js requires withFaceLandmarks() before
      // withFaceDescriptors(), so landmarks are also computed when only
      // descriptors were requested.
      if (landmarks || descriptor) {
        task = task.withFaceLandmarks()
      }
      if (expressions) {
        task = task.withFaceExpressions()
      }
      if (ageGender) {
        task = task.withAgeAndGender()
      }
      if (descriptor) {
        task = task.withFaceDescriptors()
      }

      // Run detection.
      const detections = await task

      // Flatten each detection into a plain result object, optionally
      // attaching a cropped base64 face image.
      const results = await Promise.all(detections.map(async detection => {
        const result = {
          detection: detection.detection,
          landmarks: detection.landmarks,
          expressions: detection.expressions,
          age: detection.age,
          gender: detection.gender,
          genderProbability: detection.genderProbability,
          descriptor: detection.descriptor
        }

        if (this.options.cropOptions.returnBase64) {
          result.faceImage = await this.cropFace(image, detection, this.options.cropOptions.padding)
        }

        return result
      }))

      return results
    } catch (error) {
      console.error('人脸检测失败:', error)
      throw error
    }
  }

  /**
   * Crop a detected face region out of the source image.
   * @param {HTMLImageElement} image
   * @param {Object} detection A single detection result (with `.detection.box`).
   * @param {number} [padding=60] Extra pixels around the box on every side.
   * @returns {Promise<string>} The cropped region as a base64 JPEG data URL.
   * @throws {Error} If no detection is supplied.
   */
  async cropFace(image, detection, padding = 60) {
    if (!detection) {
      throw new Error('未检测到人脸')
    }

    const box = detection.detection.box

    // Compute the crop rectangle, clamped to the image bounds.
    const cropX = Math.max(0, Math.round(box.x) - padding)
    const cropY = Math.max(0, Math.round(box.y) - padding)
    const cropWidth = Math.min(
      image.width - cropX,
      Math.round(box.width) + 2 * padding
    )
    const cropHeight = Math.min(
      image.height - cropY,
      Math.round(box.height) + 2 * padding
    )

    // Draw the crop region onto an offscreen canvas.
    const canvas = document.createElement('canvas')
    canvas.width = cropWidth
    canvas.height = cropHeight
    const ctx = canvas.getContext('2d')

    ctx.drawImage(
      image,
      cropX, cropY, cropWidth, cropHeight,
      0, 0, cropWidth, cropHeight
    )

    // Encode as a base64 JPEG (quality 0.8).
    return canvas.toDataURL('image/jpeg', 0.8)
  }
}