import 'dart:async';

import 'package:biometrics/components/sendable_rect.dart';
import 'package:biometrics/components/task_helper.dart';
import 'package:flutter/services.dart';
import 'package:opencv_dart/opencv_dart.dart' as cv;

class Recognizer {
  factory Recognizer() => _instance;
  static final Recognizer _instance = Recognizer._internal();
  Recognizer._internal() {
    init();
  }

  final TaskHelper _taskHelper = TaskHelper("recognize");
  static String? tmpModelPath;
  cv.FaceRecognizerSF? recognizer;

  /// Initializes the face-recognition model; if it is already initialized,
  /// returns the cached instance directly.
  Future<cv.FaceRecognizerSF> init() async {
    if (recognizer == null) {
      // Copy the bundled ONNX model out of the asset bundle once: the
      // native loader needs a real file-system path, not an asset URI.
      tmpModelPath ??= await copyAssetFileToTmp(
          "assets/models/face_recognition_sface_2021dec.onnx");
      recognizer = cv.FaceRecognizerSF.fromFile(
        tmpModelPath!,
        "",
        // backendId: cv.DNN_BACKEND_VKCOM,
        // targetId: cv.DNN_TARGET_VULKAN,
      );
    }

    return recognizer!;
  }

  /// Releases the native recognizer resources.
  ///
  /// Fixed: was `void dispose() async` — an async method with no `await`
  /// whose returned Future could never be observed. Now plain synchronous.
  void dispose() {
    recognizer?.dispose();
    recognizer = null;
  }

  /// Extracts face features.
  /// [faceMat] the raw input image
  /// [faceRect] the face-detection result locating the face inside the
  /// input image (typically a bounding-box row in Mat form)
  /// [showFaceImage] whether to also return the aligned face as JPEG bytes
  ///
  /// Returns `null` when the task helper is busy and the task was skipped.
  Future<(List<double>, Uint8List?)?> extractFeatures(
    cv.Mat faceMat,
    SendableRect faceRect, {
    bool showFaceImage = true,
  }) async {
    // Fixed: forward showFaceImage — previously it was dropped here, so the
    // private method always fell back to its own default of true.
    return await _taskHelper.addTaskWhileIdle<(List<double>, Uint8List?)>(() =>
        _extractFeatures(faceMat, faceRect, showFaceImage: showFaceImage));
  }

  Future<(List<double>, Uint8List?)> _extractFeatures(
    cv.Mat faceMat,
    SendableRect faceRect, {
    bool showFaceImage = true,
  }) async {
    cv.FaceRecognizerSF recognizer = await init();

    cv.Mat faceBox = cv.Mat.fromList(1, faceRect.rawDetection.length,
        cv.MatType.CV_32FC1, faceRect.rawDetection);
    // Face alignment.
    cv.Mat alignedFace = recognizer.alignCrop(faceMat, faceBox);

    // Feature extraction.
    // NOTE(review): SFace features are typically CV_32F; confirm that
    // at<double> reads this element type as expected in opencv_dart.
    cv.Mat featureMat = recognizer.feature(alignedFace);
    List<double> feature = List.generate(
        featureMat.width, (index) => featureMat.at<double>(0, index));

    // Grab the aligned face image (JPEG) only when requested.
    final encodedFace =
        showFaceImage ? cv.imencode('.jpg', alignedFace).$2 : null;

    // Release native memory. Fixed: featureMat was previously leaked.
    featureMat.dispose();
    alignedFace.dispose();
    faceBox.dispose();

    return (feature, encodedFace);
  }

  /// Face comparison.
  /// [feature] the face feature to match
  /// [featureList] the stored face-feature list
  /// [cosineThreshold] cosine-similarity threshold, default 0.38
  /// [l2normThreshold] L2-norm threshold, default 1.12
  ///
  /// Returns `null` when the task helper is busy, otherwise whether the
  /// averaged scores pass both thresholds.
  Future<bool?> matchFaces(
    List<double> feature,
    List<List<double>> featureList, {
    double cosineThreshold = 0.38,
    double l2normThreshold = 1.12,
  }) async {
    // Explicit guard: an empty list previously produced 0/0 = NaN scores,
    // whose threshold comparisons were both false. Same result, made clear.
    if (featureList.isEmpty) return false;
    final ret = await _taskHelper.addTaskWhileIdle<(double, double)>(
        () => _matchFaces(feature, featureList));
    if (ret == null) return null;
    final (cosine, l2norm) = ret;
    return cosine >= cosineThreshold && l2norm <= l2normThreshold;
  }

  /// Face comparison.
  /// [faceFeature] the face feature to match
  /// [matchFeatures] the stored face features
  /// Returns a record of two values: the average cosine similarity and the
  /// average Euclidean (L2) distance across [matchFeatures].
  Future<(double, double)> _matchFaces(
      List<double> faceFeature, List<List<double>> matchFeatures) async {
    cv.FaceRecognizerSF recognizer = await init();
    double scoreCosine = 0, scoreNormL2 = 0;

    // Build the query-feature Mat once. Fixed: it was previously rebuilt
    // twice per loop iteration and never disposed.
    final queryF = cv.Mat.fromList(
        1, faceFeature.length, cv.MatType.CV_32FC1, faceFeature);

    for (var feature in matchFeatures) {
      final matchF =
          cv.Mat.fromList(1, feature.length, cv.MatType.CV_32FC1, feature);

      // Compute cosine similarity and L2 norm between the two vectors.
      scoreCosine += recognizer.match(
        queryF,
        matchF,
        disType: cv.FaceRecognizerSF.FR_COSINE,
      );
      scoreNormL2 += recognizer.match(
        queryF,
        matchF,
        disType: cv.FaceRecognizerSF.FR_NORM_L2,
      );

      // Release native memory. Fixed: matchF was previously leaked.
      matchF.dispose();
    }

    queryF.dispose();

    return (
      scoreCosine / matchFeatures.length,
      scoreNormL2 / matchFeatures.length
    );
  }
}
