import 'dart:async';
import 'dart:ui' as ui;
import 'dart:math';

import 'package:biometrics/components/controller.dart';
import 'package:biometrics/components/sendable_rect.dart';
import 'package:biometrics/components/task_helper.dart';
import 'package:biometrics/opencv/opencv_utils.dart';
import 'package:flutter/material.dart';
import 'package:flutter/services.dart';
import 'package:opencv_dart/opencv_dart.dart' as cv;
import 'package:permission_handler/permission_handler.dart';

/// Paints a single decoded camera frame at the canvas origin.
class ImagePainter extends CustomPainter {
  /// The frame to render; a new instance is supplied for every frame.
  final ui.Image image;

  /// Reused paint; anti-aliasing smooths the scaled frame edges.
  Paint mainPaint = Paint()..isAntiAlias = true;

  ImagePainter(this.image);

  @override
  void paint(Canvas canvas, Size size) {
    canvas.drawImage(image, Offset.zero, mainPaint);
  }

  @override
  bool shouldRepaint(CustomPainter oldDelegate) {
    // Repaint whenever the delegate type or frame instance changed; for a
    // live video feed each frame is a new ui.Image, so this still repaints
    // every frame, but identical delegates can skip the paint pass.
    return oldDelegate is! ImagePainter || !identical(oldDelegate.image, image);
  }
}

/// Camera-based face recognition view.
class Facedetectview extends StatefulWidget {
  /// Controller used to publish tips and detection results to the host.
  final Controller controller;

  const Facedetectview({super.key, required this.controller});

  @override
  State<Facedetectview> createState() => FacedetectviewState();
}

class FacedetectviewState extends State<Facedetectview> {
  /// Number of faces found in the most recent detection pass.
  int faceCount = 0;

  /// Latest camera frame decoded for rendering by [ImagePainter].
  ui.Image? image;

  /// Display size of the cropped square preview; set in [_getCanvasImage].
  late Size size;

  bool isRemember = false;

  /// Camera handle; static so the device stays claimed across rebuilds.
  static cv.VideoCapture? cap;

  RootIsolateToken rootIsolateToken = RootIsolateToken.instance!;

  /// Temp-file path of the YuNet detection model copied out of assets.
  late String tmpModelPath;

  /// True while an async detection pass is in flight; prevents overlap.
  bool isRunning = false;

  /// Bounding box and facial feature points of the detected face.
  SendableRect? landmarks;
  final TaskHelper _taskHelper = TaskHelper("view");

  @override
  void initState() {
    super.initState();
    _init();
  }

  @override
  void dispose() {
    cap?.dispose();
    // Null the static handle so a disposed capture is never reused.
    cap = null;
    image?.dispose();

    super.dispose();
  }

  /// Copies the detection model to a temp file, requests camera permission
  /// and opens the capture device, reporting progress via controller tips.
  Future<void> _init() async {
    widget.controller.addTip("申请权限...");

    tmpModelPath = await copyAssetFileToTmp(
        "assets/models/face_detection_yunet_2023mar.onnx");
    var permissionStatus = await Permission.camera.request();
    if (!permissionStatus.isGranted) {
      widget.controller.addTip("获取权限失败，请赋予权限", level: TipLevel.error);
      return;
    }
    widget.controller.addTip("获取权限成功，正在打开摄像头...", level: TipLevel.success);
    // Device index 1 — presumably the front camera; TODO confirm per platform.
    cap = cv.VideoCapture.fromDevice(1, apiPreference: cv.CAP_ANY);

    if (!cap!.isOpened) {
      widget.controller.addTip("相机启动失败，请重新尝试", level: TipLevel.error);
      return;
    }
    widget.controller.addTip("打开摄像头成功，开始拍摄", level: TipLevel.success);

    widget.controller.callWhenDetectComplete((v) {
      if (detectTipId != null) {
        widget.controller.addTip("识别成功", level: TipLevel.success);
      }
    });
  }

  /// Returns the multiple of three nearest to [number]
  /// (remainder 1 rounds down, remainder 2 rounds up).
  int _nearestMultipleOfThree(int number) {
    final remainder = number % 3;
    if (remainder == 0) return number;
    return remainder == 1 ? number - 1 : number + 1;
  }

  int thickness = 2;

  /// Draws corner brackets around the detected face box and a filled dot
  /// on each facial feature point.
  void _drawLandmarks(cv.Mat mat, SendableRect landmarks) {
    final x = landmarks.x, y = landmarks.y;
    final w = landmarks.width, h = landmarks.height;
    // Bracket arm length: a third of the shorter box side.
    final len = min(w ~/ 3, h ~/ 3);

    // Eight segments, two per corner of the bounding box.
    final segments = <(cv.Point, cv.Point)>[
      (cv.Point(x, y), cv.Point(x + len, y)),
      (cv.Point(x + w - len, y), cv.Point(x + w, y)),
      (cv.Point(x, y + h), cv.Point(x + len, y + h)),
      (cv.Point(x + w - len, y + h), cv.Point(x + w, y + h)),
      (cv.Point(x, y), cv.Point(x, y + len)),
      (cv.Point(x, y + h - len), cv.Point(x, y + h)),
      (cv.Point(x + w, y), cv.Point(x + w, y + len)),
      (cv.Point(x + w, y + h - len), cv.Point(x + w, y + h)),
    ];
    for (final (p1, p2) in segments) {
      cv.line(mat, p1, p2, cv.Scalar.green, thickness: thickness);
    }

    // Feature points rendered as small filled circles.
    for (var featurePoint in landmarks.facialFeatures) {
      cv.circle(
        mat,
        cv.Point(featurePoint.$1, featurePoint.$2),
        thickness,
        cv.Scalar.green,
        thickness: -1,
      );
    }
  }

  /// Converts a raw NV21 camera frame into a (processed Mat, renderable
  /// image, JPEG bytes) triple.
  ///
  /// Ownership: the caller keeps ownership of [rawMat] (it is NOT disposed
  /// here — the caller already disposes it, and disposing it in both places
  /// was a double-dispose) and receives ownership of the returned Mat.
  Future<(cv.Mat, ui.Image?, Uint8List)> _getCanvasImage(cv.Mat rawMat) async {
    // Convert to BGR, then transpose + flip for the front-facing camera.
    final imgMat = cv.cvtColor(rawMat, cv.COLOR_YUV2BGR_NV21);
    final transposed = cv.transpose(imgMat);
    final flipMat = cv.flip(transposed, -1);
    // The intermediate transposed mat is no longer needed.
    transposed.dispose();

    // Crop a square region and downscale it to half size for display.
    final minSize = min(flipMat.cols, flipMat.rows);
    final x = _nearestMultipleOfThree((minSize / 3).toInt());
    final width = minSize - x;
    final rectRoi = cv.Rect(0, 0, width, width);
    final roiMat = cv.Mat.fromMat(flipMat, roi: rectRoi);
    size = Size(width.toDouble(), width.toDouble()) / 2;
    final cropMat = cv.resize(roiMat, (size.width.toInt(), size.width.toInt()));
    // resize produced a new buffer, so the ROI view can be released.
    roiMat.dispose();

    if (landmarks != null) _drawLandmarks(cropMat, landmarks!);

    // Encode to JPEG and decode into a Flutter-renderable frame.
    final data = cv.imencode(".jpg", cropMat).$2;
    ui.Codec codec = await ui.instantiateImageCodec(data);
    ui.FrameInfo canvasFrame = await codec.getNextFrame();
    codec.dispose();
    imgMat.dispose();
    flipMat.dispose();
    rectRoi.dispose();
    return (cropMat, canvasFrame.image, data);
  }

  /// Handles a detection result: records the landmarks of a single face
  /// and schedules feature extraction.
  ///
  /// [rawMat] stays owned by the caller; the 320x320 working copy created
  /// here is always disposed (it leaked each frame before).
  Future<void> _handleDetectResult(
      List<SendableRect>? faces, cv.Mat rawMat) async {
    if (faces == null) return;
    // The detector ran on a 320x320 input; match its coordinate space.
    final resizedMat = cv.resize(rawMat, (320, 320));
    try {
      if (faces.isEmpty) {
        faceCount = 0;
        landmarks = null;
        return;
      }
      faceCount = faces.length;
      if (faces.length == 1) {
        // Exactly one face: record its bounding box and key points, then
        // extract the recognition feature vector.
        landmarks = faces[0];
        final result = await recognizer.extractFeatures(resizedMat, faces[0]);
        if (result != null) {
          final (feature, _) = result;
          _taskHelper.addTaskWhileIdle(
              () => widget.controller.updateDetectFeature(feature));
        } else {
          // LOG.info << "识别失败" << faces[0];
        }
      } else {
        // Multiple faces: recognition is skipped for this frame.
      }
    } finally {
      resizedMat.dispose();
    }
  }

  int? blurryTipId;
  int? brightnessTipId;
  int? detectTipId;
  int? noFaceTipId;

  /// Removes and clears the "正在识别人脸..." tip if it is showing, so it can
  /// be re-added once detection resumes.
  void _clearDetectTip() {
    if (detectTipId != null) {
      widget.controller.removeTip(detectTipId!);
      detectTipId = null;
    }
  }

  /// Arms a one-shot post-frame callback: grabs a camera frame, runs the
  /// blur/brightness quality gates, and kicks off face detection.
  void _addPostFrameCallback() {
    WidgetsBinding.instance.addPostFrameCallback((_) async {
      if (cap != null && cap!.isOpened) {
        final frame = cap!.read().$2;

        _getCanvasImage(frame).then((v) {
          final (imgMat, cimage, bytes) = v;
          image?.dispose();
          image = cimage;
          // if (widget.controller.isCompleted) return;
          // return;
          // Blurry frame: warn and pause detection until it clears.
          if (OpencvUtils.isBlurry(imgMat, threshold: 100)) {
            blurryTipId ??= widget.controller
                .addTip("图像模糊，请调整摄像机继续尝试", level: TipLevel.warning);
            _clearDetectTip();
            imgMat.dispose(); // was leaked on this early return
            return;
          } else if (blurryTipId != null) {
            widget.controller.removeTip(blurryTipId!);
            blurryTipId = null;
          }
          // Too dark: warn and pause detection until brightness recovers.
          if (!OpencvUtils.isBrightness(imgMat, brightnessThreshold: 20)) {
            brightnessTipId ??= widget.controller
                .addTip("图像亮度过低，请调整环境亮度再继续尝试", level: TipLevel.warning);
            _clearDetectTip();
            imgMat.dispose(); // was leaked on this early return
            return;
          } else if (brightnessTipId != null) {
            widget.controller.removeTip(brightnessTipId!);
            brightnessTipId = null;
          }

          detectTipId ??=
              widget.controller.addTip("正在识别人脸...", level: TipLevel.info);
          // Drop this frame if a detection pass is still in flight.
          if (isRunning) {
            imgMat.dispose();
            return;
          }
          isRunning = true;
          Future.microtask(() async {
            try {
              final ret = await detector.detect(imgMat);
              if (ret != null) {
                await _handleDetectResult(ret.$1, imgMat);
              }
            } finally {
              // Always release the frame and the busy flag — previously a
              // null result left isRunning stuck true and leaked imgMat.
              imgMat.dispose();
              isRunning = false;
            }
          });
        });
        // Safe: _getCanvasImage finished its synchronous use of the frame
        // before its first await, and no longer disposes it itself.
        frame.dispose();
      }
      // Guard against setState after dispose (callback may outlive state).
      if (mounted) setState(() {});
    });
  }

  @override
  Widget build(BuildContext context) {
    // Re-arm the per-frame capture callback on every build.
    _addPostFrameCallback();
    return LayoutBuilder(builder: (ctx, constraints) {
      return Column(
        children: [
          if (image != null)
            SizedBox(
              width: size.width,
              height: size.height,
              child: ClipOval(
                child: SizedBox.expand(
                  child: CustomPaint(painter: ImagePainter(image!)),
                ),
              ),
            ),
          Expanded(
              child: ListView(
            children: widget.controller.tips,
          ))
        ],
      );
    });
  }
}
