import 'dart:typed_data';

import 'package:easy_isolate_mixin/easy_isolate_mixin.dart';
import 'package:flutter/material.dart';
import 'package:google_mlkit_face_detection/google_mlkit_face_detection.dart';
import 'package:image/image.dart' as dartImage;

/// Converts and compresses camera frames that contain faces.
///
/// Decodes Android NV21 (YUV420SP) camera frames into RGBA images and
/// JPEG-encodes them, offloading the CPU-heavy work to a background isolate
/// via [IsolateHelperMixin.loadWithIsolate].
class FaceTaskDelegate with IsolateHelperMixin {
  /// Decodes an NV21 (YUV420SP) [InputImage] into an RGBA [dartImage.Image].
  ///
  /// [image] must carry raw NV21 bytes and non-null metadata; a missing
  /// value throws a null-check error (callers such as [compressFaceImage]
  /// catch and log it). The row stride is assumed to equal the image width —
  /// NOTE(review): confirm against `InputImageMetadata.bytesPerRow` on
  /// devices that pad camera rows.
  dartImage.Image decodeYUV420SP(InputImage image) {
    final width = image.metadata!.size.width.toInt();
    final height = image.metadata!.size.height.toInt();
    final yuv420sp = image.bytes!;

    // The fixed-point YUV->RGB math below packs each pixel into a uint32.
    // To keep that math simple, build a 4-channel image and view its bytes
    // as a Uint32List (equivalent to the 3.x image-library data layout).
    // The unused alpha channel wastes a little memory but avoids per-channel
    // byte writes.
    final outImg =
        dartImage.Image(width: width, height: height, numChannels: 4);
    final outBytes = outImg.getBytes();
    // Anchor the Uint32 view at the byte offset of the returned view so the
    // writes land in the image data even when getBytes() hands back a
    // non-zero-offset view of a larger buffer.
    final rgba = Uint32List.view(
        outBytes.buffer, outBytes.offsetInBytes, width * height);

    // NV21 layout: full-resolution Y plane, then interleaved VU pairs at
    // half vertical and horizontal resolution.
    final frameSize = width * height;

    for (var j = 0, yp = 0; j < height; j++) {
      // Each VU row is shared by two Y rows, hence the (j >> 1).
      var uvp = frameSize + (j >> 1) * width;
      var u = 0;
      var v = 0;
      for (int i = 0; i < width; i++, yp++) {
        // Luma is biased by 16 in the video-range BT.601 encoding.
        var y = (0xff & (yuv420sp[yp])) - 16;
        if (y < 0) {
          y = 0;
        }
        // A VU pair covers two horizontal pixels; refresh on even columns.
        if ((i & 1) == 0) {
          v = (0xff & yuv420sp[uvp++]) - 128;
          u = (0xff & yuv420sp[uvp++]) - 128;
        }

        // Fixed-point BT.601 conversion; results are scaled by 1024 (<< 10)
        // and clamped to the representable range before repacking.
        final y1192 = 1192 * y;
        var r = (y1192 + 1634 * v);
        var g = (y1192 - 833 * v - 400 * u);
        var b = (y1192 + 2066 * u);

        if (r < 0) {
          r = 0;
        } else if (r > 262143) {
          r = 262143;
        }
        if (g < 0) {
          g = 0;
        } else if (g > 262143) {
          g = 262143;
        }
        if (b < 0) {
          b = 0;
        } else if (b > 262143) {
          b = 262143;
        }

        // Pack as 0xAABBGGRR, which a little-endian Uint32 write lays out as
        // the R,G,B,A byte order the 4-channel image expects.
        rgba[yp] = 0xff000000 |
            ((b << 6) & 0xff0000) |
            ((g >> 2) & 0xff00) |
            ((r >> 10) & 0xff);
      }
    }

    // Rotate the image so it's the correct orientation.
    // return dartImage.copyRotate(outImg, angle: 0);
    // return dartImage.copyResize(outImg, width: 300, height: 400);
    return outImg;
  }

  /// JPEG-compresses a face frame on a background isolate.
  ///
  /// Decodes [inputImage] (NV21), optionally mirrors it along
  /// [flipDirection] (defaults to horizontal — pass `null` to skip the
  /// flip), optionally rotates it by [angle] degrees, then encodes to JPEG
  /// at [quality] (0–100).
  ///
  /// Returns the encoded bytes, or `null` if any step throws; errors are
  /// logged via [debugPrint] rather than propagated so a bad frame cannot
  /// crash the capture loop. Elapsed time is always logged.
  Future<Uint8List?> compressFaceImage({
    required InputImage inputImage,
    int quality = 50,
    dartImage.FlipDirection? flipDirection = dartImage.FlipDirection.horizontal,
    int? angle,
  }) async =>
      await loadWithIsolate(() async {
        final stopwatch = Stopwatch()..start();
        try {
          // Single linear pipeline: decode, then apply the optional
          // transforms in the same order as before (flip, then rotate).
          var outImage = decodeYUV420SP(inputImage);
          if (flipDirection != null) {
            outImage = dartImage.copyFlip(outImage, direction: flipDirection);
          }
          if (angle != null) {
            outImage = dartImage.copyRotate(outImage, angle: angle);
          }
          return Uint8List.fromList(dartImage.encodeJpg(
            outImage,
            quality: quality,
          ));
        } catch (e) {
          debugPrint('FaceTaskDelegate.compressFaceImage.error:$e');
        } finally {
          debugPrint(
              'FaceTaskDelegate.compressFaceImage.watch--->(${DateTime.now().toString()})${stopwatch.elapsedMilliseconds / 1000} s');
        }
        return null;
      });
}
