import React, { useRef, useState } from 'react';
import { Button, Upload, message } from 'antd';
import * as faceapi from 'face-api.js';

// Module-level flag: face-api model weights are fetched over the network, so
// load them at most once per page instead of on every single upload.
let modelsLoaded = false;

/**
 * Loads the SSD MobileNet detector and 68-point landmark models on first
 * call; subsequent calls are a no-op.
 *
 * NOTE(review): the original code also loaded `tinyFaceDetector`, but
 * detection below only ever uses `SsdMobilenetv1Options`, so that download
 * was dead weight and has been dropped.
 */
async function ensureModelsLoaded(): Promise<void> {
  if (modelsLoaded) return;
  const modelUrl = '/models';
  await Promise.all([
    faceapi.nets.ssdMobilenetv1.loadFromUri(`${modelUrl}/ssd_mobilenetv1`),
    faceapi.nets.faceLandmark68Net.loadFromUri(`${modelUrl}/face_landmark_68`),
  ]);
  modelsLoaded = true;
}

const ImageUpload: React.FC = () => {
  const canvasRef = useRef<HTMLCanvasElement>(null);
  // Full uploaded image as a PNG data URL (left-hand preview).
  const [base64Image, setBase64Image] = useState<string>('');
  // Largest detected face, cropped with a 20% margin, as a PNG data URL.
  const [croppedFace, setCroppedFace] = useState<string>('');

  /**
   * Reads the uploaded file, draws it onto the hidden canvas, runs face
   * detection, and crops the largest face (expanded by a 20% margin and
   * clamped to the image bounds) into `croppedFace`.
   *
   * Failures at every stage (model load, file read, image decode, detection)
   * surface as antd messages instead of unhandled promise rejections.
   */
  const handleImageUpload = async (file: File): Promise<void> => {
    if (!file) return;

    try {
      await ensureModelsLoaded();
    } catch (err) {
      console.error(err);
      message.error('模型加载失败');
      return;
    }

    const reader = new FileReader();
    reader.onerror = () => message.error('图片读取失败');
    reader.onload = (e) => {
      const img = new Image();
      img.onerror = () => message.error('图片解析失败');
      // Attach handlers BEFORE assigning src so a fast/cached load cannot
      // fire before we are listening.
      img.onload = async () => {
        const canvas = canvasRef.current;
        const context = canvas?.getContext('2d');
        if (!canvas || !context) return;

        // Mirror the image onto the canvas at its natural size.
        canvas.width = img.width;
        canvas.height = img.height;
        context.drawImage(img, 0, 0, img.width, img.height);
        setBase64Image(canvas.toDataURL('image/png'));

        // Detect every face plus landmarks on the canvas.
        let detections;
        try {
          detections = await faceapi
            .detectAllFaces(canvas, new faceapi.SsdMobilenetv1Options())
            .withFaceLandmarks();
        } catch (err) {
          console.error(err);
          message.error('人脸检测失败');
          return;
        }
        console.log('detections', detections);

        if (detections.length === 0) {
          message.warning('未检测到人脸');
          return;
        }

        // Keep only the face with the largest bounding box.
        const largestFace = detections.reduce((prev, curr) =>
          curr.detection.box.area > prev.detection.box.area ? curr : prev
        );
        const { x, y, width, height } = largestFace.detection.box;

        // Expand the box by 20% of its size on every side, clamped so the
        // crop rectangle never leaves the canvas.
        const marginFactor = 0.2;
        const marginX = width * marginFactor;
        const marginY = height * marginFactor;
        const newX = Math.max(0, x - marginX);
        const newY = Math.max(0, y - marginY);
        const newWidth = Math.min(canvas.width - newX, width + 2 * marginX);
        const newHeight = Math.min(canvas.height - newY, height + 2 * marginY);

        const faceCanvas = document.createElement('canvas');
        faceCanvas.width = newWidth;
        faceCanvas.height = newHeight;
        const faceCtx = faceCanvas.getContext('2d');
        if (!faceCtx) return;

        faceCtx.drawImage(
          canvas,
          newX, newY, newWidth, newHeight, // source rect: clamped face box
          0, 0, newWidth, newHeight        // destination rect: full crop canvas
        );
        setCroppedFace(faceCanvas.toDataURL('image/png'));
      };
      img.src = e.target?.result as string;
    };
    reader.readAsDataURL(file);
  };

  return (
    <div style={{ textAlign: 'center' }}>
      <h1>上传图片进行人脸检测</h1>
      <Upload
        beforeUpload={(file) => {
          // Fire-and-forget: errors are handled inside handleImageUpload.
          void handleImageUpload(file);
          return false; // 阻止自动上传
        }}
        showUploadList={false}
      >
        <Button type="primary">选择图片</Button>
      </Upload>
      {/* Hidden working canvas the detector reads from. */}
      <canvas ref={canvasRef} style={{ display: 'none' }} />
      {croppedFace ? (
        <div
          style={{
            margin: '20px',
            display: 'flex',
            justifyContent: 'space-between',
          }}
        >
          <div>
            <h3>原始图片预览:</h3>
            <img src={base64Image} alt="Uploaded" style={{ width: '60%' }} />
          </div>
          <div>
            <h3>裁剪的人脸预览:</h3>
            <img
              src={croppedFace}
              alt="Cropped Face"
              style={{ width: '60%' }}
            />
          </div>
        </div>
      ) : null}
    </div>
  );
};

export default ImageUpload;
