import React, { useState, useEffect, useRef } from 'react';
import * as tf from '@tensorflow/tfjs';
import * as facemesh from '@tensorflow-models/facemesh';

/**
 * Classify a facemesh prediction into one of five coarse expression labels.
 *
 * Works purely on landmark geometry: eye-lid gaps, mouth box, brow extents
 * and mouth-corner offsets, compared against fixed empirical thresholds.
 *
 * @param {Object} face - A facemesh prediction; only `scaledMesh` (an array
 *   of `[x, y, z]` landmark points) is read.
 * @returns {'happy'|'sad'|'angry'|'surprised'|'neutral'} the detected label;
 *   'neutral' when no usable mesh is present or no threshold rule matches.
 */
const analyzeExpression = (face) => {
  const mesh = face?.scaledMesh;
  if (!mesh) return 'neutral';

  // Missing landmark indices fall back to the origin so the math never throws.
  const pt = (i) => mesh[i] || [0, 0, 0];
  const dy = (a, b) => Math.abs(a[1] - b[1]);
  const dx = (a, b) => Math.abs(a[0] - b[0]);

  // Eye openness: mean vertical lid gap of both eyes.
  const eyeAvgHeight = (dy(pt(159), pt(145)) + dy(pt(386), pt(374))) / 2;

  // Mouth bounding measurements.
  const mouthTop = pt(0);
  const mouthWidth = dx(pt(61), pt(291));
  const mouthHeight = dy(mouthTop, pt(17));

  // Brow extent: mean vertical span of both brows.
  const browAvgHeight = (dy(pt(285), pt(276)) + dy(pt(46), pt(55))) / 2;

  // Relative ratios, each guarded against division by zero.
  const mouthOpenRatio = mouthWidth > 0 ? mouthHeight / mouthWidth : 0;
  const eyeOpenRatio =
    eyeAvgHeight > 0 && mouthWidth > 0 ? eyeAvgHeight / (mouthWidth * 0.3) : 0;
  const browRaiseRatio =
    eyeAvgHeight > 0 ? browAvgHeight / (eyeAvgHeight * 1.5) : 0;
  // Signed offset of the averaged mouth corners relative to the upper lip
  // (negative means the corners sit above it, i.e. a smile).
  const mouthCornerRatio = (pt(287)[1] + pt(57)[1]) / 2 - mouthTop[1];

  // Threshold rules — evaluation order matters, most specific first.
  if (mouthOpenRatio > 0.3 && eyeOpenRatio > 1.1 && browRaiseRatio > 1.05) {
    return 'surprised';
  }
  if (mouthCornerRatio < -2.5 && mouthOpenRatio > 0.1) {
    return 'happy';
  }
  if (browRaiseRatio < 0.75 && mouthOpenRatio < 0.12 && mouthCornerRatio > 1.5) {
    return 'angry';
  }
  if (mouthCornerRatio > 2.5 && browRaiseRatio > 1.0) {
    return 'sad';
  }
  return 'neutral';
};

const FacialExpressionAnalyzer = () => {
  // 状态管理
  const [isActive, setIsActive] = useState(false);
  const [model, setModel] = useState(null);
  const [status, setStatus] = useState('准备中...');
  const [currentExpression, setCurrentExpression] = useState('neutral');
  const [expressionHistory, setExpressionHistory] = useState([]);
  const [error, setError] = useState('');
  const [hasCamera, setHasCamera] = useState(false);
  
  // DOM引用
  const videoRef = useRef(null);
  const canvasRef = useRef(null);
  const animationRef = useRef(null);
  
  // 检查设备和浏览器支持
  useEffect(() => {
    const checkSupport = async () => {
      // 检查TensorFlow.js支持
      if (!tf || !tf.version) {
        setError('TensorFlow.js 加载失败');
        return;
      }
      
      // 检查摄像头设备
      try {
        const devices = await navigator.mediaDevices.enumerateDevices();
        const cameraExists = devices.some(d => d.kind === 'videoinput');
        setHasCamera(cameraExists);
        
        if (!cameraExists) {
          setError('未检测到摄像头设备');
          setStatus('未检测到摄像头');
        } else {
          setStatus('请点击"开始分析"启动');
        }
      } catch (err) {
        setError(`设备检测失败: ${err.message}`);
        console.error('设备检测错误:', err);
      }
      
      // 检查WebGL支持
      const gl = document.createElement('canvas').getContext('webgl');
      if (!gl) {
        setError('您的浏览器不支持WebGL，无法运行面部检测');
      }
    };
    
    checkSupport();
    
    // 清理函数
    return () => {
      if (animationRef.current) {
        cancelAnimationFrame(animationRef.current);
      }
    };
  }, []);
  
  // 加载模型
  const loadModel = async () => {
    try {
      setStatus('正在加载模型...');
      setError('');
      
      // 加载轻量版模型以提高兼容性
      const loadedModel = await facemesh.load({
        inputResolution: { width: 640, height: 480 },
        scale: 0.8,
        flipHorizontal: true // 镜像翻转，提升用户体验
      });
      
      setModel(loadedModel);
      setStatus('模型加载完成，准备就绪');
      return loadedModel;
    } catch (err) {
      const errorMsg = `模型加载失败: ${err.message}`;
      setError(errorMsg);
      setStatus('加载失败');
      console.error(errorMsg, err);
      return null;
    }
  };
  
  // 启动摄像头
  const startCamera = async () => {
    try {
      setStatus('正在请求摄像头权限...');
      const stream = await navigator.mediaDevices.getUserMedia({
        video: { 
          width: { ideal: 640 },
          height: { ideal: 480 },
          facingMode: 'user' // 使用前置摄像头
        }
      });
      
      if (videoRef.current) {
        videoRef.current.srcObject = stream;
        
        // 视频准备就绪后开始检测
        videoRef.current.onloadedmetadata = () => {
          if (canvasRef.current) {
            // 同步Canvas尺寸
            canvasRef.current.width = videoRef.current.videoWidth;
            canvasRef.current.height = videoRef.current.videoHeight;
          }
          setStatus('摄像头已启动，开始检测...');
        };
      }
    } catch (err) {
      const errorMsg = `摄像头访问失败: ${err.message}`;
      setError(errorMsg);
      setStatus('摄像头访问失败');
      console.error(errorMsg, err);
      setIsActive(false);
    }
  };
  
  // 面部检测循环
  const detectFaces = async () => {
    if (!model || !videoRef.current || !isActive) return;
    
    try {
      // 检测面部
      const predictions = await model.estimateFaces(videoRef.current);
      
      // 绘制结果
      if (canvasRef.current) {
        const ctx = canvasRef.current.getContext('2d');
        if (ctx) {
          // 清除画布
          ctx.clearRect(0, 0, canvasRef.current.width, canvasRef.current.height);
          
          // 绘制检测到的面部特征点
          if (predictions.length > 0) {
            // 分析表情
            const expression = analyzeExpression(predictions[0]);
            setCurrentExpression(expression);
            
            // 更新历史记录
            setExpressionHistory(prev => {
              const newHistory = [...prev.slice(-29), expression]; // 保留最近30个记录
              return newHistory;
            });
            
            // 绘制特征点
            predictions.forEach(face => {
              // 绘制网格
              face.annotations && Object.values(face.annotations).forEach(annotation => {
                annotation.forEach((point, i, arr) => {
                  ctx.beginPath();
                  ctx.arc(point[0], point[1], 1.5, 0, 2 * Math.PI);
                  ctx.fillStyle = '#4285F4';
                  ctx.fill();
                  
                  // 绘制连接线
                  if (i < arr.length - 1) {
                    const nextPoint = arr[i + 1];
                    ctx.beginPath();
                    ctx.moveTo(point[0], point[1]);
                    ctx.lineTo(nextPoint[0], nextPoint[1]);
                    ctx.strokeStyle = 'rgba(66, 133, 244, 0.3)';
                    ctx.lineWidth = 0.8;
                    ctx.stroke();
                  }
                });
              });
            });
          } else {
            // 未检测到面部
            ctx.font = '16px Arial';
            ctx.fillStyle = '#ff6b6b';
            ctx.textAlign = 'center';
            ctx.fillText('未检测到面部', canvasRef.current.width / 2, canvasRef.current.height / 2);
          }
        }
      }
    } catch (err) {
      console.error('检测过程出错:', err);
      // 非致命错误，继续检测循环
    }
    
    // 继续下一帧检测
    animationRef.current = requestAnimationFrame(detectFaces);
  };
  
  // 开始分析
  const startAnalysis = async () => {
    if (isActive) return;
    
    setIsActive(true);
    setError('');
    
    // 确保模型已加载
    const loadedModel = model || await loadModel();
    if (!loadedModel) {
      setIsActive(false);
      return;
    }
    
    // 启动摄像头
    await startCamera();
    
    // 开始检测循环
    detectFaces();
  };
  
  // 停止分析
  const stopAnalysis = () => {
    setIsActive(false);
    
    // 停止视频流
    if (videoRef.current && videoRef.current.srcObject) {
      videoRef.current.srcObject.getTracks().forEach(track => track.stop());
      videoRef.current.srcObject = null;
    }
    
    // 停止动画循环
    if (animationRef.current) {
      cancelAnimationFrame(animationRef.current);
    }
    
    setStatus('已停止，请点击"开始分析"重新启动');
  };
  
  // 计算表情统计
  const getExpressionStats = () => {
    const stats = {
      happy: 0,
      sad: 0,
      angry: 0,
      surprised: 0,
      neutral: 0
    };
    
    expressionHistory.forEach(exp => {
      if (stats[exp]) stats[exp]++;
    });
    
    return stats;
  };
  
  const expressionStats = getExpressionStats();
  
  // 表情对应的样式和文本
  const expressionConfig = {
    happy: { text: '开心', color: '#4CAF50' },
    sad: { text: '悲伤', color: '#2196F3' },
    angry: { text: '生气', color: '#F44336' },
    surprised: { text: '惊讶', color: '#FFC107' },
    neutral: { text: '中性', color: '#9E9E9E' }
  };
  
  return (
    <div style={{ 
      maxWidth: '1200px', 
      margin: '0 auto', 
      padding: '20px', 
      fontFamily: 'Arial, sans-serif' 
    }}>
      <h2>实时面部表情分析</h2>
      
      {/* 状态和错误提示 */}
      <div style={{ marginBottom: '15px', padding: '10px', borderRadius: '4px' }}>
        <div>状态: <strong>{status}</strong></div>
        {error && (
          <div style={{ color: '#dc3545', marginTop: '5px' }}>
            错误: {error}
          </div>
        )}
      </div>
      
      {/* 控制按钮 */}
      <div style={{ marginBottom: '20px' }}>
        {!isActive ? (
          <button
            onClick={startAnalysis}
            disabled={!hasCamera || !!error}
            style={{
              padding: '10px 20px',
              fontSize: '16px',
              backgroundColor: '#2196F3',
              color: 'white',
              border: 'none',
              borderRadius: '4px',
              cursor: 'pointer',
              disabled: { opacity: 0.5, cursor: 'not-allowed' }
            }}
          >
            开始分析
          </button>
        ) : (
          <button
            onClick={stopAnalysis}
            style={{
              padding: '10px 20px',
              fontSize: '16px',
              backgroundColor: '#F44336',
              color: 'white',
              border: 'none',
              borderRadius: '4px',
              cursor: 'pointer'
            }}
          >
            停止分析
          </button>
        )}
      </div>
      
      {/* 视频和画布容器 */}
      <div style={{ 
        position: 'relative', 
        width: '100%', 
        maxWidth: '640px', 
        margin: '0 auto',
        border: '1px solid #ddd',
        borderRadius: '4px',
        overflow: 'hidden'
      }}>
        {/* 视频元素 - 隐藏式显示，用于捕获画面 */}
        <video
          ref={videoRef}
          autoPlay
          muted
          playsInline
          style={{ 
            width: '100%', 
            height: 'auto',
            display: 'block'
          }}
        />
        
        {/* 画布 - 用于绘制特征点 */}
        <canvas
          ref={canvasRef}
          style={{
            position: 'absolute',
            top: 0,
            left: 0,
            width: '100%',
            height: '100%'
          }}
        />
        
        {/* 当前表情显示 */}
        {isActive && (
          <div style={{
            position: 'absolute',
            bottom: '10px',
            left: '10px',
            padding: '8px 12px',
            backgroundColor: 'rgba(0,0,0,0.7)',
            color: 'white',
            borderRadius: '4px',
            fontSize: '18px'
          }}>
            当前表情: <span style={{ color: expressionConfig[currentExpression].color }}>
              {expressionConfig[currentExpression].text}
            </span>
          </div>
        )}
      </div>
      
      {/* 统计信息 */}
      {expressionHistory.length > 0 && (
        <div style={{ marginTop: '30px' }}>
          <h3>表情统计</h3>
          <div style={{ display: 'flex', flexWrap: 'wrap', gap: '10px', marginBottom: '20px' }}>
            {Object.entries(expressionStats).map(([key, value]) => (
              <div key={key} style={{ display: 'flex', alignItems: 'center' }}>
                <div style={{
                  width: '15px',
                  height: '15px',
                  backgroundColor: expressionConfig[key].color,
                  marginRight: '5px',
                  borderRadius: '3px'
                }}></div>
                <span>{expressionConfig[key].text}: {value}次</span>
              </div>
            ))}
          </div>
        </div>
      )}
    </div>
  );
};

export default FacialExpressionAnalyzer;
    