#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
微表情识别系统Web应用
"""

import os
import logging
import time
import uuid
import sys
import numpy as np
from flask import Flask, render_template, request, jsonify, send_from_directory, url_for
from werkzeug.utils import secure_filename
import torch
import cv2
import json
import torch.nn.functional as F

# Create the Flask app, pointing at the project's template and static directories.
app = Flask(__name__, 
            template_folder='src/web/templates', 
            static_folder='src/web/static')

# Upload configuration defaults (config.json may override the extension list
# via load_configuration(), see allowed_file()).
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'mp4', 'avi', 'mov', 'webm'}
RESULT_FOLDER = 'results'

# Ensure the upload/result directories exist under the static folder.
# NOTE(review): app.static_folder is already an absolute path, so the extra
# app.root_path component is discarded by os.path.join — redundant but harmless.
os.makedirs(os.path.join(app.root_path, app.static_folder, UPLOAD_FOLDER), exist_ok=True)
os.makedirs(os.path.join(app.root_path, app.static_folder, RESULT_FOLDER), exist_ok=True)

app.config['UPLOAD_FOLDER'] = os.path.join(app.static_folder, UPLOAD_FOLDER)
app.config['RESULT_FOLDER'] = os.path.join(app.static_folder, RESULT_FOLDER)
app.config['MAX_CONTENT_LENGTH'] = 32 * 1024 * 1024  # 32MB request size limit

# Global state shared by the request handlers; populated at import time further down.
emotion_model = None
device = None
face_detector = None
config = None
# Default (Chinese) label set; replaced by config['dataset']['emotion_labels'] after load.
emotion_labels = ['生气', '厌恶', '恐惧', '高兴', '悲伤', '惊讶', '中性']

# Cache of recent emotion results.
# NOTE(review): not referenced anywhere in this chunk — presumably used elsewhere; verify.
emotion_cache = {
    "last_emotions": [],  # most recent recognition results
    "cache_size": 3       # maximum number of cached entries
}

def allowed_file(filename):
    """Return True if *filename* carries an extension permitted for upload.

    The permitted set comes from the loaded config when available, otherwise
    from the module-level ALLOWED_EXTENSIONS default.
    """
    if config and 'webapp' in config:
        permitted = config['webapp']['allowed_extensions']
    else:
        permitted = ALLOWED_EXTENSIONS

    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in permitted


def load_configuration():
    """Build the system configuration.

    Starts from built-in defaults; when a config.json file sits next to this
    module, the user's settings are deep-merged on top of the defaults.
    Returns the merged configuration dict.
    """
    defaults = {
        'model': {
            'type': 'MOCK',  # one of 'MOCK', 'SWIN3D', 'EfficientNet'
            'checkpoint_path': 'models/model_checkpoint.pth',
            'device': 'cuda' if torch.cuda.is_available() else 'cpu',
        },
        'preprocessing': {
            'face_detector': 'MOCK',  # one of 'MOCK', 'HOG', 'OPENCV'
            'normalize': True,
            'augmentation': False,
        },
        'dataset': {
            'frame_size': (224, 224),  # (height, width)
            'num_frames': 16,          # frames sampled per clip
            'emotion_labels': ['anger', 'contempt', 'disgust', 'fear', 'happiness', 'sadness', 'surprise'],
        },
        'webapp': {
            'host': '0.0.0.0',
            'port': 5000,
            'debug': True,
            'upload_folder': 'uploads',
            'allowed_extensions': ['mp4', 'avi', 'mov', 'webm'],
        },
    }

    config_path = os.path.join(os.path.dirname(__file__), 'config.json')
    if not os.path.exists(config_path):
        return defaults

    # Overlay user settings from config.json; keep defaults on any failure.
    try:
        with open(config_path, 'r', encoding='utf-8') as f:
            deep_update(defaults, json.load(f))
    except Exception as e:
        logging.error(f"加载配置文件失败: {str(e)}")

    return defaults

def deep_update(d, u):
    """Recursively merge mapping *u* into *d*, in place.

    Nested dicts present on both sides are merged key-by-key; any other value
    from *u* simply overwrites the corresponding entry in *d*.
    """
    for key, value in u.items():
        merge_nested = isinstance(value, dict) and isinstance(d.get(key), dict)
        if merge_nested:
            deep_update(d[key], value)
        else:
            d[key] = value

class MockFaceDetector:
    """Stand-in face detector: always reports one square face centered in the frame."""

    def __call__(self, frame):
        """Return a single (left, top, right, bottom) box centered in *frame*.

        The box is half the smaller frame dimension on each side. If the frame
        is unusable (e.g. None), a fixed (0, 0, 100, 100) box is returned.
        """
        try:
            frame_h, frame_w = frame.shape[:2]
            cx, cy = frame_w // 2, frame_h // 2
            half = (min(frame_w, frame_h) // 2) // 2
            return [(cx - half, cy - half, cx + half, cy + half)]
        except Exception as e:
            logging.warning(f"MockFaceDetector处理帧时出错: {e}")
            # Fall back to a fixed default box.
            return [(0, 0, 100, 100)]

class MockModel:
    """Stand-in emotion model used when a real model is unavailable.

    Mimics the minimal nn.Module surface the app relies on: callable on a
    batched tensor, and supports .to()/.eval() chaining.
    """

    def __init__(self):
        # Track the requested device purely for inspection; nothing is moved.
        self.device = 'cpu'

    def __call__(self, x):
        """Return random logits of shape (batch, num_classes).

        One randomly chosen class per sample is boosted by +3.0 so softmax
        produces a clear top prediction.
        """
        batch_size = x.shape[0]
        # Fall back to 7 classes when the module-level label list is absent.
        num_classes = len(emotion_labels) if 'emotion_labels' in globals() else 7

        # torch is already imported at module level; the original re-imported
        # it (and an unused torch.nn.functional) inside this method.
        logits = torch.randn(batch_size, num_classes)
        for i in range(batch_size):
            main_class = torch.randint(0, num_classes, (1,)).item()
            logits[i, main_class] += 3.0

        return logits

    def to(self, device):
        """Pretend to move the model; record the device and return self."""
        self.device = device
        return self

    def eval(self):
        """Pretend to switch to eval mode; return self."""
        return self

def initialize_face_detector(detector_type):
    """Create a face detector of the requested kind.

    Any failure (missing dependency, bad cascade file, unknown type) falls
    back to MockFaceDetector so the application keeps running.
    """
    if detector_type == 'MOCK':
        return MockFaceDetector()

    if detector_type == 'HOG':
        # dlib's HOG-based frontal face detector.
        try:
            import dlib
            return dlib.get_frontal_face_detector()
        except ImportError:
            logging.warning("无法导入dlib，将使用模拟检测器")
            return MockFaceDetector()

    if detector_type == 'OPENCV':
        # OpenCV Haar cascade detector.
        try:
            cascade_path = os.path.join(cv2.__path__[0], 'data', 'haarcascade_frontalface_default.xml')
            if not os.path.exists(cascade_path):
                logging.warning(f"未找到级联文件: {cascade_path}")
                # Fall back to a cascade file in the working directory.
                cascade_path = 'haarcascade_frontalface_default.xml'

            detector = cv2.CascadeClassifier(cascade_path)
            if detector.empty():
                logging.warning("OpenCV人脸检测器初始化失败，将使用模拟检测器")
                return MockFaceDetector()
            return detector
        except Exception as e:
            logging.warning(f"OpenCV人脸检测器初始化失败: {str(e)}，将使用模拟检测器")
            return MockFaceDetector()

    logging.warning(f"未知的人脸检测器类型: {detector_type}，将使用模拟检测器")
    return MockFaceDetector()

def _load_pretrained_weights(model, checkpoint_path, device):
    """Load checkpoint weights into *model* in place, tolerating a mismatched head.

    Accepts either a raw state_dict or a checkpoint wrapped as
    {'model_state_dict': ...}. If the classifier head's shapes differ from
    the model's (e.g. a different class count), the head weights are dropped
    and the remainder is loaded with strict=False. Exceptions propagate to
    the caller.
    """
    # Always map onto CPU unless CUDA is both requested and actually available.
    map_location = device if 'cuda' in device and torch.cuda.is_available() else 'cpu'
    checkpoint = torch.load(checkpoint_path, map_location=map_location)

    # Unwrap the nested checkpoint format when present.
    if isinstance(checkpoint, dict) and 'model_state_dict' in checkpoint:
        state_dict = checkpoint['model_state_dict']
    else:
        state_dict = checkpoint

    # Detect a classifier-head shape mismatch against the target model.
    head_mismatch = False
    model_dict = model.state_dict()
    for k in ['head.weight', 'head.bias']:
        if k in state_dict and k in model_dict and state_dict[k].shape != model_dict[k].shape:
            head_mismatch = True
            logging.warning(f"分类头参数形状不匹配: {k}, 修改加载策略")

    if head_mismatch:
        # Load only the feature extractor; keep the model's own (fresh) head.
        for k in ['head.weight', 'head.bias']:
            state_dict.pop(k, None)
        model.load_state_dict(state_dict, strict=False)
        logging.info("加载模型: 使用预训练特征提取器，不包含分类头")
    else:
        model.load_state_dict(state_dict)
        logging.info("加载模型: 使用完整预训练模型")

    logging.info(f"成功加载模型检查点: {checkpoint_path}")


def initialize_model(model_config):
    """Build the emotion recognition model described by *model_config*.

    Supported types: 'SWIN3D', 'EfficientNet'; anything else yields MockModel.
    SWIN3D loading priority: fine-tuned weights (when finetune_head is set),
    then the configured checkpoint, then random initialization. Any hard
    failure falls back to MockModel so the application keeps running.

    The previously duplicated checkpoint-loading logic for the two real model
    types now lives in _load_pretrained_weights().
    """
    model_type = model_config.get('type', 'MOCK')
    # Force CPU when the config requests CUDA on a machine without it.
    device = model_config.get('device', 'cpu')
    if 'cuda' in device and not torch.cuda.is_available():
        device = 'cpu'
        logging.warning("配置指定CUDA但系统不支持，将使用CPU代替")

    checkpoint_path = model_config.get('checkpoint_path', '')

    # A fine-tuned model file takes priority (used by the SWIN3D branch).
    finetuned_model_path = 'models/finetuned_model.pth'
    if os.path.exists(finetuned_model_path):
        logging.info(f"发现微调后的模型文件: {finetuned_model_path}")

    if model_type == 'SWIN3D':
        try:
            from models.swin_transformer_3d import SwinTransformer3D

            emotion_model = SwinTransformer3D(
                embed_dim=96,
                depths=[2, 2, 6, 2],
                num_heads=[3, 6, 12, 24],
                patch_size=(2, 4, 4),
                window_size=(8, 7, 7),
                drop_path_rate=0.1,
                in_chans=3,
                num_classes=len(emotion_labels),
                patch_norm=True
            )

            # Prefer the fine-tuned weights when configured to use them.
            if os.path.exists(finetuned_model_path) and model_config.get('finetune_head', False):
                try:
                    logging.info(f"加载微调后的模型: {finetuned_model_path}")
                    map_location = device if 'cuda' in device and torch.cuda.is_available() else 'cpu'
                    finetuned_state_dict = torch.load(finetuned_model_path, map_location=map_location)
                    emotion_model.load_state_dict(finetuned_state_dict)
                    logging.info("成功加载微调后的模型")

                    emotion_model = emotion_model.to(device)
                    emotion_model.eval()
                    return emotion_model
                except Exception as e:
                    logging.error(f"加载微调后的模型失败: {str(e)}")
                    logging.info("将尝试加载原始预训练模型")

            # Otherwise try the configured checkpoint (shared loading logic).
            if os.path.exists(checkpoint_path):
                try:
                    _load_pretrained_weights(emotion_model, checkpoint_path, device)
                except Exception as e:
                    logging.error(f"加载模型检查点失败: {str(e)}")
                    logging.info("将使用未预训练模型")
            else:
                logging.warning(f"模型检查点不存在: {checkpoint_path}，将使用未预训练模型")

            emotion_model = emotion_model.to(device)
            emotion_model.eval()
            return emotion_model

        except Exception as e:
            logging.error(f"初始化Swin Transformer 3D模型失败: {str(e)}")
            logging.info("将使用模拟模型代替")
            return MockModel()

    elif model_type == 'EfficientNet':
        try:
            import timm

            emotion_model = timm.create_model(
                'efficientnet_b0',
                pretrained=False,
                num_classes=len(emotion_labels)
            )

            # Load the configured checkpoint (shared loading logic).
            if os.path.exists(checkpoint_path):
                try:
                    _load_pretrained_weights(emotion_model, checkpoint_path, device)
                except Exception as e:
                    logging.error(f"加载模型检查点失败: {str(e)}")
                    logging.info("将使用未预训练模型")
            else:
                logging.warning(f"模型检查点不存在: {checkpoint_path}，将使用未预训练模型")

            emotion_model = emotion_model.to(device)
            emotion_model.eval()
            return emotion_model

        except Exception as e:
            logging.error(f"初始化EfficientNet模型失败: {str(e)}")
            logging.info("将使用模拟模型代替")
            return MockModel()

    else:  # 'MOCK' or unknown type
        return MockModel()

# ---- Module-level initialization (runs once at import time) ----

# Load configuration and adopt its dataset emotion labels.
config = load_configuration()
emotion_labels = config['dataset']['emotion_labels']

# Build the face detector named in the configuration.
face_detector = initialize_face_detector(config['preprocessing']['face_detector'])

# Build the emotion recognition model.
# NOTE(review): the module-level `device` global is NOT assigned here; it
# stays None unless mock_init_app() is called — verify predict_emotion's use.
emotion_model = initialize_model(config['model'])


def mock_init_app():
    """Wire up the application with mock components (for tests/demo runs).

    Overwrites the module-level config, device, face detector and model with
    configuration defaults plus mock implementations.
    """
    global emotion_model, device, face_detector, config

    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )
    logger = logging.getLogger()
    logger.info("初始化模拟应用")

    # Defaults plus any config.json overrides.
    config = load_configuration()
    device = config['model']['device']

    # Mock detector and model stand in for the real components.
    face_detector = MockFaceDetector()
    emotion_model = MockModel().to(device)

    logger.info("模拟应用初始化完成")


def detect_face(image, detector, detector_type='MOCK'):
    """Return face boxes in *image* as (x1, y1, x2, y2) entries.

    'MOCK' yields one box covering the central half of the image; 'HOG' runs
    the supplied dlib detector; any other type yields an empty list.
    """
    if detector_type == 'MOCK':
        # Synthesize a box spanning the middle half of the frame.
        h, w = image.shape[:2]
        return [(w // 4, h // 4, (w * 3) // 4, (h * 3) // 4)]

    if detector_type == 'HOG':
        try:
            # dlib's HOG detector works on grayscale input.
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            boxes = []
            for rect in detector(gray, 1):
                boxes.append([rect.left(), rect.top(), rect.right(), rect.bottom()])
            return boxes
        except Exception as e:
            logging.error(f"人脸检测失败: {str(e)}")
            return []

    return []


def extract_frames(video_path, target_frames=16):
    """Sample up to *target_frames* evenly-spaced RGB frames from a video.

    Returns a list of HxWx3 RGB arrays (possibly empty on failure). Errors
    are logged rather than raised.
    """
    frames = []
    cap = None
    try:
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            logging.error(f"无法打开视频: {video_path}")
            return frames

        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        if total_frames <= 0:
            logging.error(f"视频没有帧: {video_path}")
            return frames

        if total_frames <= target_frames:
            # Short clip: keep every frame.
            indices = list(range(total_frames))
        else:
            # Long clip: sample indices uniformly across the whole video.
            indices = np.linspace(0, total_frames - 1, target_frames, dtype=int)

        for idx in indices:
            cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
            ret, frame = cap.read()
            if ret:
                # OpenCV decodes as BGR; the pipeline works in RGB.
                frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            else:
                logging.warning(f"无法读取视频帧 {idx}")
    except Exception as e:
        logging.error(f"提取视频帧时出错: {str(e)}")
    finally:
        # BUGFIX: the capture was previously leaked on early returns and on
        # exceptions; release it unconditionally.
        if cap is not None:
            cap.release()

    return frames


def preprocess_frame(frame, frame_size):
    """Resize *frame* to (H, W) = *frame_size* and scale pixels into [0, 1].

    Returns a float32 array, or None when *frame* is None or resizing fails.
    """
    if frame is None:
        return None

    try:
        # cv2.resize takes (width, height); frame_size is (height, width).
        resized = cv2.resize(frame, (frame_size[1], frame_size[0]))
        return resized.astype(np.float32) / 255.0
    except Exception as e:
        logging.error(f"预处理帧时出错: {str(e)}")
        return None


def detect_faces(frame, detector_type='HOG'):
    """Detect faces in *frame* using the module-level `face_detector`.

    Returns a list of (x1, y1, x2, y2) boxes; an empty list on any failure.
    Handles grayscale and float-valued frames by converting before detection.
    """
    if frame is None:
        return []

    try:
        # Normalize to a 3-channel image for detection.
        if len(frame.shape) == 2:
            frame_for_detection = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
        elif len(frame.shape) == 3 and frame.shape[2] == 3:
            frame_for_detection = frame.copy()
        else:
            logging.warning(f"不支持的帧格式: {frame.shape}")
            return []

        # Detectors expect uint8 pixels; convert [0,1] floats when needed.
        if frame_for_detection.dtype == np.float32 or frame_for_detection.dtype == np.float64:
            frame_for_detection = (frame_for_detection * 255).astype(np.uint8)

        if detector_type == 'HOG':
            # dlib HOG detector: callable, returns rectangles with .left() etc.
            if hasattr(face_detector, '__call__') and not hasattr(face_detector, 'detectMultiScale'):
                faces = face_detector(frame_for_detection)
                return [(face.left(), face.top(), face.right(), face.bottom()) for face in faces]

        elif detector_type == 'OPENCV':
            # OpenCV Haar cascade: detectMultiScale returns [x, y, w, h] rows.
            if hasattr(face_detector, 'detectMultiScale'):
                gray = cv2.cvtColor(frame_for_detection, cv2.COLOR_RGB2GRAY)
                faces = face_detector.detectMultiScale(gray, 1.1, 4)
                return [(x, y, x + w, y + h) for (x, y, w, h) in faces]

        # Mock detector or unknown type: probe the result format.
        if hasattr(face_detector, '__call__'):
            faces = face_detector(frame_for_detection)
            # BUGFIX: the ndarray case was previously nested inside the
            # `isinstance(faces, list)` branch and therefore unreachable.
            if isinstance(faces, np.ndarray) and faces.ndim == 2 and faces.shape[1] == 4:
                # OpenCV-style [x, y, w, h] rows.
                return [(x, y, x + w, y + h) for (x, y, w, h) in faces]
            if isinstance(faces, list) and len(faces) > 0:
                if hasattr(faces[0], 'left'):
                    # dlib-style rectangles.
                    return [(face.left(), face.top(), face.right(), face.bottom()) for face in faces]
                if isinstance(faces[0], (list, tuple)) and len(faces[0]) == 4:
                    # Already (x1, y1, x2, y2) tuples.
                    return faces

        logging.warning(f"无法从检测器获取人脸: {type(face_detector)}")
        return []

    except Exception as e:
        logging.error(f"人脸检测时出错: {str(e)}")
        return []


def crop_face(frame, face_rect, padding=0.2):
    """Crop the face box out of *frame*, expanded by *padding* on every side.

    *face_rect* is (x1, y1, x2, y2). The expanded box is clipped to the frame
    bounds. Returns the cropped array, or None on bad input or failure.
    """
    if frame is None or not face_rect or len(face_rect) != 4:
        return None

    try:
        x1, y1, x2, y2 = face_rect
        frame_h, frame_w = frame.shape[:2]

        # Expand the box by a fraction of its own width/height.
        pad_x = int((x2 - x1) * padding)
        pad_y = int((y2 - y1) * padding)

        top = max(0, y1 - pad_y)
        bottom = min(frame_h, y2 + pad_y)
        left = max(0, x1 - pad_x)
        right = min(frame_w, x2 + pad_x)

        return frame[top:bottom, left:right]

    except Exception as e:
        logging.error(f"裁剪人脸时出错: {str(e)}")
        return None


def _empty_clip(frame_size, num_frames):
    """Return an all-zeros clip tensor of shape (1, 3, T, H, W) as a fallback."""
    empty_frame = np.zeros((*frame_size, 3), dtype=np.float32)
    clip = np.array([empty_frame] * num_frames).transpose(3, 0, 1, 2)
    return torch.from_numpy(clip).unsqueeze(0)


def preprocess_video(video_path, frame_size=(224, 224), num_frames=16):
    """Turn a video file into a (1, 3, T, H, W) float tensor for the model.

    Samples frames, crops to the first detected face (or uses the whole frame
    when no face is found) and normalizes pixels. Any failure yields an
    all-zeros clip instead of raising, so callers always get a valid tensor.
    The previously quadruplicated zero-clip fallback lives in _empty_clip().
    """
    try:
        if not os.path.exists(video_path):
            logging.error(f"文件不存在: {video_path}")
            return _empty_clip(frame_size, num_frames)

        frames = extract_frames(video_path, num_frames)
        if not frames:
            logging.error(f"无法从视频中提取帧: {video_path}")
            return _empty_clip(frame_size, num_frames)

        # Detect a face once on the first frame and reuse the box everywhere.
        face_rects = detect_faces(frames[0], config['preprocessing']['face_detector'])
        if face_rects:
            face_rect = face_rects[0]
            candidates = [crop_face(frame, face_rect) for frame in frames]
        else:
            logging.warning("未检测到人脸")
            candidates = frames  # fall back to the whole frame

        # BUGFIX: preprocess_frame can return None; previously the no-face
        # path did not filter Nones, which crashed the tensor conversion.
        processed_frames = [
            p for p in (preprocess_frame(f, frame_size) for f in candidates)
            if p is not None
        ]

        # Pad by repeating the last frame (or zeros) until num_frames is reached.
        if len(processed_frames) < num_frames:
            logging.warning(f"帧数不足: {len(processed_frames)}/{num_frames}")
            # float32 filler keeps the clip dtype consistent (was float64).
            filler = processed_frames[-1] if processed_frames else np.zeros((*frame_size, 3), dtype=np.float32)
            while len(processed_frames) < num_frames:
                processed_frames.append(filler)

        try:
            # [T, H, W, C] -> [C, T, H, W], then add a batch dimension.
            clip = np.array(processed_frames).transpose(3, 0, 1, 2)
            return torch.from_numpy(clip).unsqueeze(0)
        except Exception as e:
            logging.error(f"转换为tensor时出错: {str(e)}")
            return _empty_clip(frame_size, num_frames)

    except Exception as e:
        logging.error(f"预处理视频时出错: {str(e)}")
        return _empty_clip(frame_size, num_frames)


def predict_emotion(video_path):
    """Run the emotion model on a whole video.

    Returns a dict with 'emotions' (sorted by descending probability),
    'top_emotion' and 'timeline_emotions'; on failure an 'error' key is set
    and the list fields stay as empty lists so the frontend always sees the
    same shape.
    """
    global emotion_model, device

    if emotion_model is None:
        logging.error("模型未初始化")
        return {
            "error": "模型未初始化",
            "emotions": [],
            "timeline_emotions": []
        }

    try:
        # BUGFIX: the module-level `device` is None unless mock_init_app()
        # ran, and `'cuda' in None` raises TypeError. Derive a default.
        if device is None:
            if config:
                device = config['model']['device']
            else:
                device = 'cuda' if torch.cuda.is_available() else 'cpu'

        # Fall back to CPU when CUDA was requested but is unavailable.
        if 'cuda' in device and not torch.cuda.is_available():
            device = 'cpu'
            logging.warning("配置指定CUDA但系统不支持，将使用CPU代替")

        # Preprocess the video into a (1, 3, T, H, W) tensor.
        frame_size = config['dataset']['frame_size']
        num_frames = config['dataset']['num_frames']
        input_tensor = preprocess_video(video_path, frame_size, num_frames)

        if input_tensor is None:
            return {
                "error": "处理视频时出错",
                "emotions": [],
                "timeline_emotions": []
            }

        input_tensor = input_tensor.to(device)

        with torch.no_grad():
            try:
                output = emotion_model(input_tensor)

                # Models may return raw logits (tensor) or ready-made probabilities.
                if isinstance(output, torch.Tensor):
                    probabilities = torch.nn.functional.softmax(output, dim=1)[0]
                    probabilities = probabilities.cpu().numpy()
                else:
                    probabilities = output[0] if isinstance(output, list) else output.cpu().numpy()[0]

                # Pair probabilities with labels; extra model outputs are ignored.
                emotions = []
                for i, prob in enumerate(probabilities):
                    if i < len(emotion_labels):
                        emotions.append({
                            "label": emotion_labels[i],
                            "probability": float(prob)
                        })
                emotions.sort(key=lambda x: x["probability"], reverse=True)

                # Timeline generation is best-effort; never let it break the response.
                try:
                    timeline_emotions = generate_timeline_emotions(video_path)
                    if timeline_emotions is None:
                        timeline_emotions = []
                except Exception as timeline_error:
                    logging.error(f"生成时间线数据时出错: {str(timeline_error)}")
                    timeline_emotions = []

                return {
                    "emotions": emotions,
                    "top_emotion": emotions[0]["label"] if emotions else None,
                    "timeline_emotions": timeline_emotions
                }
            except Exception as pred_error:
                logging.error(f"模型预测时出错: {str(pred_error)}")
                return {
                    "error": f"模型预测时出错: {str(pred_error)}",
                    "emotions": [],
                    "timeline_emotions": []
                }

    except Exception as e:
        logging.error(f"预测时出错: {str(e)}")
        return {
            "error": f"预测时出错: {str(e)}",
            "emotions": [],
            "timeline_emotions": []
        }

def generate_timeline_emotions(video_path):
    """Analyze emotions at evenly spaced timestamps across the video.

    Returns a list of {"timestamp", "emotions", "top_emotion"} dicts (3 to 10
    entries), or an empty list when the video cannot be read.
    """
    try:
        # Probe the video for its duration.
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            logging.error(f"无法打开视频: {video_path}")
            return []

        fps = cap.get(cv2.CAP_PROP_FPS)
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        cap.release()

        duration = total_frames / fps if fps > 0 else 0
        if duration <= 0:
            logging.error(f"视频时长无效: {duration}秒")
            return []

        # Analyze one-second windows, at 3 to 10 evenly spaced timestamps.
        segment_length = 1.0
        num_segments = min(10, max(3, int(duration / segment_length)))
        timestamps = np.linspace(0, duration, num_segments)

        frame_size = config['dataset']['frame_size']
        frames_per_segment = config['dataset']['num_frames']

        timeline_data = []
        for timestamp in timestamps:
            # Start from a placeholder so every timestamp yields an entry,
            # even when analysis fails or finds nothing.
            entry = {"timestamp": float(timestamp), "emotions": [], "top_emotion": None}
            try:
                segment_emotions = analyze_video_segment(
                    video_path, timestamp, segment_length, frame_size, frames_per_segment)
                if segment_emotions:
                    entry["emotions"] = segment_emotions
                    entry["top_emotion"] = segment_emotions[0]["label"]
            except Exception as e:
                logging.error(f"处理时间戳 {timestamp} 时出错: {str(e)}")
            timeline_data.append(entry)

        return timeline_data

    except Exception as e:
        logging.error(f"生成时间线情绪数据时出错: {str(e)}")
        return []

def analyze_video_segment(video_path, start_time, segment_length, frame_size, num_frames):
    """Run emotion inference on one time window of the video.

    Args:
        video_path: path to the source video file.
        start_time: window start, in seconds.
        segment_length: window length, in seconds.
        frame_size: (height, width) each frame is resized to.
        num_frames: number of frames fed to the model for this window.

    Returns:
        A list of {"label", "probability"} dicts sorted by descending
        probability, or [] on any failure.

    NOTE(review): relies on the module globals `emotion_model`, `device`,
    `config` and `emotion_labels` being initialized; `device` may still be
    None here (tensor.to(None) is a no-op) — confirm intended behavior.
    """
    try:
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            logging.error(f"无法打开视频: {video_path}")
            return []
            
        fps = cap.get(cv2.CAP_PROP_FPS)
        
        # Convert the time window into a frame-index range, clamped to the video end.
        start_frame = int(start_time * fps)
        end_frame = min(int((start_time + segment_length) * fps), int(cap.get(cv2.CAP_PROP_FRAME_COUNT)))
        
        # Collect the frames belonging to this window.
        segment_frames = []
        cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
        
        # Choose which frame indices to sample.
        if end_frame - start_frame <= num_frames:
            # The window has few frames: take them all.
            indices = range(start_frame, end_frame)
        else:
            # Otherwise sample uniformly across the window.
            indices = np.linspace(start_frame, end_frame - 1, num_frames, dtype=int)
            
        # Read the selected frames.
        for idx in indices:
            cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
            ret, frame = cap.read()
            if ret:
                # OpenCV decodes as BGR; convert to RGB for the pipeline.
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                segment_frames.append(frame)
                
        cap.release()
        
        if not segment_frames:
            logging.warning(f"时间段 {start_time}-{start_time + segment_length} 秒没有提取到帧")
            return []
            
        # Detect a face once on the first frame and reuse the box for all frames.
        face_rects = detect_faces(segment_frames[0], config['preprocessing']['face_detector'])
        if not face_rects:
            # No face found: fall back to the whole frame.
            processed_frames = [preprocess_frame(frame, frame_size) for frame in segment_frames]
        else:
            # Use the first detected face.
            face_rect = face_rects[0]
            # Apply the same crop region to every frame in the window.
            face_frames = [crop_face(frame, face_rect) for frame in segment_frames]
            # Preprocess each cropped frame, dropping failed crops.
            processed_frames = [preprocess_frame(frame, frame_size) for frame in face_frames if frame is not None]
            
        # Pad by repeating the last frame (or zeros) until num_frames is reached.
        while len(processed_frames) < num_frames:
            if processed_frames:
                processed_frames.append(processed_frames[-1])
            else:
                # Nothing usable at all: pad with zero frames.
                empty_frame = np.zeros((*frame_size, 3), dtype=np.float32)
                processed_frames.append(empty_frame)
                
        # Convert to a tensor and run inference.
        try:
            # [T, H, W, C] -> [C, T, H, W]
            clip = np.array(processed_frames).transpose(3, 0, 1, 2)
            input_tensor = torch.from_numpy(clip).unsqueeze(0).to(device)  # add batch dimension
            
            # Predict the emotions for this window.
            with torch.no_grad():
                output = emotion_model(input_tensor)
                
                # Models may return raw logits (tensor) or ready-made probabilities.
                if isinstance(output, torch.Tensor):
                    probabilities = torch.nn.functional.softmax(output, dim=1)[0]
                    probabilities = probabilities.cpu().numpy()
                else:
                    probabilities = output[0] if isinstance(output, list) else output.cpu().numpy()[0]
                    
                # Pair probabilities with labels (extra outputs are ignored).
                emotions = []
                for i, prob in enumerate(probabilities):
                    if i < len(emotion_labels):
                        emotions.append({
                            "label": emotion_labels[i],
                            "probability": float(prob)
                        })
                        
                # Sort by descending probability.
                emotions.sort(key=lambda x: x["probability"], reverse=True)
                
                return emotions
        except Exception as e:
            logging.error(f"分析视频段时tensor转换或预测出错: {str(e)}")
            return []
            
    except Exception as e:
        logging.error(f"分析视频段时出错: {str(e)}")
        return []

def save_result_video(video_path, emotion_idx, confidence):
    """Write a copy of the video with the predicted emotion overlaid on each frame.

    Args:
        video_path: Path to the source video file.
        emotion_idx: Index into the global ``emotion_labels`` list.
        confidence: Prediction confidence, rendered with two decimals.

    Returns:
        The generated result id (UUID string). The annotated video is saved
        to ``<RESULT_FOLDER>/<result_id>.mp4``; on failure an empty placeholder
        file is written instead so the result URL still resolves.
    """
    result_id = str(uuid.uuid4())
    result_path = os.path.join(app.root_path, app.config['RESULT_FOLDER'], f"{result_id}.mp4")

    cap = None
    out = None
    try:
        # cv2 is already imported at module level; no local import needed.
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            raise IOError(f"无法打开视频: {video_path}")

        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        # Some containers report 0 fps; fall back to a sane default so the
        # writer does not produce an unplayable file.
        fps = cap.get(cv2.CAP_PROP_FPS) or 25.0

        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(result_path, fourcc, fps, (width, height))

        # Hoist the overlay text out of the loop — it is identical per frame.
        emotion_text = emotion_labels[emotion_idx]
        label = f"{emotion_text} ({confidence:.2f})"

        while True:
            ret, frame = cap.read()
            if not ret:
                break
            cv2.putText(frame, label, (20, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            out.write(frame)
    except Exception as e:
        logging.error(f"保存结果视频失败: {str(e)}")
        # Demo fallback: if video processing failed, create an empty file.
        with open(result_path, 'w') as f:
            f.write('')
    finally:
        # Fix: always release resources — the original leaked cap/out when an
        # exception interrupted the frame loop.
        if cap is not None:
            cap.release()
        if out is not None:
            out.release()

    return result_id


@app.route('/')
def index():
    """Render the landing page."""
    template_name = 'index.html'
    return render_template(template_name)


@app.route('/about')
def about():
    """Render the about page."""
    template_name = 'about.html'
    return render_template(template_name)


@app.route('/uploads/<path:filename>')
def get_uploaded_file(filename):
    """Serve a previously uploaded file from the upload directory.

    ``app.config['UPLOAD_FOLDER']`` already includes the static folder
    prefix, so it is used directly to avoid nested static-directory paths.
    """
    # Fix: the original log message contained the literal text "(unknown)"
    # instead of interpolating the requested filename.
    logging.info(f"获取上传的文件: {filename}, 路径: {app.config['UPLOAD_FOLDER']}")
    return send_from_directory(app.config['UPLOAD_FOLDER'], filename)


@app.route('/api/emotions')
def get_emotions():
    """Expose the list of recognizable emotion labels as JSON."""
    payload = {'emotions': emotion_labels}
    return jsonify(payload)


@app.route('/process_video', methods=['POST'])
def process_video():
    """Handle an uploaded video and return the micro-expression prediction.

    Expects a multipart form with a ``video`` file field. Returns the JSON
    produced by ``predict_emotion`` on success, or an error payload with an
    appropriate HTTP status code (400 for bad input, 500 for processing
    failures).
    """
    if 'video' not in request.files:
        return jsonify({"error": "未提供视频文件"}), 400

    file = request.files['video']
    if file.filename == '':
        return jsonify({"error": "未选择文件"}), 400

    # Fix: read config defensively — the original crashed with a
    # TypeError/KeyError when config was None or missing the 'webapp' section.
    webapp_cfg = config.get('webapp', {}) if config else {}
    upload_folder = webapp_cfg.get('upload_folder', app.config['UPLOAD_FOLDER'])
    # exist_ok avoids the check-then-create race of the original.
    os.makedirs(upload_folder, exist_ok=True)

    # Use the shared allowed_file() helper so extension checks stay consistent
    # across routes instead of duplicating the split logic inline.
    if not allowed_file(file.filename):
        allowed_extensions = webapp_cfg.get('allowed_extensions', ALLOWED_EXTENSIONS)
        return jsonify({"error": f"不支持的文件类型，允许的类型: {', '.join(allowed_extensions)}"}), 400

    try:
        # Sanitize the client-supplied filename before using it in a path.
        filename = secure_filename(file.filename)
        file_path = os.path.join(upload_folder, filename)
        file.save(file_path)

        # Process the video and get the prediction result.
        result = predict_emotion(file_path)

        # Optional: delete the upload afterwards to save space.
        # os.remove(file_path)

        return jsonify(result)

    except Exception as e:
        logging.error(f"处理视频时出错: {str(e)}")
        return jsonify({"error": f"处理视频时出错: {str(e)}"}), 500


@app.route('/results/<result_id>')
def get_result(result_id):
    """Serve the rendered result video for a given result id."""
    results_dir = os.path.join(app.root_path, app.config['RESULT_FOLDER'])
    return send_from_directory(results_dir, f"{result_id}.mp4")


@app.route('/api/detect', methods=['POST'])
def detect_emotions():
    """Detect micro-expressions in an uploaded video.

    Expects a multipart form with a ``file`` field. Saves the upload under a
    unique name, probes its duration/fps with OpenCV, runs
    ``predict_emotion`` and returns the prediction JSON augmented with
    ``video_info`` and a ``thumbnail_url``.
    """
    # Validate the request.
    if 'file' not in request.files:
        return jsonify({"error": "未收到文件"}), 400

    file = request.files['file']

    if file.filename == '':
        return jsonify({"error": "未选择文件"}), 400

    if not allowed_file(file.filename):
        return jsonify({"error": "不支持的文件类型"}), 400

    # Build a collision-free name. Fix: the original appended the literal
    # string "(unknown)" instead of the sanitized filename, which also lost
    # the file extension. (uuid is already imported at module level.)
    filename = secure_filename(file.filename)
    from datetime import datetime
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    unique_filename = f"{timestamp}_{uuid.uuid4().hex}_{filename}"

    # Use app.config's UPLOAD_FOLDER so the file lands in the right place.
    filepath = os.path.join(app.config['UPLOAD_FOLDER'], unique_filename)
    logging.info(f"保存上传文件到: {filepath}")
    file.save(filepath)

    # Fix: initialize fps/duration up front instead of the fragile
    # `'fps' in locals()` check used by the original.
    fps = 0
    duration = 0
    try:
        # Probe basic video info.
        cap = cv2.VideoCapture(filepath)
        if cap.isOpened():
            fps = cap.get(cv2.CAP_PROP_FPS)
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            duration = total_frames / fps if fps > 0 else 0
            cap.release()

        # Predict emotions and enrich the payload.
        result = predict_emotion(filepath)
        result['video_info'] = {
            'duration': duration,
            'fps': fps
        }
        # Thumbnail URL must match the /uploads/<filename> route format.
        result['thumbnail_url'] = f"/uploads/{unique_filename}"
        logging.info(f"处理完成，返回结果: {result}")
        return jsonify(result)
    except Exception as e:
        logging.error(f"处理文件时出错: {e}")
        return jsonify({"error": f"处理文件时出错: {str(e)}"}), 500


@app.route('/api/analyze_frame', methods=['POST'])
def analyze_frame():
    """Analyze a single video frame for micro-expressions.

    Expects a multipart form with a ``frame`` image field and an optional
    ``timestamp`` form value (echoed back unchanged in the response). The
    frame is optionally face-cropped, replicated into a clip of
    ``num_frames`` identical frames (the model expects a temporal sequence),
    and run through the global emotion model. The top emotion is smoothed by
    majority vote over the last few requests via the global ``emotion_cache``.

    Returns JSON with per-label probabilities sorted descending, the smoothed
    and raw top emotions, and the echoed timestamp; or an error payload with
    HTTP 400/500.
    """
    global emotion_model, device, face_detector, emotion_cache
    
    if 'frame' not in request.files:
        return jsonify({"error": "未收到帧图像"}), 400
    
    file = request.files['frame']
    timestamp = request.form.get('timestamp', 0)
    
    try:
        # Decode the uploaded bytes into an OpenCV image
        frame_data = file.read()
        np_arr = np.frombuffer(frame_data, np.uint8)
        frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
        
        if frame is None:
            return jsonify({"error": "无法解码帧图像"}), 400
        
        # Convert to RGB (OpenCV decodes as BGR)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        
        # Detect faces using the configured detector
        face_rects = detect_faces(frame, config['preprocessing']['face_detector'])
        
        # Preprocess: crop to the first detected face when one is found
        frame_size = config['dataset']['frame_size']
        if not face_rects:
            # No face detected — fall back to the whole frame
            processed_frame = preprocess_frame(frame, frame_size)
        else:
            # Use the first detected face
            face_rect = face_rects[0]
            face_frame = crop_face(frame, face_rect)
            processed_frame = preprocess_frame(face_frame, frame_size) if face_frame is not None else preprocess_frame(frame, frame_size)
        
        if processed_frame is None:
            return jsonify({"error": "处理帧图像失败"}), 500
        
        # Build a fake clip by repeating the single frame num_frames times
        num_frames = config['dataset']['num_frames']
        frames_sequence = [processed_frame] * num_frames  # replicate the current frame into a sequence
        
        # Convert to the model's input layout
        try:
            # [T, H, W, C] -> [C, T, H, W]
            clip = np.array(frames_sequence).transpose(3, 0, 1, 2)
            input_tensor = torch.from_numpy(clip).unsqueeze(0).to(device)  # add batch dimension
        except Exception as e:
            logging.error(f"转换为tensor时出错: {str(e)}")
            return jsonify({"error": f"转换为tensor时出错: {str(e)}"}), 500
        
        # Run the model without gradient tracking
        with torch.no_grad():
            try:
                output = emotion_model(input_tensor)
                
                # Extract per-class probabilities; mock models may return a
                # plain list rather than a tensor, hence the branch.
                if isinstance(output, torch.Tensor):
                    probabilities = torch.nn.functional.softmax(output, dim=1)[0]
                    probabilities = probabilities.cpu().numpy()
                else:
                    probabilities = output[0] if isinstance(output, list) else output.cpu().numpy()[0]
                
                # Build the label/probability result list
                emotions = []
                for i, prob in enumerate(probabilities):
                    if i < len(emotion_labels):
                        emotions.append({
                            "label": emotion_labels[i],
                            "probability": float(prob)
                        })
                
                # Sort by probability, descending
                emotions.sort(key=lambda x: x["probability"], reverse=True)
                
                # Most likely emotion for this frame
                current_top_emotion = emotions[0]["label"] if emotions else None
                
                # Append to the rolling cache used for temporal smoothing.
                # NOTE(review): emotion_cache is a module-level global shared by
                # all clients — concurrent users will smooth each other's
                # results; confirm single-client usage is intended.
                if current_top_emotion:
                    emotion_cache["last_emotions"].append(current_top_emotion)
                    # Keep the cache bounded at cache_size entries
                    if len(emotion_cache["last_emotions"]) > emotion_cache["cache_size"]:
                        emotion_cache["last_emotions"].pop(0)
                
                # Majority vote over the recently cached emotions
                if emotion_cache["last_emotions"]:
                    from collections import Counter
                    emotion_counter = Counter(emotion_cache["last_emotions"])
                    smooth_top_emotion = emotion_counter.most_common(1)[0][0]
                else:
                    smooth_top_emotion = current_top_emotion
                
                # Respond with both the smoothed and the raw top emotion
                return jsonify({
                    "emotions": emotions,
                    "top_emotion": smooth_top_emotion,
                    "raw_top_emotion": current_top_emotion,
                    "timestamp": timestamp
                })
                
            except Exception as e:
                logging.error(f"模型预测时出错: {str(e)}")
                return jsonify({"error": f"模型预测时出错: {str(e)}"}), 500
                
    except Exception as e:
        logging.error(f"分析帧时出错: {str(e)}")
        return jsonify({"error": f"分析帧时出错: {str(e)}"}), 500


if __name__ == '__main__':
    # Initialize the real-model application; fall back to the mock
    # implementation if anything goes wrong during setup.
    try:
        def init_real_model():
            """Load config, face detector and emotion model into the module globals."""
            global emotion_model, device, face_detector, config
            logging.basicConfig(
                level=logging.INFO,
                format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
            )
            logger = logging.getLogger()
            logger.info("初始化真实模型应用")

            # Load configuration
            config = load_configuration()
            # Initialize the face detector
            face_detector = initialize_face_detector(config['preprocessing']['face_detector'])
            # Initialize the emotion model
            # NOTE(review): the global `device` is never assigned here — it stays
            # None unless mock_init_app() sets it; confirm that is intended.
            emotion_model = initialize_model(config['model'])

            logger.info("真实模型应用初始化完成")

        init_real_model()
    except Exception as e:
        logging.error(f"加载真实模型失败，回退到模拟模型: {str(e)}")
        mock_init_app()

    # Fix: read server settings defensively — the original crashed with a
    # TypeError/KeyError if config was still None/incomplete after init.
    # exist_ok also removes the check-then-create race on the upload folder.
    webapp_cfg = config.get('webapp', {}) if config else {}
    upload_folder = webapp_cfg.get('upload_folder', app.config['UPLOAD_FOLDER'])
    os.makedirs(upload_folder, exist_ok=True)

    # Start the application (falling back to Flask's defaults when unset).
    app.run(
        host=webapp_cfg.get('host', '127.0.0.1'),
        port=webapp_cfg.get('port', 5000),
        debug=webapp_cfg.get('debug', False)
    )