#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
微表情识别系统Web应用
"""

import os
import logging
import time
import uuid
import torch
import cv2
import numpy as np
from flask import Flask, render_template, request, jsonify, send_from_directory, url_for
from werkzeug.utils import secure_filename

# 导入自定义模块
from ..utils.config import load_config
from ..utils.logger import setup_logger
from ..models.model_factory import create_model
from ..preprocessing.preprocess import detect_face, extract_roi, compute_optical_flow

# Create the Flask application
app = Flask(__name__, template_folder='templates', static_folder='static')

# File-upload configuration
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'mp4', 'avi', 'mov', 'webm'}
RESULT_FOLDER = 'results'

# Make sure the upload and result directories exist
os.makedirs(os.path.join(app.root_path, UPLOAD_FOLDER), exist_ok=True)
os.makedirs(os.path.join(app.root_path, RESULT_FOLDER), exist_ok=True)

app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['RESULT_FOLDER'] = RESULT_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 32 * 1024 * 1024  # 32 MB upload limit

# Module-level state, populated by init_app() before the first request
model = None           # PyTorch model built by create_model()
device = None          # torch.device chosen at init time (cuda or cpu)
face_detector = None   # detector object from load_face_detector()
config = None          # configuration dict from load_config()
# Class labels (Chinese): angry, disgust, fear, happy, sad, surprise, neutral
emotion_labels = ['生气', '厌恶', '恐惧', '高兴', '悲伤', '惊讶', '中性']


def allowed_file(filename, extensions=None):
    """Return True if *filename* carries an extension permitted for upload.

    Args:
        filename: Client-supplied file name.
        extensions: Optional set of lowercase extensions to check against;
            defaults to the module-wide ALLOWED_EXTENSIONS. Added as a
            backward-compatible generalization.

    Returns:
        bool: whether the file's extension is in the allowed set.
    """
    if extensions is None:
        extensions = ALLOWED_EXTENSIONS
    # rpartition keeps only the text after the last dot; an empty separator
    # means the name has no extension at all.
    _, dot, ext = filename.rpartition('.')
    return bool(dot) and ext.lower() in extensions


def extract_frames(video_path, num_frames=16):
    """Extract up to *num_frames* evenly spaced frames from a video file.

    Args:
        video_path: Path to a video file readable by OpenCV.
        num_frames: Target number of frames to sample.

    Returns:
        List of BGR frames (numpy arrays). Empty list if the video cannot
        be opened or reports no frames.
    """
    cap = cv2.VideoCapture(video_path)
    try:
        # Bail out early when OpenCV could not open the file at all.
        if not cap.isOpened():
            return []

        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        if total_frames <= 0:
            # Fix: the original returned here without releasing the
            # capture handle, leaking the underlying file descriptor.
            return []

        if total_frames <= num_frames:
            # Fewer frames than requested: use every frame.
            indices = list(range(total_frames))
        else:
            # Otherwise sample uniformly across the whole clip.
            indices = np.linspace(0, total_frames - 1, num_frames, dtype=int)

        frames = []
        for idx in indices:
            cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
            ret, frame = cap.read()
            if ret:
                frames.append(frame)
        return frames
    finally:
        # Always release the capture, including on early returns.
        cap.release()


def preprocess_frames(frames, face_detector_type='MTCNN'):
    """Crop, resize and RGB-convert the face region of each frame.

    Frames with no detectable face are dropped, then the sequence is
    resampled (or padded by repeating the last frame) to exactly the
    configured number of frames.

    Args:
        frames: List of BGR frames, e.g. from extract_frames().
        face_detector_type: Backend name forwarded to detect_face().

    Returns:
        List of RGB face crops, or an empty list when fewer than two
        input frames are supplied or no face is ever detected.
    """
    # A meaningful clip needs at least two frames.
    if len(frames) < 2:
        return []

    # Target crop size comes from the config as [height, width].
    height, width = config.get('dataset', {}).get('frame_size', [224, 224])

    faces = []
    for frame in frames:
        boxes = detect_face(frame, face_detector, face_detector_type)
        if not boxes:
            continue  # no face in this frame: skip it

        # Keep the largest detection by bounding-box area.
        x1, y1, x2, y2 = max(
            boxes, key=lambda b: (b[2] - b[0]) * (b[3] - b[1])
        )
        crop = frame[y1:y2, x1:x2]
        crop = cv2.resize(crop, (width, height))
        faces.append(cv2.cvtColor(crop, cv2.COLOR_BGR2RGB))

    # Nothing detected anywhere: signal failure to the caller.
    if not faces:
        return []

    target = config.get('dataset', {}).get('num_frames', 16)
    if len(faces) > target:
        # Too many: uniformly downsample to the target length.
        keep = np.linspace(0, len(faces) - 1, target, dtype=int)
        faces = [faces[i] for i in keep]
    elif len(faces) < target:
        # Too few: pad by repeating the final frame.
        faces = faces + [faces[-1]] * (target - len(faces))

    return faces


def predict_emotion(processed_frames):
    """Run the loaded model on a sequence of RGB face crops.

    Args:
        processed_frames: List of equally-sized RGB frames (H, W, C arrays).

    Returns:
        Tuple ``(emotion_idx, all_probs)`` — the argmax class index and a
        numpy array of softmax probabilities — or ``(None, None)`` when
        no frames are given.
    """
    global model, device

    if not processed_frames:
        return None, None

    # Stack to [N, H, W, C], move channels, then add a batch dimension:
    # the tensor fed to the model is [1, N, C, H, W].
    # NOTE(review): the original comment claimed [1, C, N, H, W], but the
    # permute below actually yields [1, N, C, H, W] — confirm which layout
    # the model expects.
    frames_np = np.stack(processed_frames)
    frames_tensor = torch.from_numpy(frames_np).float()
    frames_tensor = frames_tensor.permute(0, 3, 1, 2)  # [N, H, W, C] -> [N, C, H, W]
    frames_tensor = frames_tensor.unsqueeze(0).to(device)

    # Ensure dropout/batch-norm layers are in inference mode.
    model.eval()

    with torch.no_grad():
        outputs = model(frames_tensor)
        probs = torch.softmax(outputs, dim=1)
        # argmax over probs equals argmax over raw logits (softmax is
        # monotonic); the unused intermediate `confidence` local from the
        # original was removed — callers read all_probs[emotion_idx].
        emotion_idx = int(torch.argmax(probs, dim=1).item())
        all_probs = probs[0].cpu().numpy()

    return emotion_idx, all_probs


def save_result_video(video_path, emotion_idx, confidence):
    """Re-encode the video with the predicted emotion drawn on every frame.

    Args:
        video_path: Path of the source video.
        emotion_idx: Index into emotion_labels for the predicted class.
        confidence: Probability of the predicted class.

    Returns:
        str: the generated result id (mp4 basename without extension).
    """
    result_id = str(uuid.uuid4())
    result_path = os.path.join(app.root_path, app.config['RESULT_FOLDER'], f"{result_id}.mp4")

    # Open the original video
    cap = cv2.VideoCapture(video_path)

    # Source geometry and frame rate
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    if fps <= 0:
        # Some containers report 0 fps; fall back to a sane default so
        # VideoWriter does not produce a broken file.
        fps = 25.0

    # Create the output writer
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(result_path, fourcc, fps, (width, height))

    # The overlay text is loop-invariant, so build it once.
    # NOTE(review): cv2.putText uses Hershey fonts, which cannot render the
    # CJK characters in emotion_labels — the overlay may show '?' glyphs;
    # confirm rendering, or draw with a CJK-capable font via PIL.
    emotion_text = emotion_labels[emotion_idx]
    label = f"{emotion_text} ({confidence:.2f})"

    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            cv2.putText(frame, label, (20, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            out.write(frame)
    finally:
        # Fix: release both handles even if reading/encoding a frame raises.
        cap.release()
        out.release()

    return result_id


def init_app(config_path='configs/default.yaml'):
    """Initialize module-level state: config, logging, model, face detector.

    Must run before the app serves requests — it populates the globals
    (`model`, `device`, `face_detector`, `config`) the routes rely on.

    Args:
        config_path: Path to the YAML configuration file.
    """
    global model, device, face_detector, config
    
    # Load configuration
    config = load_config(config_path)
    
    # Set up logging to a file next to the application
    log_file = os.path.join(app.root_path, 'app.log')
    setup_logger(log_file, verbose=True)
    logger = logging.getLogger()
    
    # Select the compute device (GPU when available)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logger.info(f"使用设备: {device}")
    
    # Build the model from the config
    logger.info("正在加载模型...")
    model = create_model(config)
    
    # Load model weights from the checkpoint, if one exists
    checkpoint_path = config.get('checkpoint', 'checkpoints/best_model.pth')
    if os.path.exists(checkpoint_path):
        logger.info(f"从检查点加载权重: {checkpoint_path}")
        # NOTE(review): torch.load without weights_only unpickles arbitrary
        # objects — acceptable for trusted local checkpoints; confirm the
        # checkpoint source is trusted.
        checkpoint = torch.load(checkpoint_path, map_location=device)
        model.load_state_dict(checkpoint['model_state_dict'])
    else:
        # Fall back to randomly initialized weights with a warning
        logger.warning(f"检查点文件不存在: {checkpoint_path}，使用随机初始化的权重")
    
    # Switch to evaluation mode for inference
    model.eval()
    
    # Load the face detector (imported here, presumably to avoid an
    # import cycle at module load time — confirm)
    from ..preprocessing.preprocess import load_face_detector
    face_detector_type = config.get('preprocessing', {}).get('face_detector', 'MTCNN')
    logger.info(f"正在加载人脸检测器: {face_detector_type}")
    face_detector = load_face_detector(face_detector_type)
    
    logger.info("应用初始化完成")


@app.route('/')
def index():
    """Render the application's landing page."""
    template_name = 'index.html'
    return render_template(template_name)


@app.route('/upload', methods=['POST'])
def upload_file():
    """Handle a video upload: validate, run inference, return a JSON result.

    Returns:
        JSON with emotion, confidence, per-class probabilities and the
        result-video URL on success; a JSON error with status 400/500
        otherwise.
    """
    # A multipart 'file' part must be present.
    if 'file' not in request.files:
        return jsonify({'error': '没有文件'}), 400
    
    file = request.files['file']
    
    # An empty filename means no file was selected in the form.
    if file.filename == '':
        return jsonify({'error': '没有选择文件'}), 400
    
    # Reject unsupported extensions early.
    if not allowed_file(file.filename):
        return jsonify({'error': f'不支持的文件类型，请上传 {", ".join(ALLOWED_EXTENSIONS)} 格式的文件'}), 400
    
    # Save under a unique prefix so concurrent uploads never collide.
    filename = secure_filename(file.filename)
    file_id = f"{int(time.time())}_{uuid.uuid4().hex[:8]}"
    # Fix: the original embedded the literal text "(unknown)" instead of the
    # sanitized filename (computed above but never used), so uploads lost
    # their real name and extension on disk.
    file_path = os.path.join(app.root_path, app.config['UPLOAD_FOLDER'], f"{file_id}_{filename}")
    file.save(file_path)
    
    # Process the video
    try:
        # Sample frames from the uploaded clip.
        frames = extract_frames(file_path)
        
        if not frames:
            return jsonify({'error': '无法从视频中提取帧'}), 400
        
        # Crop and align the face region in each frame.
        face_detector_type = config.get('preprocessing', {}).get('face_detector', 'MTCNN')
        processed_frames = preprocess_frames(frames, face_detector_type)
        
        if not processed_frames:
            return jsonify({'error': '无法检测到人脸'}), 400
        
        # Classify the micro-expression.
        emotion_idx, all_probs = predict_emotion(processed_frames)
        
        if emotion_idx is None:
            return jsonify({'error': '预测失败'}), 500
        
        # Render an annotated copy of the video for the client to fetch.
        result_id = save_result_video(file_path, emotion_idx, all_probs[emotion_idx])
        
        # Assemble the JSON payload.
        result = {
            'emotion': emotion_labels[emotion_idx],
            'confidence': float(all_probs[emotion_idx]),
            'all_probs': {emotion_labels[i]: float(all_probs[i]) for i in range(len(emotion_labels))},
            'result_video': url_for('get_result', result_id=result_id)
        }
        
        return jsonify(result)
    
    except Exception as e:
        # Top-level boundary: log with traceback and return a generic error.
        logging.exception(f"处理视频时出错: {str(e)}")
        return jsonify({'error': f'处理视频时出错: {str(e)}'}), 500
    finally:
        # Best-effort cleanup of the uploaded source file.
        try:
            os.remove(file_path)
        except OSError:
            # Fix: narrowed from a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit.
            pass


@app.route('/results/<result_id>')
def get_result(result_id):
    """Serve a previously generated result video by its id."""
    results_dir = os.path.join(app.root_path, app.config['RESULT_FOLDER'])
    video_name = f"{result_id}.mp4"
    return send_from_directory(results_dir, video_name)


@app.route('/about')
def about():
    """Render the about page."""
    template_name = 'about.html'
    return render_template(template_name)


if __name__ == '__main__':
    # Populate module globals (model, detector, config) before serving
    init_app()
    
    # NOTE(review): debug=True enables the Werkzeug reloader and interactive
    # debugger — never expose this on 0.0.0.0 in production.
    app.run(host='0.0.0.0', port=5000, debug=True) 