#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
数据预处理模块
"""

import os
import logging
import cv2
import numpy as np
import pandas as pd
from tqdm import tqdm
import torch
from facenet_pytorch import MTCNN
import dlib
from PIL import Image
import shutil


def preprocess_dataset(config):
    """
    Preprocess the configured dataset.

    Reads the dataset/preprocessing options from ``config``, (re)creates the
    output directory, loads the face and landmark detectors, and dispatches
    to the dataset-specific processing routine.

    Args:
        config (dict): Configuration dictionary with ``dataset`` and
            ``preprocessing`` sections.
    """
    logger = logging.getLogger()
    logger.info("开始数据预处理...")

    # Hoist the config sections instead of re-fetching them per option.
    dataset_cfg = config.get('dataset', {})
    preproc_cfg = config.get('preprocessing', {})

    data_dir = dataset_cfg.get('data_dir', './data')
    dataset_name = dataset_cfg.get('name', 'CASME2')
    save_processed = preproc_cfg.get('save_processed_data', True)
    face_detector_type = preproc_cfg.get('face_detector', 'MTCNN')
    roi_method = preproc_cfg.get('roi_method', 'facial_landmarks')

    # Recreate the output directory so stale results are never mixed in.
    processed_dir = os.path.join(data_dir, f"processed_{dataset_name}")
    if os.path.exists(processed_dir) and save_processed:
        logger.info(f"预处理目录已存在: {processed_dir}，将被覆盖")
        shutil.rmtree(processed_dir)

    if save_processed:
        os.makedirs(processed_dir, exist_ok=True)
        logger.info(f"创建预处理目录: {processed_dir}")

    # Load the face detector.
    face_detector = load_face_detector(face_detector_type)

    # Landmarks are only needed when ROIs are derived from them.
    if roi_method == 'facial_landmarks':
        landmark_detector = load_landmark_detector()
    else:
        landmark_detector = None

    # Locate the raw dataset.
    raw_data_dir = os.path.join(data_dir, dataset_name)
    if not os.path.exists(raw_data_dir):
        logger.error(f"数据集目录不存在: {raw_data_dir}")
        return

    # Dispatch to the dataset-specific pipeline.
    processors = {
        'CASME2': process_casme2,
        'SMIC': process_smic,
        'SAMM': process_samm,
    }
    processor = processors.get(dataset_name)
    if processor is None:
        logger.error(f"不支持的数据集: {dataset_name}")
    else:
        processor(raw_data_dir, processed_dir, face_detector,
                  landmark_detector, config, save_processed)

    logger.info("数据预处理完成")


def load_face_detector(detector_type):
    """
    Build the requested face detector.

    Args:
        detector_type (str): Detector type, 'MTCNN' or 'Dlib'
            (case-insensitive).

    Returns:
        Object: Detector instance, or None for an unrecognized type.
    """
    logger = logging.getLogger()
    kind = detector_type.upper()

    if kind == 'MTCNN':
        logger.info("加载MTCNN人脸检测器...")
        use_cuda = torch.cuda.is_available()
        return MTCNN(
            keep_all=True,
            device=torch.device('cuda' if use_cuda else 'cpu'),
            select_largest=True,
            min_face_size=60,
            post_process=False,
        )

    if kind == 'DLIB':
        logger.info("加载Dlib人脸检测器...")
        return dlib.get_frontal_face_detector()

    logger.error(f"不支持的人脸检测器类型: {detector_type}")
    return None


def load_landmark_detector():
    """
    Load the dlib 68-point facial landmark predictor.

    Returns:
        dlib.shape_predictor: Landmark predictor, or None when the model
        file is missing or fails to load.
    """
    logger = logging.getLogger()
    logger.info("加载面部关键点检测器...")

    model_path = os.path.join('models', 'shape_predictor_68_face_landmarks.dat')

    # The model is not bundled: create the target directory and point the
    # user at the official download location.
    if not os.path.exists(model_path):
        os.makedirs('models', exist_ok=True)
        logger.error(f"面部关键点模型不存在: {model_path}")
        logger.info("请从 http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2 下载并解压到models目录")
        return None

    try:
        return dlib.shape_predictor(model_path)
    except Exception as e:
        logger.error(f"加载关键点检测器失败: {str(e)}")
        return None


def detect_face(image, detector, detector_type):
    """
    Run face detection on a single image.

    Args:
        image (numpy.ndarray): Input image (BGR when given as an ndarray).
        detector: Face detector instance.
        detector_type (str): 'MTCNN' or 'Dlib' (case-insensitive).

    Returns:
        list: Bounding boxes as [x1, y1, x2, y2]; empty when nothing is
        detected or the detector/type is unusable.
    """
    if detector is None:
        return []

    kind = detector_type.upper()

    if kind == 'MTCNN':
        # MTCNN consumes RGB PIL images; convert ndarray input from BGR.
        if isinstance(image, np.ndarray):
            rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            pil_image = Image.fromarray(rgb)
        else:
            pil_image = image

        boxes, _, _ = detector.detect(pil_image, landmarks=True)
        if boxes is None:
            return []
        # Round down to integer pixel coordinates.
        return boxes.astype(int).tolist()

    if kind == 'DLIB':
        # dlib works on grayscale input.
        gray = image if image.ndim == 2 else cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Second argument is dlib's upsample count (1 here, as before).
        detections = detector(gray, 1)
        return [[d.left(), d.top(), d.right(), d.bottom()] for d in detections]

    return []


def detect_landmarks(image, face_box, landmark_detector):
    """
    Locate the 68 facial landmarks inside a face bounding box.

    Args:
        image (numpy.ndarray): Input image.
        face_box (list): Face bounding box [x1, y1, x2, y2].
        landmark_detector: dlib shape predictor.

    Returns:
        numpy.ndarray: (68, 2) array of (x, y) coordinates, or None when
        no detector is available.
    """
    if landmark_detector is None:
        return None

    # The predictor operates on grayscale images.
    gray = image if image.ndim == 2 else cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # dlib wants the face box as a rectangle of ints.
    left, top, right, bottom = (int(face_box[k]) for k in range(4))
    rect = dlib.rectangle(left=left, top=top, right=right, bottom=bottom)

    shape = landmark_detector(gray, rect)

    # Flatten the dlib point list into an (N, 2) numpy array.
    return np.array([[point.x, point.y] for point in shape.parts()])


def extract_roi(image, landmarks, roi_type):
    """
    Extract a region of interest (ROI) around facial landmarks.

    The original implementation duplicated the same padded-bounding-box
    crop four times; the branches are collapsed into one parameterized
    path driven by a region table.

    Args:
        image (numpy.ndarray): Input image.
        landmarks (numpy.ndarray): (68, 2) facial landmark coordinates.
        roi_type (str): One of 'eyes', 'mouth', 'nose'; anything else
            (including 'all') selects the whole face.

    Returns:
        numpy.ndarray: Cropped ROI image; the original image when the
        landmarks are missing/invalid or the crop would be empty.
    """
    if landmarks is None or len(landmarks) != 68:
        return image

    # Landmark index ranges (68-point dlib convention) and crop margin in
    # pixels for each supported region.
    regions = {
        'eyes': (slice(36, 48), 10),   # eye landmarks 36-47
        'mouth': (slice(48, 68), 10),  # mouth landmarks 48-67
        'nose': (slice(27, 36), 10),   # nose landmarks 27-35
    }
    # Default: all 68 points with a wider 20px margin ('all' or unknown).
    idx, margin = regions.get(roi_type, (slice(None), 20))

    points = landmarks[idx]
    h, w = image.shape[:2]

    # Pad the landmark bounding box and clamp it to the image borders.
    x_min = max(0, int(np.min(points[:, 0])) - margin)
    y_min = max(0, int(np.min(points[:, 1])) - margin)
    x_max = min(w, int(np.max(points[:, 0])) + margin)
    y_max = min(h, int(np.max(points[:, 1])) + margin)

    roi = image[y_min:y_max, x_min:x_max]
    # Fall back to the full image if clamping produced an empty crop.
    return roi if roi.size > 0 else image


def compute_optical_flow(prev_frame, curr_frame, method, params=None):
    """
    Compute dense optical flow between two frames.

    Args:
        prev_frame (numpy.ndarray): Previous frame (BGR or grayscale).
        curr_frame (numpy.ndarray): Current frame (BGR or grayscale).
        method (str): 'farneback' or 'tvl1' (case-insensitive).
        params (dict): Optional Farneback parameter overrides; missing keys
            fall back to the defaults below. The caller's dict is never
            modified.

    Returns:
        numpy.ndarray: Flow field, or None for an unknown method.
    """
    # Farneback defaults; caller-supplied values take precedence.
    default_params = {
        'pyr_scale': 0.5,
        'levels': 3,
        'winsize': 15,
        'iterations': 3,
        'poly_n': 5,
        'poly_sigma': 1.2
    }
    # Merge into a fresh dict instead of mutating the caller's argument
    # (the original wrote the defaults back into `params` in place).
    merged = dict(default_params)
    if params:
        merged.update(params)

    # Both algorithms operate on grayscale input.
    if len(prev_frame.shape) == 3:
        prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
    else:
        prev_gray = prev_frame

    if len(curr_frame.shape) == 3:
        curr_gray = cv2.cvtColor(curr_frame, cv2.COLOR_BGR2GRAY)
    else:
        curr_gray = curr_frame

    # Compute the flow field.
    if method.lower() == 'farneback':
        flow = cv2.calcOpticalFlowFarneback(
            prev_gray, curr_gray,
            None,
            merged['pyr_scale'],
            merged['levels'],
            merged['winsize'],
            merged['iterations'],
            merged['poly_n'],
            merged['poly_sigma'],
            cv2.OPTFLOW_FARNEBACK_GAUSSIAN
        )
    elif method.lower() == 'tvl1':
        # TV-L1 requires the opencv-contrib `cv2.optflow` module.
        tvl1 = cv2.optflow.DualTVL1OpticalFlow_create()
        flow = tvl1.calc(prev_gray, curr_gray, None)
    else:
        flow = None

    return flow


def extract_lbp_features(image, radius=1, n_points=8):
    """
    Extract a normalized LBP histogram from an image.

    Args:
        image (numpy.ndarray): Input image (BGR or grayscale).
        radius (int): LBP circle radius.
        n_points (int): Number of LBP sampling points.

    Returns:
        numpy.ndarray: Density-normalized histogram with n_points + 2 bins.
    """
    # LBP is defined on grayscale intensities.
    gray = image if len(image.shape) == 2 else cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Downscale to a fixed size to bound the cost of the LBP pass.
    small = cv2.resize(gray, (128, 128))

    # Uniform LBP code map.
    lbp_map = local_binary_pattern(small, n_points, radius, method='uniform')

    # Histogram over the uniform codes (n_points + 2 bins, normalized).
    bins = n_points + 2
    hist, _ = np.histogram(lbp_map.ravel(), bins=bins, range=(0, bins), density=True)
    return hist


def local_binary_pattern(image, n_points, radius, method='uniform'):
    """
    Compute the local binary pattern (LBP) map of a grayscale image.

    Fixes over the original: the uniform lookup table was hard-coded to
    256 entries and the result dtype to uint8, both of which are only
    valid for n_points <= 8; the table is now sized 2**n_points and the
    dtype widened when needed. The second per-pixel remap loop is replaced
    by a single vectorized table lookup (identical output, since code 0
    maps to label 0, so untouched border pixels stay 0).

    Args:
        image (numpy.ndarray): 2-D grayscale input image.
        n_points (int): Number of circularly-spaced sampling points.
        radius (int): Sampling circle radius in pixels.
        method (str): 'uniform' maps each code through the uniform-LBP
            lookup table; any other value returns the raw codes.

    Returns:
        numpy.ndarray: LBP code map, same shape as the input. Pixels
        within `radius` of the border are left as 0.
    """
    rows, cols = image.shape

    # Raw codes need n_points bits; uint8 is only safe up to 8 points.
    code_dtype = np.uint8 if n_points <= 8 else np.uint32
    result = np.zeros((rows, cols), dtype=code_dtype)

    # Circular sampling offsets around each center pixel.
    angles = 2 * np.pi * np.arange(n_points) / n_points
    sample_points_x = radius * np.cos(angles)
    sample_points_y = radius * np.sin(angles)

    # Per-pixel LBP code computation over the interior.
    for i in range(radius, rows - radius):
        for j in range(radius, cols - radius):
            center = image[i, j]
            lbp_code = 0

            for p in range(n_points):
                sample_x = j + sample_points_x[p]
                sample_y = i + sample_points_y[p]

                # Bilinear interpolation at the sub-pixel sample point.
                x1, y1 = int(sample_x), int(sample_y)
                x2, y2 = min(x1 + 1, cols - 1), min(y1 + 1, rows - 1)

                # Fractional weights.
                tx, ty = sample_x - x1, sample_y - y1

                value = (1 - tx) * (1 - ty) * image[y1, x1] + \
                        tx * (1 - ty) * image[y1, x2] + \
                        (1 - tx) * ty * image[y2, x1] + \
                        tx * ty * image[y2, x2]

                # Set bit p when the neighbor is at least the center value.
                if value >= center:
                    lbp_code |= (1 << p)

            result[i, j] = lbp_code

    if method == 'uniform':
        # Build the uniform-pattern lookup table over all 2**n_points
        # codes. Patterns with at most two circular 0/1 transitions get
        # consecutive labels; all other patterns share label n_points + 1.
        n_codes = 1 << n_points
        uniform_mapping = np.empty(n_codes, dtype=code_dtype)
        uniform_count = 0

        for pattern in range(n_codes):
            transitions = sum(
                ((pattern >> b) & 1) != ((pattern >> ((b + 1) % n_points)) & 1)
                for b in range(n_points)
            )
            if transitions <= 2:
                uniform_mapping[pattern] = uniform_count
                uniform_count += 1
            else:
                uniform_mapping[pattern] = n_points + 1

        # Vectorized remap of the whole map in one pass.
        result = uniform_mapping[result]

    return result


def process_casme2(data_dir, output_dir, face_detector, landmark_detector, config, save_processed):
    """
    Process the CASME2 micro-expression dataset.

    Reads the CASME2 Excel coding sheet, splits samples 70/10/20 into
    train/val/test, then for each sample sequence: detects the face per
    frame, optionally localizes landmarks and crops an ROI, resizes frames,
    extracts LBP features and frame-to-frame optical flow, and (when
    ``save_processed``) writes per-frame images plus a `frames.npz` bundle.

    Args:
        data_dir (str): Root of the raw CASME2 data; expects
            `subNN/<Filename>/` image folders plus the coding spreadsheet
            at the top level.
        output_dir (str): Directory for preprocessed output.
        face_detector: Face detector instance (MTCNN or dlib).
        landmark_detector: dlib landmark predictor, or None.
        config (dict): Configuration dictionary.
        save_processed (bool): Whether to write preprocessed data to disk.
    """
    logger = logging.getLogger()
    logger.info("处理CASME2数据集...")
    
    # Read preprocessing options from the config.
    face_detector_type = config.get('preprocessing', {}).get('face_detector', 'MTCNN')
    roi_method = config.get('preprocessing', {}).get('roi_method', 'facial_landmarks')
    extract_lbp_flag = config.get('preprocessing', {}).get('extract_lbp', True)
    optical_flow_method = config.get('preprocessing', {}).get('optical_flow', 'farneback')
    optical_flow_params = config.get('preprocessing', {}).get('optical_flow_params', {})
    frame_size = config.get('dataset', {}).get('frame_size', [224, 224])
    
    # Load the metadata (CASME2 coding spreadsheet).
    metadata_path = os.path.join(data_dir, 'CASME2-coding-20190701.xlsx')
    if not os.path.exists(metadata_path):
        logger.error(f"CASME2元数据文件不存在: {metadata_path}")
        return
    
    try:
        metadata = pd.read_excel(metadata_path)
        
        # Random train/val/test split: 70% train, then 1/3 of the remaining
        # 30% for validation (~10% of the total), the rest (~20%) for test.
        train_meta = metadata.sample(frac=0.7, random_state=42)
        remaining = metadata.drop(train_meta.index)
        val_meta = remaining.sample(frac=0.33, random_state=42)  # val is 1/3 of the remainder, ~10% of all data
        test_meta = remaining.drop(val_meta.index)
        
        # Persist the split metadata.
        if save_processed:
            os.makedirs(output_dir, exist_ok=True)
            
            # Save the per-split metadata as CSV.
            train_meta.to_csv(os.path.join(output_dir, 'train_metadata.csv'), index=False)
            val_meta.to_csv(os.path.join(output_dir, 'val_metadata.csv'), index=False)
            test_meta.to_csv(os.path.join(output_dir, 'test_metadata.csv'), index=False)
        
        # Process every sample in each split.
        for metadata_df, split in [(train_meta, 'train'), (val_meta, 'val'), (test_meta, 'test')]:
            for _, row in tqdm(metadata_df.iterrows(), desc=f"处理 {split} 集", total=len(metadata_df)):
                subject = f"sub{row['Subject']}"
                filename = row['Filename']
                emotion = row['Estimated Emotion']
                
                # Raw frames live under data_dir/subNN/<Filename>/.
                sample_dir = os.path.join(data_dir, subject, filename)
                
                if not os.path.exists(sample_dir):
                    logger.warning(f"样本目录不存在: {sample_dir}")
                    continue
                
                # Mirror the subject/filename layout in the output directory.
                if save_processed:
                    output_sample_dir = os.path.join(output_dir, subject, filename)
                    os.makedirs(output_sample_dir, exist_ok=True)
                
                # Collect the frame image files in sorted (temporal) order.
                frame_files = sorted([f for f in os.listdir(sample_dir) if f.endswith(('.jpg', '.png', '.bmp'))])
                
                if len(frame_files) == 0:
                    logger.warning(f"未找到图像文件: {sample_dir}")
                    continue
                
                # Per-frame accumulators for this sample.
                processed_frames = []
                landmarks_list = []
                optical_flows = []
                lbp_features = []
                
                for i, file in enumerate(frame_files):
                    img_path = os.path.join(sample_dir, file)
                    img = cv2.imread(img_path)
                    
                    if img is None:
                        logger.warning(f"无法读取图像: {img_path}")
                        continue
                    
                    # Detect faces in this frame.
                    face_boxes = detect_face(img, face_detector, face_detector_type)
                    
                    if not face_boxes:
                        # NOTE(review): skipped frames make the frame index `i`
                        # no longer contiguous with `processed_frames`, but the
                        # optical flow below pairs consecutive *kept* frames.
                        logger.warning(f"未检测到人脸: {img_path}")
                        continue
                    
                    # Keep the largest detected face by box area.
                    face_box = max(face_boxes, key=lambda box: (box[2] - box[0]) * (box[3] - box[1]))
                    
                    # Extract the ROI.
                    if roi_method == 'facial_landmarks':
                        # Landmark-driven crop of the whole face region.
                        landmarks = detect_landmarks(img, face_box, landmark_detector)
                        landmarks_list.append(landmarks)
                        
                        # Crop around all 68 landmarks.
                        roi_img = extract_roi(img, landmarks, 'all')
                    else:
                        # Crop the raw face box directly.
                        x1, y1, x2, y2 = face_box
                        roi_img = img[y1:y2, x1:x2]
                    
                    # Resize to the configured frame size (cv2 takes (w, h)).
                    roi_img = cv2.resize(roi_img, (frame_size[1], frame_size[0]))
                    
                    # Optionally extract LBP features for this frame.
                    if extract_lbp_flag:
                        lbp_feat = extract_lbp_features(roi_img)
                        lbp_features.append(lbp_feat)
                    
                    # Optical flow between the previous kept frame and this one.
                    if i > 0:
                        flow = compute_optical_flow(processed_frames[-1], roi_img, 
                                                   optical_flow_method, optical_flow_params)
                        optical_flows.append(flow)
                    
                    # Record the processed frame.
                    processed_frames.append(roi_img)
                    
                    # Write the processed frame image.
                    if save_processed:
                        output_path = os.path.join(output_sample_dir, file)
                        cv2.imwrite(output_path, roi_img)
                
                # Bundle all per-sample arrays into one file.
                if save_processed and processed_frames:
                    # Stack frames into a [T, H, W, C] array.
                    processed_frames = np.stack(processed_frames, axis=0)  # [T, H, W, C]
                    
                    # NOTE(review): landmarks_list may contain None entries
                    # (when detect_landmarks has no detector), which makes
                    # np.array produce an object array; loading it back then
                    # requires allow_pickle — verify downstream loaders.
                    np.savez(
                        os.path.join(output_sample_dir, 'frames.npz'),
                        frames=processed_frames,
                        landmarks=np.array(landmarks_list) if landmarks_list else None,
                        optical_flows=np.array(optical_flows) if optical_flows else None,
                        lbp_features=np.array(lbp_features) if lbp_features else None
                    )
    
    except Exception as e:
        # Top-level boundary: log and swallow so other datasets can still run.
        logger.error(f"处理CASME2数据集时出错: {str(e)}")


def process_smic(data_dir, output_dir, face_detector, landmark_detector, config, save_processed):
    """
    Process the SMIC dataset (placeholder — not implemented yet).

    Args:
        data_dir (str): Raw data directory.
        output_dir (str): Output directory.
        face_detector: Face detector instance.
        landmark_detector: Landmark detector instance.
        config (dict): Configuration dictionary.
        save_processed (bool): Whether to save preprocessed data.
    """
    log = logging.getLogger()
    log.info("处理SMIC数据集...")
    # TODO: implement according to the actual SMIC directory structure.
    log.warning("SMIC数据集处理尚未实现")


def process_samm(data_dir, output_dir, face_detector, landmark_detector, config, save_processed):
    """
    Process the SAMM dataset (placeholder — not implemented yet).

    Args:
        data_dir (str): Raw data directory.
        output_dir (str): Output directory.
        face_detector: Face detector instance.
        landmark_detector: Landmark detector instance.
        config (dict): Configuration dictionary.
        save_processed (bool): Whether to save preprocessed data.
    """
    log = logging.getLogger()
    log.info("处理SAMM数据集...")
    # TODO: implement according to the actual SAMM directory structure.
    log.warning("SAMM数据集处理尚未实现")