import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
import os
from glob import glob
import warnings
import sys
from matplotlib.font_manager import FontProperties

# Configure a font so Chinese text renders correctly in matplotlib figures
def setup_chinese_font():
    """Configure matplotlib to use a Chinese-capable font, trying candidates in priority order.

    Side effects: mutates ``plt.rcParams`` and prints which font (if any) was chosen.
    Returns ``None``.
    """
    # Candidate fonts, most preferred first.
    chinese_fonts = [
        'Microsoft YaHei',   # Microsoft YaHei
        'SimHei',            # SimHei (bold gothic)
        'SimSun',            # SimSun (serif)
        'NSimSun',           # New SimSun
        'FangSong',          # FangSong
        'KaiTi',             # KaiTi
        'Arial Unicode MS'   # broad-coverage fallback
    ]

    # NOTE: the previous implementation built FontProperties(fname=...) inside a
    # try/except, but FontProperties does not validate the file path at
    # construction time, so the check never failed and the first candidate was
    # always "set" even when absent. Instead, ask matplotlib which font family
    # names are actually registered on this machine.
    from matplotlib import font_manager
    available = {f.name for f in font_manager.fontManager.ttflist}

    for font_name in chinese_fonts:
        if font_name in available:
            plt.rcParams['font.family'] = ['sans-serif']
            plt.rcParams['font.sans-serif'] = [font_name]
            plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with CJK fonts
            print(f"成功设置中文字体: {font_name}")
            return

    print("警告：未能设置中文字体，图表中的中文可能无法正确显示")

# Emit an early heartbeat so a silent hang is distinguishable from a slow start
print("脚本开始执行...")
sys.stdout.flush()

# Suppress library warnings (pandas/sklearn deprecation noise) for cleaner console output
warnings.filterwarnings('ignore')

def load_and_preprocess_data(file_path, max_frames=2000):
    """Load one per-frame gaze/expression CSV and normalize it to a fixed length.

    Args:
        file_path: path (or file-like object) readable by ``pd.read_csv``.
        max_frames: target number of rows; shorter recordings are padded by
            repeating the last frame, longer ones are truncated.

    Returns:
        A DataFrame with exactly ``max_frames`` rows and gaze coordinates
        clamped to [-1, 1], or ``None`` if the file is missing required
        columns, is empty, or fails to parse.
    """
    try:
        df = pd.read_csv(file_path)

        # Reject files that lack any of the columns the pipeline depends on
        required_columns = ['Frame', 'Gaze_X', 'Gaze_Y', 'Expression']
        if not all(col in df.columns for col in required_columns):
            print(f"文件 {file_path} 缺少必要的列")
            sys.stdout.flush()
            return None

        # An empty file cannot be padded from its last row — skip it
        if df.empty:
            return None

        # Normalize length: pad with copies of the last frame, or truncate
        if len(df) < max_frames:
            last_row = df.iloc[-1:]
            fill_rows = pd.concat([last_row] * (max_frames - len(df)), ignore_index=True)
            df = pd.concat([df, fill_rows], ignore_index=True)
        else:
            df = df.iloc[:max_frames]

        # Clamp gaze coordinates into the valid [-1, 1] range
        df['Gaze_X'] = df['Gaze_X'].clip(-1, 1)
        df['Gaze_Y'] = df['Gaze_Y'].clip(-1, 1)

        return df
    except Exception as e:
        print(f"处理文件 {file_path} 时出错: {str(e)}")
        sys.stdout.flush()
        return None

def extract_features(df):
    """Summarize one preprocessed recording into a flat feature dict.

    Expects columns ``Gaze_X``, ``Gaze_Y``, ``Expression``. Returns a dict of
    scalar features (statistics, expression distribution, gaze kinematics,
    quadrant occupancy, concentration), or ``None`` on failure.
    """
    try:
        gx = df['Gaze_X']
        gy = df['Gaze_Y']
        expr = df['Expression']
        n_frames = len(df)

        # Basic statistics and expression dynamics
        features = {
            'gaze_x_mean': gx.mean(),
            'gaze_x_std': gx.std(),
            'gaze_y_mean': gy.mean(),
            'gaze_y_std': gy.std(),
            'expression_mode': expr.mode().iloc[0],
            # fraction of frames where the expression label switched
            'expression_change_rate': (expr != expr.shift()).sum() / n_frames,
        }

        # Occupancy of each expression label (labels 0..6)
        for label in range(7):
            features[f'expression_{label}_ratio'] = (expr == label).mean()

        # Frame-to-frame gaze displacement (Euclidean); first frame has no
        # predecessor, so its distance is 0
        step = np.sqrt(gx.diff() ** 2 + gy.diff() ** 2).fillna(0)
        features['total_gaze_distance'] = step.sum()
        features['gaze_velocity_mean'] = step.mean()
        features['gaze_velocity_std'] = step.std()

        # Fraction of time spent in each quadrant of gaze space
        x_neg = gx <= 0
        y_neg = gy <= 0
        x_pos = gx > 0
        y_pos = gy > 0
        features['quadrant_1_ratio'] = (x_neg & y_neg).mean()
        features['quadrant_2_ratio'] = (x_pos & y_neg).mean()
        features['quadrant_3_ratio'] = (x_neg & y_pos).mean()
        features['quadrant_4_ratio'] = (x_pos & y_pos).mean()

        # Spread of fixation points (lower = more concentrated gaze)
        features['gaze_concentration'] = np.sqrt(gx.var() + gy.var())

        return features
    except Exception as e:
        print(f"特征提取错误: {str(e)}")
        sys.stdout.flush()
        return None

def print_metrics(y_true, y_pred, dataset_name=""):
    """Print accuracy/precision/recall/F1 for one split and return them.

    Args:
        y_true: ground-truth labels (1 = ASD, 0 = TD).
        y_pred: predicted labels.
        dataset_name: label prefixed to the printed header (e.g. "训练集").

    Returns:
        dict with keys ``accuracy``, ``precision``, ``recall``, ``f1``.
    """
    from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

    # ASD (label 1) is treated as the positive class throughout
    scores = {
        'accuracy': accuracy_score(y_true, y_pred),
        'precision': precision_score(y_true, y_pred, pos_label=1),
        'recall': recall_score(y_true, y_pred, pos_label=1),
        'f1': f1_score(y_true, y_pred, pos_label=1),
    }

    print(f"\n{dataset_name}评估指标：")
    print(f"准确率（Accuracy）：    {scores['accuracy']:.4f}")
    print(f"精确率（Precision）：   {scores['precision']:.4f}")
    print(f"召回率（Recall）：      {scores['recall']:.4f}")
    print(f"F1分数（F1-Score）：   {scores['f1']:.4f}")

    return scores

def plot_feature_importance(feature_importance, save_path):
    """Render a horizontal bar chart of feature importances and save it as PNG.

    Args:
        feature_importance: DataFrame with columns ``feature`` and
            ``importance`` (assumed pre-sorted by the caller). The input is
            NOT modified.
        save_path: destination image path.
    """
    plt.figure(figsize=(12, 8))

    # English feature key -> Chinese display label
    feature_mapping = {
        'gaze_x_mean': '视线X均值',
        'gaze_x_std': '视线X标准差',
        'gaze_y_mean': '视线Y均值',
        'gaze_y_std': '视线Y标准差',
        'expression_mode': '表情众数',
        'expression_change_rate': '表情变化率',
        'total_gaze_distance': '视线轨迹总距离',
        'gaze_velocity_mean': '视线速度均值',
        'gaze_velocity_std': '视线速度标准差',
        'quadrant_1_ratio': '第一象限占比',
        'quadrant_2_ratio': '第二象限占比',
        'quadrant_3_ratio': '第三象限占比',
        'quadrant_4_ratio': '第四象限占比',
        'gaze_concentration': '注视点集中度'
    }

    # Display labels for the seven expression-ratio features
    for i in range(7):
        feature_mapping[f'expression_{i}_ratio'] = f'表情{i}占比'

    # Translate names on a copy — the original code assigned back into the
    # caller's DataFrame, silently mutating it as a side effect.
    plot_df = feature_importance.copy()
    plot_df['feature'] = plot_df['feature'].map(
        lambda x: feature_mapping.get(x, x)  # unknown keys fall through unchanged
    )

    sns.barplot(data=plot_df, x='importance', y='feature')
    plt.title('特征重要性排序', fontsize=14)
    plt.xlabel('重要性得分', fontsize=12)
    plt.ylabel('特征名称', fontsize=12)
    plt.xticks(fontsize=10)
    plt.yticks(fontsize=10)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()

def plot_confusion_matrix(cm, save_path):
    """Draw a 2x2 confusion-matrix heatmap (TD vs. ASD) and save it as PNG.

    Args:
        cm: confusion matrix as returned by ``sklearn.metrics.confusion_matrix``.
        save_path: destination image path.
    """
    class_names = ['TD儿童', 'ASD儿童']

    plt.figure(figsize=(8, 6))
    sns.heatmap(
        cm,
        annot=True,        # write the count inside each cell
        fmt='d',
        cmap='Blues',
        xticklabels=class_names,
        yticklabels=class_names,
    )
    plt.title('混淆矩阵', fontsize=14)
    plt.xlabel('预测标签', fontsize=12)
    plt.ylabel('真实标签', fontsize=12)
    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()

def main():
    """Entry point: load per-child gaze/expression CSVs from ./ASD and ./TD,
    extract features, train a RandomForest ASD-vs-TD classifier, and save
    metrics plus diagnostic figures next to the script."""
    # Configure a CJK-capable font before any plotting happens
    setup_chinese_font()
    
    print("进入main函数...")
    sys.stdout.flush()
    
    # Fix the RNG seed so runs are reproducible
    np.random.seed(42)
    
    # Resolve data paths relative to this script, not the current working dir
    current_dir = os.path.dirname(os.path.abspath(__file__))
    print(f"当前目录: {current_dir}")
    sys.stdout.flush()
    
    # Full paths to the two class folders
    asd_path = os.path.join(current_dir, 'ASD')
    td_path = os.path.join(current_dir, 'TD')
    
    # Bail out early if either data folder is missing
    print(f"检查ASD路径: {asd_path}")
    print(f"检查TD路径: {td_path}")
    sys.stdout.flush()
    
    if not os.path.exists(asd_path):
        print(f"错误：ASD文件夹不存在: {asd_path}")
        sys.stdout.flush()
        return
    if not os.path.exists(td_path):
        print(f"错误：TD文件夹不存在: {td_path}")
        sys.stdout.flush()
        return
    
    # Collect every CSV file in each class folder
    asd_files = glob(os.path.join(asd_path, '*.csv'))
    td_files = glob(os.path.join(td_path, '*.csv'))
    
    print(f"找到 {len(asd_files)} 个ASD文件和 {len(td_files)} 个TD文件")
    sys.stdout.flush()
    
    if len(asd_files) == 0 or len(td_files) == 0:
        print("错误：未找到足够的数据文件，请确保ASD和TD文件夹中都有CSV文件")
        sys.stdout.flush()
        return
    
    # Accumulate one feature dict + label per successfully processed file
    all_features = []
    all_labels = []
    
    # Process ASD recordings (positive class)
    print("\n开始处理ASD文件...")
    sys.stdout.flush()
    
    for i, file_path in enumerate(asd_files, 1):
        print(f"处理ASD文件 {i}/{len(asd_files)}: {os.path.basename(file_path)}")
        sys.stdout.flush()
        
        df = load_and_preprocess_data(file_path)
        if df is not None:
            features = extract_features(df)
            if features is not None:
                all_features.append(features)
                all_labels.append(1)  # label 1 = ASD
    
    # Process TD recordings (negative class)
    print("\n开始处理TD文件...")
    sys.stdout.flush()
    
    for i, file_path in enumerate(td_files, 1):
        print(f"处理TD文件 {i}/{len(td_files)}: {os.path.basename(file_path)}")
        sys.stdout.flush()
        
        df = load_and_preprocess_data(file_path)
        if df is not None:
            features = extract_features(df)
            if features is not None:
                all_features.append(features)
                all_labels.append(0)  # label 0 = TD
    
    if len(all_features) == 0:
        print("错误：没有成功处理任何数据文件")
        sys.stdout.flush()
        return
    
    print(f"\n成功提取特征，开始训练模型...")
    print(f"总样本数：{len(all_features)}")
    sys.stdout.flush()
    
    try:
        # Assemble the feature matrix and label vector
        feature_df = pd.DataFrame(all_features)
        labels = np.array(all_labels)
        
        print(f"\n数据集统计：")
        print(f"ASD样本数量：{sum(labels == 1)}")
        print(f"TD样本数量：{sum(labels == 0)}")
        sys.stdout.flush()
        
        # Standardize features to zero mean / unit variance
        scaler = StandardScaler()
        X = scaler.fit_transform(feature_df)
        y = labels
        
        # Stratified split keeps the ASD/TD ratio equal in both sets
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.2, stratify=y, random_state=42
        )
        
        # Train the random forest classifier
        print("\n开始训练随机森林模型...")
        sys.stdout.flush()
        
        # Shallow trees + balanced class weights to limit overfitting on a small dataset
        rf_model = RandomForestClassifier(
            n_estimators=1000,
            max_depth=5,
            class_weight='balanced',
            random_state=42
        )
        
        rf_model.fit(X_train, y_train)
        print("模型训练完成！")
        sys.stdout.flush()
        
        # Predictions for both splits
        y_train_pred = rf_model.predict(X_train)
        y_test_pred = rf_model.predict(X_test)
        
        # Report metrics for train and test sets
        train_metrics = print_metrics(y_train, y_train_pred, "训练集")
        test_metrics = print_metrics(y_test, y_test_pred, "测试集")
        
        # Side-by-side train/test metrics table
        metrics_df = pd.DataFrame({
            '指标': ['准确率（Accuracy）', '精确率（Precision）', '召回率（Recall）', 'F1分数（F1-Score）'],
            '训练集': [
                train_metrics['accuracy'],
                train_metrics['precision'],
                train_metrics['recall'],
                train_metrics['f1']
            ],
            '测试集': [
                test_metrics['accuracy'],
                test_metrics['precision'],
                test_metrics['recall'],
                test_metrics['f1']
            ]
        })
        
        # Persist metrics (utf-8-sig BOM so Excel renders the Chinese headers)
        metrics_df.to_csv('model_metrics.csv', index=False, encoding='utf-8-sig')
        print("\n评估指标对比表：")
        print(metrics_df.to_string(index=False))
        
        # Detailed per-class report on the test set
        y_pred = rf_model.predict(X_test)
        print("\n分类报告:")
        print(classification_report(y_test, y_pred, target_names=['TD', 'ASD']))
        sys.stdout.flush()
        
        # Save the feature-importance chart
        print("\n生成特征重要性图...")
        sys.stdout.flush()
        
        feature_importance = pd.DataFrame({
            'feature': feature_df.columns,
            'importance': rf_model.feature_importances_
        }).sort_values('importance', ascending=False)
        
        plot_feature_importance(feature_importance, os.path.join(current_dir, '特征重要性.png'))
        
        # Save the confusion-matrix heatmap
        print("\n生成混淆矩阵...")
        sys.stdout.flush()
        
        cm = confusion_matrix(y_test, y_pred)
        plot_confusion_matrix(cm, os.path.join(current_dir, '混淆矩阵.png'))
        
        # Show the ten most important features
        print("\n前10个最重要的特征：")
        print(feature_importance.head(10))
        sys.stdout.flush()
        
        print("\n分析完成！")
        sys.stdout.flush()
        
    except Exception as e:
        print(f"错误：处理数据时发生异常: {str(e)}")
        sys.stdout.flush()

if __name__ == '__main__':
    main()