import pandas as pd
import numpy as np
import os
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.impute import SimpleImputer
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
# Silence library warnings so the console output stays readable.
warnings.filterwarnings('ignore')

# Configure matplotlib for Chinese text: use the SimHei font and render the
# minus sign correctly when a non-ASCII font is active.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

class ASDClassifier:
    """End-to-end ASD vs. TD classification pipeline.

    Loads per-subject gaze/expression CSV recordings from ``<data_path>/ASD``
    and ``<data_path>/TD``, extracts a fixed 21-dimensional feature vector per
    subject, then trains and compares several scikit-learn classifiers.
    """

    def __init__(self, data_path, frames_to_use=2000):
        """
        Initialize the ASD classifier.

        Parameters:
        data_path: root directory containing the ASD/ and TD/ subfolders
        frames_to_use: number of leading frames kept from each recording
        """
        self.data_path = data_path
        self.frames_to_use = frames_to_use
        self.scaler = StandardScaler()
        self.features = []
        self.labels = []
        self.feature_names = []

    def load_csv_data(self, file_path, max_frames=None):
        """
        Load a single CSV file.

        Parameters:
        file_path: path of the CSV file
        max_frames: optional cap on the number of rows kept

        Returns:
        The loaded DataFrame, or None when the file cannot be read.
        """
        try:
            df = pd.read_csv(file_path)

            # Keep only the first N frames when a cap is given.
            if max_frames and len(df) > max_frames:
                df = df.head(max_frames)

            return df
        except Exception as e:
            print(f"读取文件 {file_path} 失败: {e}")
            return None

    def extract_features(self, df):
        """
        Extract the 21-dimensional feature vector from one sample.

        Feature layout (matches self.feature_names): 16 gaze features,
        3 expression features, 2 coefficient-of-variation features.

        Parameters:
        df: DataFrame of a single recording

        Returns:
        feature_vector: numpy array of 21 features, or None for empty input
        """
        if df is None or len(df) == 0:
            return None

        features = []

        # --- Gaze features (Gaze_X, Gaze_Y): 16 values ---
        if 'Gaze_X' in df.columns and 'Gaze_Y' in df.columns:
            gaze_x = df['Gaze_X'].dropna()
            gaze_y = df['Gaze_Y'].dropna()

            if len(gaze_x) > 0 and len(gaze_y) > 0:
                # Basic per-axis statistics.
                features.extend([
                    gaze_x.mean(), gaze_x.std(), gaze_x.min(), gaze_x.max(),
                    gaze_y.mean(), gaze_y.std(), gaze_y.min(), gaze_y.max(),
                ])

                # BUGFIX: frame-to-frame differences must come from row-aligned
                # (x, y) pairs.  Dropping NaNs per column can leave gaze_x and
                # gaze_y with different lengths, which previously crashed the
                # distance computation (shape-mismatch broadcast) or silently
                # paired values from different frames.
                paired = df[['Gaze_X', 'Gaze_Y']].dropna()
                gaze_x_diff = np.diff(paired['Gaze_X'].to_numpy())
                gaze_y_diff = np.diff(paired['Gaze_Y'].to_numpy())

                if len(gaze_x_diff) > 0:
                    # Gaze change statistics.
                    features.extend([
                        np.mean(gaze_x_diff), np.std(gaze_x_diff),
                        np.mean(gaze_y_diff), np.std(gaze_y_diff)
                    ])
                    # Frame-to-frame gaze travel distance.
                    gaze_distance = np.sqrt(gaze_x_diff**2 + gaze_y_diff**2)
                    features.extend([
                        np.mean(gaze_distance), np.std(gaze_distance),
                        np.max(gaze_distance), np.sum(gaze_distance)
                    ])
                else:
                    # Fewer than two aligned frames: pad diff + distance slots.
                    features.extend([0] * 8)
            else:
                features.extend([0] * 16)
        else:
            features.extend([0] * 16)

        # --- Expression features: 3 values ---
        if 'Expression' in df.columns:
            expressions = df['Expression'].dropna()
            if len(expressions) > 0:
                # BUGFIX: np.diff raises TypeError on non-numeric labels;
                # compare consecutive values instead so string labels work too
                # (identical result for numeric labels).
                vals = expressions.to_numpy()
                expr_changes = int(np.sum(vals[1:] != vals[:-1]))
                # Expression change frequency.
                features.append(expr_changes / len(expressions) if len(expressions) > 1 else 0)

                # Number of distinct expressions observed.
                unique_expressions = len(expressions.unique())
                features.append(unique_expressions)

                # Share of the most frequent expression.
                most_common_expr_ratio = expressions.value_counts().iloc[0] / len(expressions)
                features.append(most_common_expr_ratio)
            else:
                features.extend([0, 0, 0])
        else:
            features.extend([0, 0, 0])

        # --- Temporal variability: coefficient of variation per gaze axis ---
        if len(df) > 1:
            for col in ['Gaze_X', 'Gaze_Y']:
                if col in df.columns:
                    data = df[col].dropna()
                    if len(data) > 0 and data.std() > 0:
                        cv = data.std() / data.mean() if data.mean() != 0 else 0
                        features.append(abs(cv))
                    else:
                        features.append(0)
                else:
                    features.append(0)
        else:
            features.extend([0, 0])

        return np.array(features)

    def load_data(self):
        """Load every CSV sample from the ASD/ and TD/ subfolders.

        Returns:
        True when at least one sample was loaded, False otherwise.
        """
        print("正在加载数据...")
        # Locate the ASD and TD folders.
        asd_path = os.path.join(self.data_path, 'ASD')
        td_path = os.path.join(self.data_path, 'TD')

        if not os.path.exists(asd_path):
            print("未找到ASD文件夹")
            return False

        if not os.path.exists(td_path):
            print("未找到TD文件夹")
            return False

        # Load ASD samples (label 1).
        asd_files = [f for f in os.listdir(asd_path) if f.endswith('.csv')]
        print(f"找到 {len(asd_files)} 个ASD样本")

        for file in asd_files:
            file_path = os.path.join(asd_path, file)
            df = self.load_csv_data(file_path, self.frames_to_use)
            features = self.extract_features(df)
            if features is not None:
                self.features.append(features)
                self.labels.append(1)  # ASD labeled as 1

        # Load TD samples (label 0).
        td_files = [f for f in os.listdir(td_path) if f.endswith('.csv')]
        print(f"找到 {len(td_files)} 个TD样本")

        for file in td_files:
            file_path = os.path.join(td_path, file)
            df = self.load_csv_data(file_path, self.frames_to_use)
            features = self.extract_features(df)
            if features is not None:
                self.features.append(features)
                self.labels.append(0)  # TD labeled as 0

        if len(self.features) == 0:
            print("未成功加载任何数据")
            return False

        self.features = np.array(self.features)
        self.labels = np.array(self.labels)

        print(f"成功加载 {len(self.features)} 个样本")
        print(f"ASD样本: {np.sum(self.labels == 1)}个")
        print(f"TD样本: {np.sum(self.labels == 0)}个")
        print(f"特征维度: {self.features.shape[1]}")

        # Human-readable names, in the exact order produced by extract_features.
        self.feature_names = [
            'Gaze_X_mean', 'Gaze_X_std', 'Gaze_X_min', 'Gaze_X_max',
            'Gaze_Y_mean', 'Gaze_Y_std', 'Gaze_Y_min', 'Gaze_Y_max',
            'Gaze_X_diff_mean', 'Gaze_X_diff_std', 'Gaze_Y_diff_mean', 'Gaze_Y_diff_std',
            'Gaze_distance_mean', 'Gaze_distance_std', 'Gaze_distance_max', 'Gaze_distance_sum',
            'Expression_change_rate', 'Expression_unique_count', 'Expression_most_common_ratio',
            'Gaze_X_CV', 'Gaze_Y_CV'
        ]

        return True

    def preprocess_data(self):
        """Clean the feature matrix: remove infinities, impute, standardize."""
        print("正在进行数据预处理...")

        # Replace inf/-inf with NaN so the imputer can handle them.
        self.features = np.where(np.isinf(self.features), np.nan, self.features)

        # Fill missing values with the per-feature median.
        imputer = SimpleImputer(strategy='median')
        self.features = imputer.fit_transform(self.features)

        # Standardize features to zero mean / unit variance.
        self.features = self.scaler.fit_transform(self.features)

        print("数据预处理完成")

    def split_data(self, test_size=0.2, random_state=42):
        """Split into stratified train and test sets."""
        print(f"正在划分数据集 (训练集: {int((1-test_size)*100)}%, 测试集: {int(test_size*100)}%)")

        # Stratified sampling keeps the ASD/TD ratio similar in both splits.
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            self.features, self.labels,
            test_size=test_size,
            random_state=random_state,
            stratify=self.labels
        )

        print(f"训练集样本数: {len(self.X_train)} (ASD: {np.sum(self.y_train == 1)}, TD: {np.sum(self.y_train == 0)})")
        print(f"测试集样本数: {len(self.X_test)} (ASD: {np.sum(self.y_test == 1)}, TD: {np.sum(self.y_test == 0)})")

    def train_models(self):
        """Train the candidate classifiers and record test-set metrics."""
        print("正在训练分类模型...")

        self.models = {
            'Random Forest': RandomForestClassifier(n_estimators=100, random_state=42),
            'SVM': SVC(kernel='rbf', random_state=42),
            'Logistic Regression': LogisticRegression(random_state=42, max_iter=1000)
        }

        self.model_results = {}

        for name, model in self.models.items():
            print(f"训练 {name}...")
            model.fit(self.X_train, self.y_train)

            # Predict on the held-out test set.
            y_pred = model.predict(self.X_test)

            # Evaluate: accuracy, per-class report, confusion matrix.
            accuracy = accuracy_score(self.y_test, y_pred)
            report = classification_report(self.y_test, y_pred, target_names=['TD', 'ASD'], output_dict=True)
            cm = confusion_matrix(self.y_test, y_pred)

            self.model_results[name] = {
                'model': model,
                'predictions': y_pred,
                'accuracy': accuracy,
                'classification_report': report,
                'confusion_matrix': cm
            }

            print(f"{name} 准确率: {accuracy:.4f}")

    def evaluate_models(self):
        """Print a summary of every trained model's test-set performance."""
        print("\n" + "="*50)
        print("模型评估结果")
        print("="*50)

        for name, results in self.model_results.items():
            print(f"\n{name}:")
            print(f"准确率: {results['accuracy']:.4f}")
            print("\n分类报告:")
            report = results['classification_report']
            print(f"TD类别 - 精确率: {report['TD']['precision']:.4f}, 召回率: {report['TD']['recall']:.4f}, F1-score: {report['TD']['f1-score']:.4f}")
            print(f"ASD类别 - 精确率: {report['ASD']['precision']:.4f}, 召回率: {report['ASD']['recall']:.4f}, F1-score: {report['ASD']['f1-score']:.4f}")

            print("\n混淆矩阵:")
            cm = results['confusion_matrix']
            print(f"真实TD预测TD: {cm[0,0]}, 真实TD预测ASD: {cm[0,1]}")
            print(f"真实ASD预测TD: {cm[1,0]}, 真实ASD预测ASD: {cm[1,1]}")

    def plot_results(self):
        """Draw a 2x2 summary figure of the classification results."""
        try:
            fig, axes = plt.subplots(2, 2, figsize=(15, 12))
            fig.suptitle('ASD分类模型结果分析', fontsize=16)

            # 1. Accuracy comparison across models.
            model_names = list(self.model_results.keys())
            accuracies = [self.model_results[name]['accuracy'] for name in model_names]

            axes[0,0].bar(model_names, accuracies, color=['skyblue', 'lightgreen', 'lightcoral'])
            axes[0,0].set_title('模型准确率比较')
            axes[0,0].set_ylabel('准确率')
            axes[0,0].set_ylim(0, 1)
            for i, v in enumerate(accuracies):
                axes[0,0].text(i, v + 0.01, f'{v:.3f}', ha='center')

            # 2. Feature importances from the random forest.
            if 'Random Forest' in self.model_results:
                rf_model = self.model_results['Random Forest']['model']
                feature_importance = rf_model.feature_importances_

                # Keep the 10 most important features.
                top_indices = np.argsort(feature_importance)[-10:]
                top_features = [self.feature_names[i] for i in top_indices]
                top_importance = feature_importance[top_indices]

                axes[0,1].barh(range(len(top_features)), top_importance)
                axes[0,1].set_yticks(range(len(top_features)))
                axes[0,1].set_yticklabels(top_features)
                axes[0,1].set_title('特征重要性 (随机森林)')
                axes[0,1].set_xlabel('重要性分数')

            # 3. Confusion-matrix heatmap for the best model.
            best_model_name = max(self.model_results.keys(), key=lambda k: self.model_results[k]['accuracy'])
            cm = self.model_results[best_model_name]['confusion_matrix']

            sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                       xticklabels=['TD', 'ASD'], yticklabels=['TD', 'ASD'], ax=axes[1,0])
            axes[1,0].set_title(f'混淆矩阵 ({best_model_name})')
            axes[1,0].set_xlabel('预测标签')
            axes[1,0].set_ylabel('真实标签')

            # 4. Class distribution of the dataset.
            asd_count = np.sum(self.labels == 1)
            td_count = np.sum(self.labels == 0)

            axes[1,1].pie([td_count, asd_count], labels=['TD', 'ASD'], autopct='%1.1f%%',
                         colors=['lightblue', 'lightpink'])
            axes[1,1].set_title('数据集分布')

            plt.tight_layout()
            plt.show()

        except Exception as e:
            print(f"绘图时出现错误: {e}")

    def run_analysis(self):
        """Run the full pipeline: load, preprocess, split, train, evaluate, plot."""
        print("开始ASD分类分析...")
        print("="*50)

        # 1. Load data; abort when nothing could be read.
        if not self.load_data():
            print("数据加载失败，分析终止")
            return

        # 2. Preprocess.
        self.preprocess_data()

        # 3. Train/test split.
        self.split_data()

        # 4. Train models.
        self.train_models()

        # 5. Evaluate models.
        self.evaluate_models()

        # 6. Plot results.
        self.plot_results()

        print("\n分析完成！")

# Script entry point
if __name__ == "__main__":
    import sys

    # Data root containing the ASD/ and TD/ subfolders.  Allow overriding the
    # default location via the first command-line argument so the script is
    # usable outside the original author's machine.
    data_path = sys.argv[1] if len(sys.argv) > 1 else r"c:\Users\27806\Desktop\python"
    classifier = ASDClassifier(data_path, frames_to_use=2000)

    # Run the full analysis pipeline.
    classifier.run_analysis()