# ASD与TD儿童分类任务完整实现

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.metrics import classification_report, confusion_matrix, roc_curve, auc
from sklearn.impute import SimpleImputer
import xgboost as xgb
import os
import glob
import warnings
warnings.filterwarnings('ignore')

# 设置中文字体
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False

print("=== ASD与TD儿童分类任务 ===")
print("基于视线方向(Gaze_X, Gaze_Y)和面部表情(Expression)数据进行分类")

# ==================== 一、数据加载与预处理 ====================

def load_and_preprocess_data(asd_folder='ASD', td_folder='TD', max_frames=2000):
    """Load per-child CSV recordings for the ASD and TD groups.

    Each ``*.csv`` file inside a group folder is one sample; only the
    first ``max_frames`` rows (frames) of every file are kept.

    Args:
        asd_folder: Directory containing ASD sample CSV files.
        td_folder: Directory containing TD sample CSV files.
        max_frames: Maximum number of frames (rows) kept per sample.

    Returns:
        Tuple ``(all_data, labels)``: ``all_data`` is a list of
        DataFrames and ``labels`` a parallel list with 1 for ASD and
        0 for TD. Both are empty when neither folder exists.
    """
    print("\n1. 数据加载与预处理...")

    all_data = []
    labels = []

    def _load_group(folder, label, group_name):
        # Shared loader for one group folder (the original duplicated
        # this loop for ASD and TD); appends into the enclosing lists.
        if not os.path.exists(folder):
            print(f"未找到{group_name}文件夹: {folder}")
            return
        files = glob.glob(os.path.join(folder, '*.csv'))
        print(f"找到 {len(files)} 个{group_name}样本文件")
        for file in files:
            try:
                df = pd.read_csv(file)
                # Keep only the first max_frames frames.
                all_data.append(df.head(max_frames))
                labels.append(label)
            except Exception as e:
                # Skip unreadable files but keep loading the rest.
                print(f"读取文件 {file} 时出错: {e}")

    _load_group(asd_folder, 1, 'ASD')  # ASD labelled 1
    _load_group(td_folder, 0, 'TD')    # TD labelled 0

    print(f"总计加载 {len(all_data)} 个样本")
    print(f"ASD样本数: {sum(labels)}, TD样本数: {len(labels) - sum(labels)}")

    return all_data, labels

def extract_features(data_list):
    """Summarise each per-child time series into one flat feature row.

    Features: gaze X/Y statistics, gaze-movement statistics, ratios of
    six common expressions, expression-change frequency, and sequence
    length. Columns are only produced for inputs that have the
    corresponding source column.

    Args:
        data_list: List of DataFrames, each optionally containing
            ``Gaze_X``, ``Gaze_Y`` and ``Expression`` columns.

    Returns:
        DataFrame with one row of scalar features per input sample.
        NaN statistics (e.g. from empty series) are replaced by 0.
    """
    print("\n2. 特征提取...")

    features = []

    for df in data_list:
        sample_features = {}
        # Guarded denominator: an empty (0-row) sample previously
        # raised ZeroDivisionError in the velocity/frequency features.
        n_frames = len(df)

        # Basic gaze statistics
        if 'Gaze_X' in df.columns:
            sample_features['gaze_x_mean'] = df['Gaze_X'].mean()
            sample_features['gaze_x_std'] = df['Gaze_X'].std()
            sample_features['gaze_x_min'] = df['Gaze_X'].min()
            sample_features['gaze_x_max'] = df['Gaze_X'].max()

        if 'Gaze_Y' in df.columns:
            sample_features['gaze_y_mean'] = df['Gaze_Y'].mean()
            sample_features['gaze_y_std'] = df['Gaze_Y'].std()
            sample_features['gaze_y_min'] = df['Gaze_Y'].min()
            sample_features['gaze_y_max'] = df['Gaze_Y'].max()

        # Gaze-movement features (frame-to-frame Euclidean distance)
        if 'Gaze_X' in df.columns and 'Gaze_Y' in df.columns:
            gaze_diff_x = df['Gaze_X'].diff()
            gaze_diff_y = df['Gaze_Y'].diff()
            movement_distance = np.sqrt(gaze_diff_x**2 + gaze_diff_y**2)

            sample_features['movement_mean'] = movement_distance.mean()
            sample_features['movement_std'] = movement_distance.std()
            sample_features['movement_max'] = movement_distance.max()

            # Average movement per frame; 0 for empty samples.
            sample_features['movement_velocity'] = (
                movement_distance.sum() / n_frames if n_frames else 0.0
            )

        # Expression features
        if 'Expression' in df.columns:
            # Normalised distribution of observed expressions
            expr_counts = df['Expression'].value_counts(normalize=True)

            # Ratio of each expected common expression (0 if absent)
            common_expressions = ['neutral', 'happy', 'sad', 'angry', 'surprise', 'fear']
            for expr in common_expressions:
                sample_features[f'expr_{expr}_ratio'] = expr_counts.get(expr, 0)

            # Frequency of expression switches. NOTE: the first row
            # always compares unequal to the NaN produced by shift(),
            # so a non-empty sample counts at least one "change".
            expr_changes = (df['Expression'] != df['Expression'].shift()).sum()
            sample_features['expr_change_freq'] = (
                expr_changes / n_frames if n_frames else 0.0
            )

        # Time-series length feature
        sample_features['sequence_length'] = n_frames

        # Replace NaN statistics with 0 so downstream models are safe.
        for key, value in sample_features.items():
            if pd.isna(value):
                sample_features[key] = 0

        features.append(sample_features)

    feature_df = pd.DataFrame(features)
    print(f"提取特征维度: {feature_df.shape}")
    print(f"特征列: {list(feature_df.columns)}")

    return feature_df

# 模拟数据生成（如果没有实际数据文件）
def generate_mock_data(n_asd=50, n_td=50, n_frames=2000):
    """Create synthetic gaze/expression recordings for demo runs.

    Deterministic (seeded) samples: ASD-like recordings have a more
    dispersed gaze and different expression mix than TD-like ones.

    Args:
        n_asd: Number of ASD samples to synthesise (labelled 1).
        n_td: Number of TD samples to synthesise (labelled 0).
        n_frames: Exclusive upper bound on frames per sample
            (each sample length is drawn from [500, n_frames)).

    Returns:
        Tuple ``(all_data, labels)`` of DataFrames and 0/1 labels.
    """
    print("\n生成模拟数据用于演示...")

    np.random.seed(42)
    samples = []
    sample_labels = []

    # (count, label, gaze-X (mean, std), gaze-Y (mean, std), expressions, probs)
    cohorts = [
        # ASD cohort: gaze offset and more spread
        (n_asd, 1, (0.3, 0.2), (0.4, 0.25),
         ['neutral', 'sad', 'angry'], [0.6, 0.25, 0.15]),
        # TD cohort: gaze more concentrated
        (n_td, 0, (0.5, 0.1), (0.5, 0.15),
         ['neutral', 'happy', 'surprise'], [0.7, 0.2, 0.1]),
    ]

    for count, label, (mx, sx), (my, sy), expr_set, expr_p in cohorts:
        for _ in range(count):
            length = np.random.randint(500, n_frames)
            samples.append(pd.DataFrame({
                'Frame': range(length),
                'Gaze_X': np.random.normal(mx, sx, length),
                'Gaze_Y': np.random.normal(my, sy, length),
                'Expression': np.random.choice(expr_set, length, p=expr_p),
            }))
            sample_labels.append(label)

    return samples, sample_labels

# ==================== 二、主要执行流程 ====================

# Try real data first; fall back to the synthetic demo set if unavailable.
try:
    data_list, labels = load_and_preprocess_data()
    if len(data_list) == 0:
        raise FileNotFoundError("未找到数据文件")
except Exception:
    # Was a bare `except:`, which also swallowed KeyboardInterrupt /
    # SystemExit; narrowed to Exception so those still propagate.
    print("未找到真实数据文件，使用模拟数据进行演示")
    data_list, labels = generate_mock_data()

# Feature extraction: one flat vector per child.
features_df = extract_features(data_list)

# Mean-impute missing values (a column is NaN for samples whose raw
# recording lacked the corresponding source column entirely).
imputer = SimpleImputer(strategy='mean')
features_imputed = imputer.fit_transform(features_df)
features_df = pd.DataFrame(features_imputed, columns=features_df.columns)

# Standardise features to zero mean / unit variance.
scaler = StandardScaler()
features_scaled = scaler.fit_transform(features_df)

# Stratified split keeps the ASD/TD ratio comparable in both sets.
X_train, X_test, y_train, y_test = train_test_split(
    features_scaled, labels, test_size=0.2, random_state=42, stratify=labels
)

print(f"\n3. 数据集划分:")
print(f"训练集: {X_train.shape[0]} 样本")
print(f"测试集: {X_test.shape[0]} 样本")
print(f"训练集ASD比例: {np.mean(y_train):.2f}")
print(f"测试集ASD比例: {np.mean(y_test):.2f}")

# ==================== 三、模型训练与评估 ====================

print("\n4. 模型训练与评估...")

# Hoisted out of the per-model loop: the original re-ran this import
# on every iteration.
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

# Candidate classifiers, all seeded for reproducibility.
models = {
    'Random Forest': RandomForestClassifier(n_estimators=100, random_state=42),
    'SVM': SVC(kernel='rbf', probability=True, random_state=42),
    'XGBoost': xgb.XGBClassifier(random_state=42, eval_metric='logloss')
}

# Per-model evaluation results, keyed by model name.
results = {}

# Train and evaluate each model on the shared train/test split.
for name, model in models.items():
    print(f"\n训练 {name} 模型...")

    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)
    # Positive-class (ASD) probability for ROC curves; None if the
    # model does not expose predict_proba.
    y_pred_proba = model.predict_proba(X_test)[:, 1] if hasattr(model, "predict_proba") else None

    accuracy = accuracy_score(y_test, y_pred)
    precision = precision_score(y_test, y_pred)
    recall = recall_score(y_test, y_pred)
    f1 = f1_score(y_test, y_pred)

    results[name] = {
        'model': model,
        'accuracy': accuracy,
        'precision': precision,
        'recall': recall,
        'f1': f1,
        'y_pred': y_pred,
        'y_pred_proba': y_pred_proba
    }

    print(f"{name} 结果:")
    print(f"  准确率: {accuracy:.3f}")
    print(f"  精确率: {precision:.3f}")
    print(f"  召回率: {recall:.3f}")
    print(f"  F1分数: {f1:.3f}")

# ==================== 四、结果可视化与保存 ====================

print("\n5. 结果可视化与保存...")

# Create a timestamped output folder so repeated runs never overwrite
# each other's figures and reports.
import datetime

timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
results_folder = "ASD_TD_Classification_Results_" + timestamp
os.makedirs(results_folder, exist_ok=True)

print(f"创建结果保存文件夹: {results_folder}")

# Build the main 2x3 figure summarising the whole experiment.
fig, axes = plt.subplots(2, 3, figsize=(18, 12))
fig.suptitle('ASD与TD儿童分类任务结果分析', fontsize=16, fontweight='bold')

# Panel 1: overlayed histograms of the three headline features,
# split by group (all three features share one axis).
ax1 = axes[0, 0]
feature_cols = ['gaze_x_mean', 'gaze_y_mean', 'movement_mean']
if all(col in features_df.columns for col in feature_cols):
    asd_indices = [i for i, label in enumerate(labels) if label == 1]
    td_indices = [i for i, label in enumerate(labels) if label == 0]
    
    for i, col in enumerate(feature_cols):
        asd_data = features_df.iloc[asd_indices][col]
        td_data = features_df.iloc[td_indices][col]
        
        ax1.hist(asd_data, alpha=0.5, label=f'ASD-{col}', bins=20)
        ax1.hist(td_data, alpha=0.5, label=f'TD-{col}', bins=20)
    
    ax1.set_title('主要特征分布对比')
    ax1.set_xlabel('特征值')
    ax1.set_ylabel('频次')
    ax1.legend()

# Panel 2: grouped bar chart of the four metrics per model.
ax2 = axes[0, 1]
model_names = list(results.keys())
metrics = ['accuracy', 'precision', 'recall', 'f1']
x = np.arange(len(model_names))
width = 0.2

for i, metric in enumerate(metrics):
    values = [results[name][metric] for name in model_names]
    ax2.bar(x + i*width, values, width, label=metric.capitalize())

ax2.set_title('模型性能对比')
ax2.set_xlabel('模型')
ax2.set_ylabel('分数')
ax2.set_xticks(x + width*1.5)
ax2.set_xticklabels(model_names)
ax2.legend()
ax2.set_ylim(0, 1)

# Panel 3: confusion matrix of the best model (selected by test-set F1).
best_model_name = max(results.keys(), key=lambda x: results[x]['f1'])
best_result = results[best_model_name]

ax3 = axes[0, 2]
cm = confusion_matrix(y_test, best_result['y_pred'])
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', ax=ax3)
ax3.set_title(f'混淆矩阵 - {best_model_name}')
ax3.set_xlabel('预测标签')
ax3.set_ylabel('真实标签')

# Panel 4: ROC curve per model (only those that yielded probabilities).
ax4 = axes[1, 0]
for name, result in results.items():
    if result['y_pred_proba'] is not None:
        fpr, tpr, _ = roc_curve(y_test, result['y_pred_proba'])
        roc_auc = auc(fpr, tpr)
        ax4.plot(fpr, tpr, label=f'{name} (AUC = {roc_auc:.3f})')

ax4.plot([0, 1], [0, 1], 'k--')
ax4.set_xlim([0.0, 1.0])
ax4.set_ylim([0.0, 1.05])
ax4.set_xlabel('假正率')
ax4.set_ylabel('真正率')
ax4.set_title('ROC曲线')
ax4.legend()

# Panel 5: Random-Forest feature importances.
ax5 = axes[1, 1]
if 'Random Forest' in results:
    rf_model = results['Random Forest']['model']
    importances = rf_model.feature_importances_
    feature_names = features_df.columns
    
    # Top 10 features, plotted least-to-most important (barh grows upward).
    indices = np.argsort(importances)[-10:]
    ax5.barh(range(len(indices)), importances[indices])
    ax5.set_yticks(range(len(indices)))
    ax5.set_yticklabels([feature_names[i] for i in indices])
    ax5.set_title('特征重要性 (Random Forest)')
    ax5.set_xlabel('重要性')

# Panel 6: textual dataset / result summary rendered into an empty axis.
ax6 = axes[1, 2]
stats_data = {
    '样本总数': len(labels),
    'ASD样本数': sum(labels),
    'TD样本数': len(labels) - sum(labels),
    '特征维度': features_df.shape[1],
    '最佳模型': best_model_name,
    '最佳F1分数': f"{best_result['f1']:.3f}"
}

text_str = '\n'.join([f'{k}: {v}' for k, v in stats_data.items()])
ax6.text(0.1, 0.5, text_str, transform=ax6.transAxes, fontsize=12,
         verticalalignment='center', bbox=dict(boxstyle="round", facecolor='lightblue'))
ax6.set_title('数据统计信息')
ax6.axis('off')

plt.tight_layout()

# Save the combined figure into the results folder, then display it.
main_plot_path = os.path.join(results_folder, "comprehensive_analysis.png")
plt.savefig(main_plot_path, dpi=300, bbox_inches='tight')
print(f"保存综合分析图: {main_plot_path}")
plt.show()

# ==================== 保存单独的图表 ====================

# 1. Standalone feature-distribution comparison figure.
# (The original opened an extra `plt.figure(figsize=(12, 8))` here that
# was immediately superseded by `plt.subplots` below and leaked unused.)
feature_cols = ['gaze_x_mean', 'gaze_y_mean', 'movement_mean']
if all(col in features_df.columns for col in feature_cols):
    asd_indices = [i for i, label in enumerate(labels) if label == 1]
    td_indices = [i for i, label in enumerate(labels) if label == 0]
    
    # One subplot per headline feature, ASD vs TD histograms overlaid.
    fig, axes = plt.subplots(1, 3, figsize=(15, 5))
    
    for i, col in enumerate(feature_cols):
        asd_data = features_df.iloc[asd_indices][col]
        td_data = features_df.iloc[td_indices][col]
        
        axes[i].hist(asd_data, alpha=0.6, label='ASD', bins=20, color='red')
        axes[i].hist(td_data, alpha=0.6, label='TD', bins=20, color='blue')
        axes[i].set_title(f'{col} 分布对比')
        axes[i].set_xlabel('特征值')
        axes[i].set_ylabel('频次')
        axes[i].legend()
    
    plt.suptitle('ASD与TD特征分布对比', fontsize=16)
    plt.tight_layout()
    feature_dist_path = os.path.join(results_folder, "feature_distribution_comparison.png")
    plt.savefig(feature_dist_path, dpi=300, bbox_inches='tight')
    print(f"保存特征分布图: {feature_dist_path}")
    plt.show()

# 2. Standalone model-performance comparison chart (grouped bars).
plt.figure(figsize=(12, 6))
model_names = list(results.keys())
metrics = ['accuracy', 'precision', 'recall', 'f1']
x = np.arange(len(model_names))
width = 0.2

# One bar group per model; one bar per metric, shifted by `width`.
for offset, metric in enumerate(metrics):
    scores = [results[model_name][metric] for model_name in model_names]
    plt.bar(x + offset * width, scores, width, label=metric.capitalize())

plt.title('模型性能对比', fontsize=16)
plt.xlabel('模型')
plt.ylabel('分数')
plt.xticks(x + width * 1.5, model_names)
plt.legend()
plt.ylim(0, 1)
plt.grid(True, alpha=0.3)

model_perf_path = os.path.join(results_folder, "model_performance_comparison.png")
plt.savefig(model_perf_path, dpi=300, bbox_inches='tight')
print(f"保存模型性能对比图: {model_perf_path}")
plt.show()

# 3. Standalone confusion matrix for the best (highest test-F1) model.
best_model_name = max(results.keys(), key=lambda model_name: results[model_name]['f1'])
best_result = results[best_model_name]

plt.figure(figsize=(8, 6))
cm = confusion_matrix(y_test, best_result['y_pred'])
class_names = ['TD', 'ASD']  # 0 -> TD, 1 -> ASD
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
            xticklabels=class_names, yticklabels=class_names)
plt.title(f'混淆矩阵 - {best_model_name}', fontsize=16)
plt.xlabel('预测标签')
plt.ylabel('真实标签')

confusion_matrix_path = os.path.join(results_folder, f"confusion_matrix_{best_model_name.replace(' ', '_')}.png")
plt.savefig(confusion_matrix_path, dpi=300, bbox_inches='tight')
print(f"保存混淆矩阵图: {confusion_matrix_path}")
plt.show()

# 4. Standalone ROC curves for every model that exposed probabilities.
plt.figure(figsize=(10, 8))
for name, result in results.items():
    proba = result['y_pred_proba']
    if proba is None:
        continue  # model without predict_proba: no ROC curve
    fpr, tpr, _ = roc_curve(y_test, proba)
    roc_auc = auc(fpr, tpr)
    plt.plot(fpr, tpr, linewidth=2, label=f'{name} (AUC = {roc_auc:.3f})')

# Diagonal reference line: the random-classifier baseline.
plt.plot([0, 1], [0, 1], 'k--', linewidth=1, label='随机分类器')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('假正率 (False Positive Rate)')
plt.ylabel('真正率 (True Positive Rate)')
plt.title('ROC曲线对比', fontsize=16)
plt.legend()
plt.grid(True, alpha=0.3)

roc_curve_path = os.path.join(results_folder, "roc_curves.png")
plt.savefig(roc_curve_path, dpi=300, bbox_inches='tight')
print(f"保存ROC曲线图: {roc_curve_path}")
plt.show()

# 5. Standalone Random-Forest feature-importance chart.
if 'Random Forest' in results:
    plt.figure(figsize=(12, 8))
    rf_model = results['Random Forest']['model']
    importances = rf_model.feature_importances_
    feature_names = features_df.columns

    # Top 15 features, ordered least-to-most important so the
    # strongest feature ends up at the top of the barh chart.
    indices = np.argsort(importances)[-15:]
    positions = range(len(indices))
    plt.barh(positions, importances[indices], color='skyblue')
    plt.yticks(positions, [feature_names[i] for i in indices])
    plt.title('特征重要性分析 (Random Forest)', fontsize=16)
    plt.xlabel('重要性分数')
    plt.grid(True, alpha=0.3)

    feature_importance_path = os.path.join(results_folder, "feature_importance.png")
    plt.savefig(feature_importance_path, dpi=300, bbox_inches='tight')
    print(f"保存特征重要性图: {feature_importance_path}")
    plt.show()

# ==================== Section 5: detailed evaluation report ====================

print("\n" + "="*60)
print("详细分类报告")
print("="*60)

# Accumulate report lines here; they are written to
# detailed_report.txt further below.
report_content = []
report_content.append("ASD与TD儿童分类任务 - 详细报告")
report_content.append("=" * 60)
report_content.append(f"生成时间: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
report_content.append("")

# Dataset statistics section.
report_content.append("一、数据统计信息")
report_content.append("-" * 30)
report_content.append(f"样本总数: {len(labels)}")
report_content.append(f"ASD样本数: {sum(labels)}")
report_content.append(f"TD样本数: {len(labels) - sum(labels)}")
report_content.append(f"ASD比例: {np.mean(labels):.2%}")
report_content.append(f"特征维度: {features_df.shape[1]}")
report_content.append(f"训练集大小: {X_train.shape[0]}")
report_content.append(f"测试集大小: {X_test.shape[0]}")
report_content.append("")

# Per-model detailed section.
# NOTE(review): every model reuses the "二、" section heading — the
# report numbering repeats rather than incrementing per model.
for name, result in results.items():
    report_content.append(f"二、{name} 模型详细评估")
    report_content.append("-" * 40)
    
    # Headline metrics computed during training.
    report_content.append(f"准确率 (Accuracy): {result['accuracy']:.4f}")
    report_content.append(f"精确率 (Precision): {result['precision']:.4f}")
    report_content.append(f"召回率 (Recall): {result['recall']:.4f}")
    report_content.append(f"F1分数 (F1-Score): {result['f1']:.4f}")
    report_content.append("")
    
    # Per-class breakdown from sklearn's classification_report dict.
    report_content.append("详细分类报告:")
    classification_rep = classification_report(y_test, result['y_pred'], 
                                             target_names=['TD', 'ASD'],
                                             output_dict=True)
    
    # NOTE: this loop variable shadows the module-level `metrics` list
    # defined for the bar charts; that list is not used afterwards.
    for class_name in ['TD', 'ASD']:
        metrics = classification_rep[class_name]
        report_content.append(f"  {class_name}:")
        report_content.append(f"    精确率: {metrics['precision']:.4f}")
        report_content.append(f"    召回率: {metrics['recall']:.4f}")
        report_content.append(f"    F1分数: {metrics['f1-score']:.4f}")
        report_content.append(f"    支持数: {int(metrics['support'])}")
    
    report_content.append("")
    
    # Also echo the text-form report to stdout.
    print(f"\n{name} 详细报告:")
    print("-" * 40)
    print(classification_report(y_test, result['y_pred'], 
                              target_names=['TD', 'ASD']))

# Best-model analysis (re-selected by test F1; same criterion as the
# figure sections above).
best_model_name = max(results.keys(), key=lambda x: results[x]['f1'])
best_result = results[best_model_name]

report_content.append("三、最佳模型分析")
report_content.append("-" * 30)
report_content.append(f"最佳模型: {best_model_name}")
report_content.append(f"最佳F1分数: {best_result['f1']:.4f}")
report_content.append(f"对应准确率: {best_result['accuracy']:.4f}")
report_content.append("")

# Feature-importance section (Random Forest only).
if 'Random Forest' in results:
    report_content.append("四、特征重要性分析 (基于随机森林)")
    report_content.append("-" * 40)
    rf_model = results['Random Forest']['model']
    importances = rf_model.feature_importances_
    feature_names = features_df.columns
    
    # Feature indices sorted by descending importance.
    indices = np.argsort(importances)[::-1]
    
    report_content.append("前10个最重要特征:")
    for i in range(min(10, len(indices))):
        idx = indices[i]
        report_content.append(f"  {i+1:2d}. {feature_names[idx]:25s}: {importances[idx]:.4f}")
    
    report_content.append("")

# Cross-model performance comparison table.
report_content.append("五、模型性能对比")
report_content.append("-" * 30)
report_content.append(f"{'模型名称':<15} {'准确率':<8} {'精确率':<8} {'召回率':<8} {'F1分数':<8}")
report_content.append("-" * 50)
for name, result in results.items():
    report_content.append(f"{name:<15} {result['accuracy']:<8.4f} {result['precision']:<8.4f} "
                         f"{result['recall']:<8.4f} {result['f1']:<8.4f}")

report_content.append("")

# Write the accumulated report to disk (UTF-8 for the Chinese text).
report_path = os.path.join(results_folder, "detailed_report.txt")
with open(report_path, 'w', encoding='utf-8') as f:
    f.write('\n'.join(report_content))

print(f"\n保存详细报告: {report_path}")

def _label_names(values):
    # Map numeric class labels to readable names (1 -> 'ASD', 0 -> 'TD').
    return ['ASD' if v == 1 else 'TD' for v in values]

# Persist the feature matrix together with numeric and readable labels.
features_path = os.path.join(results_folder, "extracted_features.csv")
features_with_labels = features_df.copy()
features_with_labels['Label'] = labels
features_with_labels['Label_Name'] = _label_names(labels)
features_with_labels.to_csv(features_path, index=False)
print(f"保存特征数据: {features_path}")

# Persist per-sample test-set predictions from every model.
predictions_data = {
    'True_Label': y_test,
    'True_Label_Name': _label_names(y_test)
}

for name, result in results.items():
    predictions_data[f'{name}_Prediction'] = result['y_pred']
    predictions_data[f'{name}_Prediction_Name'] = _label_names(result['y_pred'])
    if result['y_pred_proba'] is not None:
        # Probability of the positive (ASD) class.
        predictions_data[f'{name}_Probability_ASD'] = result['y_pred_proba']

predictions_df = pd.DataFrame(predictions_data)
predictions_path = os.path.join(results_folder, "model_predictions.csv")
predictions_df.to_csv(predictions_path, index=False)
print(f"保存预测结果: {predictions_path}")

# Excel summary workbook (needs the openpyxl engine at runtime).
try:
    with pd.ExcelWriter(os.path.join(results_folder, "classification_results_summary.xlsx")) as writer:
        # Feature matrix with labels.
        features_with_labels.to_excel(writer, sheet_name='Features', index=False)
        
        # Per-sample test-set predictions.
        predictions_df.to_excel(writer, sheet_name='Predictions', index=False)
        
        # Model performance summary sheet.
        performance_data = []
        for name, result in results.items():
            performance_data.append({
                'Model': name,
                'Accuracy': result['accuracy'],
                'Precision': result['precision'],
                'Recall': result['recall'],
                'F1_Score': result['f1']
            })
        
        performance_df = pd.DataFrame(performance_data)
        performance_df.to_excel(writer, sheet_name='Performance', index=False)
        
        # Random-Forest feature importances, sorted descending.
        if 'Random Forest' in results:
            rf_model = results['Random Forest']['model']
            importance_data = pd.DataFrame({
                'Feature': features_df.columns,
                'Importance': rf_model.feature_importances_
            }).sort_values('Importance', ascending=False)
            importance_data.to_excel(writer, sheet_name='Feature_Importance', index=False)
    
    print(f"保存Excel汇总文件: {os.path.join(results_folder, 'classification_results_summary.xlsx')}")
except ImportError:
    # Missing openpyxl makes ExcelWriter raise ModuleNotFoundError,
    # which is a subclass of ImportError, so it is caught here.
    print("注意: 需要安装openpyxl库才能保存Excel文件 (pip install openpyxl)")

print(f"\n所有结果已保存到文件夹: {results_folder}")
print("包含文件:")
print("- comprehensive_analysis.png (综合分析图)")
print("- feature_distribution_comparison.png (特征分布对比)")
print("- model_performance_comparison.png (模型性能对比)")
print("- confusion_matrix_*.png (混淆矩阵)")
print("- roc_curves.png (ROC曲线)")
print("- feature_importance.png (特征重要性)")
print("- detailed_report.txt (详细报告)")
print("- extracted_features.csv (提取的特征)")
print("- model_predictions.csv (模型预测结果)")
print("- classification_results_summary.xlsx (Excel汇总文件)")

# ==================== Section 6: project summary ====================

print("\n" + "="*60)
print("课题总结与分析")
print("="*60)

# Summary lines are both printed and appended to the report file below.
summary_content = []
summary_content.append("\n六、课题总结与分析")
summary_content.append("-" * 30)

print(f"\n1. 最佳模型: {best_model_name}")
summary_content.append(f"1. 最佳模型: {best_model_name}")

print(f"   - 准确率: {best_result['accuracy']:.3f}")
summary_content.append(f"   - 准确率: {best_result['accuracy']:.3f}")

print(f"   - F1分数: {best_result['f1']:.3f}")
summary_content.append(f"   - F1分数: {best_result['f1']:.3f}")

print(f"\n2. 关键发现:")
summary_content.append(f"\n2. 关键发现:")

# Top-5 Random-Forest feature importances, most important first.
if 'Random Forest' in results:
    rf_model = results['Random Forest']['model']
    importances = rf_model.feature_importances_
    top_features = np.argsort(importances)[-5:]
    print("   - 最重要的5个特征:")
    summary_content.append("   - 最重要的5个特征:")
    # argsort is ascending, so iterate in reverse for descending rank.
    for i, idx in enumerate(reversed(top_features)):
        feature_info = f"     {i+1}. {features_df.columns[idx]}: {importances[idx]:.3f}"
        print(feature_info)
        summary_content.append(feature_info)

print(f"\n3. 模型性能比较:")
summary_content.append(f"\n3. 模型性能比较:")
for name, result in results.items():
    perf_info = f"   - {name}: F1={result['f1']:.3f}, 准确率={result['accuracy']:.3f}"
    print(perf_info)
    summary_content.append(perf_info)

print(f"\n4. 实际应用建议:")
summary_content.append(f"\n4. 实际应用建议:")
applications = [
    "   - 该模型可用于辅助ASD早期筛查",
    "   - 建议结合更多临床特征提高准确性",
    "   - 需要更大规模数据集验证模型泛化能力"
]
for app in applications:
    print(app)
    summary_content.append(app)

print(f"\n5. 改进方向:")
summary_content.append(f"\n5. 改进方向:")
improvements = [
    "   - 尝试深度学习模型（LSTM、CNN）",
    "   - 增加更多时序特征工程",
    "   - 考虑集成学习方法",
    "   - 引入注意力机制分析关键时间段"
]
for imp in improvements:
    print(imp)
    summary_content.append(imp)

# Append the summary to the same report file ('a' mode keeps the
# sections written earlier in this run).
with open(report_path, 'a', encoding='utf-8') as f:
    f.write('\n'.join(summary_content))

print("\n任务完成！")
print("="*60)
print(f"\n✅ 所有结果已成功保存到: {results_folder}")
print("\n📊 生成的文件包括:")
print("   🖼️  图片文件: 6个高清图表")
print("   📄 报告文件: 详细的分析报告")
print("   📊 数据文件: 特征数据和预测结果")
print("   📈 Excel文件: 完整的汇总数据")
print(f"\n📁 请查看文件夹: {results_folder}")
print("="*60)