#!/usr/bin/env python3
"""
数据集合并与4折交叉验证训练脚本

功能:
1. 从 maloss.csv 中提取恶意样本
2. 从 robust.csv 中提取良性样本
3. 合并数据集进行4折交叉验证训练和评估

使用方法:
    python merge_and_train.py --malicious-csv maloss.csv --benign-csv robust.csv --output merged_results
"""

import pandas as pd
import numpy as np
import argparse
import os
import json
from pathlib import Path
from datetime import datetime
import time
from typing import Tuple, List, Dict, Any
import warnings
warnings.filterwarnings('ignore')

from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import (
    classification_report, confusion_matrix, 
    accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
)

# 导入训练器
from malware_classifier1 import MalwareClassifier

class DatasetMerger:
    """Merge a malicious-sample CSV and a benign-sample CSV into one labeled dataset.

    Rows from the malicious file are labeled 1 and rows from the benign file 0
    (when a ``label`` column already exists it is used to filter instead).
    Feature columns are aligned across both files (features present on only
    one side are filled with 0 on the other) and the merged frame is shuffled
    with a recorded random seed for reproducibility.
    """

    def __init__(self, random_state: int = None):
        # Fall back to a time-derived seed so unpinned runs still get a
        # concrete, loggable seed value.
        self.random_state = random_state if random_state is not None else int(time.time() * 1000) % 2147483647
        np.random.seed(self.random_state)

    def load_and_merge_datasets(self, malicious_csv: str, benign_csv: str) -> pd.DataFrame:
        """Load, align, and merge the malicious and benign CSVs.

        Args:
            malicious_csv: Path to the CSV holding malicious samples.
            benign_csv: Path to the CSV holding benign samples.

        Returns:
            A shuffled DataFrame whose columns are ``package_name``, ``label``,
            followed by the sorted union of both files' feature columns.
        """
        print("🔍 正在加载数据集...")

        # --- Malicious side: keep label==1 rows, or stamp label=1 if absent ---
        df_malicious = pd.read_csv(malicious_csv)
        print(f"恶意数据集 ({malicious_csv}):")
        print(f"  总样本数: {len(df_malicious)}")
        if 'label' in df_malicious.columns:
            df_malicious = df_malicious[df_malicious['label'] == 1].copy()
            print(f"  过滤后恶意样本: {len(df_malicious)}")
        else:
            df_malicious['label'] = 1
            print(f"  已添加恶意标签")

        # --- Benign side: keep label==0 rows, or stamp label=0 if absent ---
        df_benign = pd.read_csv(benign_csv)
        print(f"\n良性数据集 ({benign_csv}):")
        print(f"  总样本数: {len(df_benign)}")
        if 'label' in df_benign.columns:
            df_benign = df_benign[df_benign['label'] == 0].copy()
            print(f"  过滤后良性样本: {len(df_benign)}")
        else:
            df_benign['label'] = 0
            print(f"  已添加良性标签")

        # --- Feature alignment: everything except metadata counts as a feature ---
        print(f"\n🔧 特征兼容性检查:")
        meta_columns = {'package_name', 'label', 'version', 'package_version', 'data_type'}
        mal_feature_cols = set(df_malicious.columns) - meta_columns
        ben_feature_cols = set(df_benign.columns) - meta_columns
        common_features = mal_feature_cols & ben_feature_cols

        print(f"  恶意数据集特征数: {len(mal_feature_cols)}")
        print(f"  良性数据集特征数: {len(ben_feature_cols)}")
        print(f"  共同特征数: {len(common_features)}")

        # Features present only on one side are zero-filled on the other.
        for col in ben_feature_cols - mal_feature_cols:
            df_malicious[col] = 0
            print(f"  为恶意数据集添加特征: {col}")
        for col in mal_feature_cols - ben_feature_cols:
            df_benign[col] = 0
            print(f"  为良性数据集添加特征: {col}")

        # Synthesize package names when the source file lacks them.
        if 'package_name' not in df_malicious.columns:
            df_malicious['package_name'] = [f"malicious_package_{i}" for i in range(len(df_malicious))]
        if 'package_name' not in df_benign.columns:
            df_benign['package_name'] = [f"benign_package_{i}" for i in range(len(df_benign))]

        # Sorted feature order keeps the output column layout deterministic
        # (the previous set-based ordering varied between interpreter runs).
        all_feature_cols = sorted(mal_feature_cols | ben_feature_cols)
        final_columns = ['package_name', 'label'] + all_feature_cols

        # reindex selects the final columns and zero-fills anything missing,
        # replacing the old redundant second add-missing-columns pass.
        df_malicious = df_malicious.reindex(columns=final_columns, fill_value=0)
        df_benign = df_benign.reindex(columns=final_columns, fill_value=0)

        # Merge, then shuffle so downstream folds do not see classes in file order.
        merged_df = pd.concat([df_malicious, df_benign], ignore_index=True)
        merged_df = merged_df.sample(frac=1, random_state=self.random_state).reset_index(drop=True)

        final_label_dist = merged_df['label'].value_counts()
        print(f"\n✅ 数据集合并完成:")
        print(f"  总样本数: {len(merged_df)}")
        print(f"  标签分布: 良性={final_label_dist.get(0, 0)}, 恶意={final_label_dist.get(1, 0)}")
        print(f"  特征数: {len(all_feature_cols)}")

        return merged_df

    def save_merged_dataset(self, merged_df: pd.DataFrame, output_path: str) -> str:
        """Write the merged CSV plus a JSON metadata sidecar; return the CSV path."""
        os.makedirs(output_path, exist_ok=True)
        output_file = os.path.join(output_path, "merged_dataset.csv")

        merged_df.to_csv(output_file, index=False)
        print(f"📁 合并数据集已保存: {output_file}")

        # Cast to native ints: json.dump raises TypeError on the np.int64
        # keys/values that value_counts() produces.
        label_counts = {int(k): int(v) for k, v in merged_df['label'].value_counts().items()}
        info = {
            'creation_time': datetime.now().isoformat(),
            'total_samples': len(merged_df),
            'label_distribution': label_counts,
            'feature_count': len([col for col in merged_df.columns if col not in ['package_name', 'label']]),
            'random_state': self.random_state
        }

        info_file = os.path.join(output_path, "dataset_info.json")
        with open(info_file, 'w') as f:
            json.dump(info, f, indent=2)

        return output_file

def run_4fold_cross_validation(csv_file: str, 
                              output_dir: str, 
                              random_state: int,
                              run_ablation: bool = True) -> Dict[str, Any]:
    """Run the 4-fold stratified cross-validation training pipeline.

    Args:
        csv_file: Path to the merged, labeled feature CSV.
        output_dir: Output directory path (only printed here; callers write artifacts).
        random_state: Base seed; each fold's classifier uses random_state + fold index.
        run_ablation: When True, also run the per-fold ablation study over the
            feature groups rules_only / graph_only / all_features.

    Returns:
        A results dict (per-model summary stats, ablation summary, per-fold
        details, best model), or None when an exception aborts the run.
    """
    print(f"\n🚀 开始4折交叉验证训练流程...")
    print(f"数据文件: {csv_file}")
    print(f"输出目录: {output_dir}")
    
    try:
        # Load the dataset once through a throwaway classifier instance.
        temp_classifier = MalwareClassifier(random_state=random_state)
        X, y, package_names = temp_classifier.load_data(csv_file)
        print(f"数据加载完成: {X.shape[0]}样本, {X.shape[1]}特征")
        
        # Important: keep a copy of the feature names so each fresh per-fold
        # classifier can have them re-assigned before training.
        original_feature_names = temp_classifier.feature_names.copy()
        print(f"特征名称已保存: {len(original_feature_names)} 个")
        print(f"前5个特征: {original_feature_names[:5]}")
        
        # Class distribution.
        # NOTE(review): assumes both classes are present (labels 0 and 1);
        # counts[1] raises IndexError on single-class data — confirm upstream.
        unique_labels, counts = np.unique(y, return_counts=True)
        print(f"标签分布: 良性={counts[0]}, 恶意={counts[1]}")
        
        # Stratified 4-fold split keeps the class ratio in every fold.
        skf = StratifiedKFold(n_splits=4, shuffle=True, random_state=random_state)
        
        # Accumulators for every fold's results.
        all_fold_results = {
            'fold_results': [],
            'model_performance': {},
            'ablation_results': {} if run_ablation else None
        }
        
        # Model names expected from MalwareClassifier.prepare_models().
        model_names = [
            'Random Forest', 'Gradient Boosting', 'Logistic Regression', 
            'SVM', 'Naive Bayes', 'Decision Tree', 'XGBoost'
        ]
        
        # One metric list per model; each fold appends to these.
        for model_name in model_names:
            all_fold_results['model_performance'][model_name] = {
                'accuracy': [], 'precision': [], 'recall': [], 'f1': [], 'auc': []
            }
        
        # Same accumulators for the ablation variants, if enabled.
        if run_ablation:
            ablation_types = ['rules_only', 'graph_only', 'all_features']
            for ablation_type in ablation_types:
                all_fold_results['ablation_results'][ablation_type] = {
                    'accuracy': [], 'precision': [], 'recall': [], 'f1': [], 'auc': []
                }
        
        print(f"\n📊 开始4折交叉验证...")
        
        # Main cross-validation loop (fold_idx is 1-based for display).
        for fold_idx, (train_idx, test_idx) in enumerate(skf.split(X, y), 1):
            print(f"\n{'='*60}")
            print(f"🔄 第 {fold_idx}/4 折验证")
            print(f"{'='*60}")
            
            # Split into this fold's train/test partitions.
            X_train, X_test = X[train_idx], X[test_idx]
            y_train, y_test = y[train_idx], y[test_idx]
            
            print(f"训练集: {len(X_train)} 样本")
            print(f"测试集: {len(X_test)} 样本")
            
            # Fresh classifier per fold, with a fold-specific seed.
            classifier = MalwareClassifier(random_state=random_state + fold_idx)
            
            # Key fix: the new instance must be given the feature names.
            classifier.feature_names = original_feature_names.copy()
            
            # Hand the training partition to the classifier.
            classifier.X = X_train
            classifier.y = y_train
            classifier.package_names = [package_names[i] for i in train_idx]
            
            # Instantiate the model set.
            classifier.prepare_models()
            
            # Train; the classifier splits train/val internally from X_train,
            # while this fold's X_test stays held out for the evaluation below.
            classifier.train_and_evaluate(X_train, y_train, test_size=0.2, val_size=0.2)
            
            # Evaluate every trained model on this fold's held-out test set.
            fold_results = {}
            for model_name in model_names:
                if model_name in classifier.models:
                    model = classifier.models[model_name]
                    
                    # Linear models were trained on scaled inputs; match that here.
                    if model_name in ['Logistic Regression', 'SVM']:
                        X_test_use = classifier.scaler.transform(X_test)
                    else:
                        X_test_use = X_test
                    
                    # Predictions (probabilities only when the model supports them).
                    y_pred = model.predict(X_test_use)
                    y_pred_proba = model.predict_proba(X_test_use)[:, 1] if hasattr(model, 'predict_proba') else None
                    
                    # Metrics; zero_division=0 avoids warnings on degenerate folds.
                    accuracy = accuracy_score(y_test, y_pred)
                    precision = precision_score(y_test, y_pred, zero_division=0)
                    recall = recall_score(y_test, y_pred, zero_division=0)
                    f1 = f1_score(y_test, y_pred, zero_division=0)
                    auc_score = roc_auc_score(y_test, y_pred_proba) if y_pred_proba is not None else 0
                    
                    # Per-fold record for this model.
                    fold_results[model_name] = {
                        'accuracy': accuracy,
                        'precision': precision,
                        'recall': recall,
                        'f1': f1,
                        'auc': auc_score
                    }
                    
                    # Append to the cross-fold accumulators.
                    all_fold_results['model_performance'][model_name]['accuracy'].append(accuracy)
                    all_fold_results['model_performance'][model_name]['precision'].append(precision)
                    all_fold_results['model_performance'][model_name]['recall'].append(recall)
                    all_fold_results['model_performance'][model_name]['f1'].append(f1)
                    all_fold_results['model_performance'][model_name]['auc'].append(auc_score)
                    
                    print(f"{model_name}: F1={f1:.4f}, Accuracy={accuracy:.4f}, Precision={precision:.4f}, Recall={recall:.4f}")
            
            # Ablation study (optional).
            if run_ablation:
                print(f"\n🧪 消融实验 - 第{fold_idx}折")
                try:
                    ablation_fold_results = classifier.run_ablation_study(X_train, y_train, test_size=0.2, val_size=0.2)
                    
                    # Evaluate each ablation variant's best model on the fold test set.
                    for ablation_type in ['rules_only', 'graph_only', 'all_features']:
                        if ablation_type in classifier.ablation_results:
                            result = classifier.ablation_results[ablation_type]
                            
                            # Skip on error or when no model was trained.
                            # NOTE(review): 'None' compared as a string — looks like the
                            # classifier's sentinel; confirm in MalwareClassifier.
                            if 'error' in result or result['best_model'] == 'None' or not result['models']:
                                print(f"  ⚠️  {ablation_type}: 跳过 - {result.get('error', '无可用模型')}")
                                # Record zeros so metric lists stay aligned across folds.
                                for metric in ['accuracy', 'precision', 'recall', 'f1', 'auc']:
                                    all_fold_results['ablation_results'][ablation_type][metric].append(0.0)
                                continue
                            
                            best_model_name = result['best_model']
                            best_model = result['models'][best_model_name]
                            scaler = result['scaler']
                            imputer = result['imputer']
                            
                            # Column indices belonging to this feature group.
                            feature_groups = classifier._define_feature_groups()
                            feature_indices = feature_groups[ablation_type]
                            
                            if len(feature_indices) == 0:
                                print(f"  ⚠️  {ablation_type}: 无特征，跳过")
                                # Record zeros so metric lists stay aligned across folds.
                                for metric in ['accuracy', 'precision', 'recall', 'f1', 'auc']:
                                    all_fold_results['ablation_results'][ablation_type][metric].append(0.0)
                                continue
                            
                            # Apply the imputation fitted during the ablation run.
                            X_test_subset = X_test[:, feature_indices]
                            X_test_subset_imputed = imputer.transform(X_test_subset)
                            
                            # Logistic Regression was trained on scaled features.
                            if best_model_name == 'Logistic Regression':
                                X_test_subset_scaled = scaler.transform(X_test_subset_imputed)
                                X_test_use = X_test_subset_scaled
                            else:
                                X_test_use = X_test_subset_imputed
                            

                            # Predictions (probabilities only when supported).
                            y_pred = best_model.predict(X_test_use)
                            y_pred_proba = best_model.predict_proba(X_test_use)[:, 1] if hasattr(best_model, 'predict_proba') else None
                            
                            # Metrics; zero_division=0 avoids degenerate-fold warnings.
                            accuracy = accuracy_score(y_test, y_pred)
                            precision = precision_score(y_test, y_pred, zero_division=0)
                            recall = recall_score(y_test, y_pred, zero_division=0)
                            f1 = f1_score(y_test, y_pred, zero_division=0)
                            auc_score = roc_auc_score(y_test, y_pred_proba) if y_pred_proba is not None else 0
                            
                            # Append to the ablation accumulators.
                            all_fold_results['ablation_results'][ablation_type]['accuracy'].append(accuracy)
                            all_fold_results['ablation_results'][ablation_type]['precision'].append(precision)
                            all_fold_results['ablation_results'][ablation_type]['recall'].append(recall)
                            all_fold_results['ablation_results'][ablation_type]['f1'].append(f1)
                            all_fold_results['ablation_results'][ablation_type]['auc'].append(auc_score)
                            
                            print(f"  {ablation_type}: F1={f1:.4f}, Accuracy={accuracy:.4f}")
                
                except Exception as ablation_error:
                    print(f"  ❌ 消融实验失败: {ablation_error}")
                    # Pad every ablation metric with zeros so later means still work.
                    for ablation_type in ['rules_only', 'graph_only', 'all_features']:
                        for metric in ['accuracy', 'precision', 'recall', 'f1', 'auc']:
                            all_fold_results['ablation_results'][ablation_type][metric].append(0.0)
            
            # Record this fold's partition sizes and per-model metrics.
            all_fold_results['fold_results'].append({
                'fold': fold_idx,
                'train_size': len(X_train),
                'test_size': len(X_test),
                'model_results': fold_results
            })
        
        # Aggregate results: mean and standard deviation across folds.
        print(f"\n{'='*80}")
        print(f"📈 4折交叉验证总结果")
        print(f"{'='*80}")
        
        # Per-model summary; also track the best model by mean F1.
        model_summary = {}
        best_model_name = None
        best_f1_mean = 0
        
        for model_name in model_names:
            if model_name in all_fold_results['model_performance']:
                perf = all_fold_results['model_performance'][model_name]
                
                # Mean / std over the 4 folds (np.std is the population std).
                accuracy_mean = np.mean(perf['accuracy'])
                accuracy_std = np.std(perf['accuracy'])
                precision_mean = np.mean(perf['precision'])
                precision_std = np.std(perf['precision'])
                recall_mean = np.mean(perf['recall'])
                recall_std = np.std(perf['recall'])
                f1_mean = np.mean(perf['f1'])
                f1_std = np.std(perf['f1'])
                auc_mean = np.mean(perf['auc'])
                auc_std = np.std(perf['auc'])
                
                model_summary[model_name] = {
                    'accuracy_mean': accuracy_mean,
                    'accuracy_std': accuracy_std,
                    'precision_mean': precision_mean,
                    'precision_std': precision_std,
                    'recall_mean': recall_mean,
                    'recall_std': recall_std,
                    'f1_mean': f1_mean,
                    'f1_std': f1_std,
                    'auc_mean': auc_mean,
                    'auc_std': auc_std
                }
                
                # Best model = highest mean F1 across folds.
                if f1_mean > best_f1_mean:
                    best_f1_mean = f1_mean
                    best_model_name = model_name
                
                print(f"{model_name}:")
                print(f"  F1: {f1_mean:.4f} ± {f1_std:.4f}")
                print(f"  Accuracy: {accuracy_mean:.4f} ± {accuracy_std:.4f}")
                print(f"  Precision: {precision_mean:.4f} ± {precision_std:.4f}")
                print(f"  Recall: {recall_mean:.4f} ± {recall_std:.4f}")
                print(f"  AUC: {auc_mean:.4f} ± {auc_std:.4f}")
                print()
        
        # Ablation summary across folds.
        ablation_summary = {}
        if run_ablation and all_fold_results['ablation_results']:
            print(f"🧪 消融实验总结果:")
            print(f"{'-'*50}")
            
            for ablation_type in ['rules_only', 'graph_only', 'all_features']:
                if ablation_type in all_fold_results['ablation_results']:
                    ablation_perf = all_fold_results['ablation_results'][ablation_type]
                    
                    f1_mean = np.mean(ablation_perf['f1'])
                    f1_std = np.std(ablation_perf['f1'])
                    accuracy_mean = np.mean(ablation_perf['accuracy'])
                    accuracy_std = np.std(ablation_perf['accuracy'])
                    
                    ablation_summary[ablation_type] = {
                        'f1_mean': f1_mean,
                        'f1_std': f1_std,
                        'accuracy_mean': accuracy_mean,
                        'accuracy_std': accuracy_std
                    }
                    
                    print(f"{ablation_type}: F1={f1_mean:.4f} ± {f1_std:.4f}, Accuracy={accuracy_mean:.4f} ± {accuracy_std:.4f}")
        
        # Final results payload returned to the caller.
        final_results = {
            'experiment_id': f"4fold_cv_{int(time.time())}",
            'total_samples': X.shape[0],
            'feature_count': X.shape[1],
            'malicious_samples': int(np.sum(y == 1)),
            'benign_samples': int(np.sum(y == 0)),
            'cv_folds': 4,
            'random_state': random_state,
            'best_model': best_model_name,
            'best_f1_mean': best_f1_mean,
            'model_summary': model_summary,
            'ablation_summary': ablation_summary,
            'all_fold_results': all_fold_results
        }
        
        print(f"\n🏆 最佳模型: {best_model_name}")
        print(f"📊 最佳F1分数: {best_f1_mean:.4f}")
        
        return final_results
        
    except Exception as e:
        print(f"❌ 训练过程出错: {e}")
        import traceback
        traceback.print_exc()
        return None

def generate_cv_report(results: Dict[str, Any], output_dir: str):
    """Write a human-readable text summary of the 4-fold CV results.

    Args:
        results: The results dict produced by run_4fold_cross_validation.
        output_dir: Directory in which 4fold_cv_report.txt is created.
    """
    report_path = Path(output_dir) / "4fold_cv_report.txt"

    with open(report_path, 'w', encoding='utf-8') as f:
        def emit(text: str = "") -> None:
            # Every report line ends with exactly one newline.
            f.write(text + "\n")

        banner = "=" * 80
        rule40 = "-" * 40
        rule60 = "-" * 60

        # Title banner.
        emit(banner)
        emit("4折交叉验证实验报告")
        emit(banner)
        emit()

        # Basic experiment metadata.
        emit("实验基本信息")
        emit(rule40)
        emit(f"实验ID: {results['experiment_id']}")
        emit(f"总样本数: {results['total_samples']}")
        emit(f"特征数: {results['feature_count']}")
        emit(f"恶意样本: {results['malicious_samples']}")
        emit(f"良性样本: {results['benign_samples']}")
        emit(f"交叉验证折数: {results['cv_folds']}")
        emit(f"随机种子: {results['random_state']}")
        emit(f"最佳模型: {results['best_model']}")
        emit(f"最佳F1分数: {results['best_f1_mean']:.4f}")
        emit()

        # Per-model table: mean ± std for each metric.
        emit("模型性能总结 (均值 ± 标准差)")
        emit(rule60)
        emit(f"{'模型名称':<20} {'F1分数':<15} {'准确率':<15} {'精确率':<15} {'召回率':<15}")
        emit(rule60)
        for name, stats in results['model_summary'].items():
            f1_cell = f"{stats['f1_mean']:.4f}±{stats['f1_std']:.4f}"
            acc_cell = f"{stats['accuracy_mean']:.4f}±{stats['accuracy_std']:.4f}"
            prec_cell = f"{stats['precision_mean']:.4f}±{stats['precision_std']:.4f}"
            rec_cell = f"{stats['recall_mean']:.4f}±{stats['recall_std']:.4f}"
            emit(f"{name:<20} {f1_cell:<15} {acc_cell:<15} {prec_cell:<15} {rec_cell:<15}")

        # Ablation section (only when a summary exists).
        if results['ablation_summary']:
            emit()
            emit("消融实验结果")
            emit(rule40)
            for abl_name, stats in results['ablation_summary'].items():
                f1_cell = f"{stats['f1_mean']:.4f}±{stats['f1_std']:.4f}"
                acc_cell = f"{stats['accuracy_mean']:.4f}±{stats['accuracy_std']:.4f}"
                emit(f"{abl_name}: F1={f1_cell}, Accuracy={acc_cell}")

        # Per-fold breakdown.
        emit()
        emit("每折详细结果")
        emit(rule40)
        for fold in results['all_fold_results']['fold_results']:
            emit(f"第{fold['fold']}折 (训练:{fold['train_size']}, 测试:{fold['test_size']}):")
            for name, metrics in fold['model_results'].items():
                emit(f"  {name}: F1={metrics['f1']:.4f}, Acc={metrics['accuracy']:.4f}")
            emit()

        # Closing banner.
        emit(banner)
        emit("报告生成完成")
        emit(banner)

    print(f"📄 4折交叉验证报告已生成: {report_path}")

def main():
    """CLI entry point: merge the two CSVs, then (optionally) run 4-fold CV."""

    def _build_parser() -> argparse.ArgumentParser:
        # CLI definition isolated here so the control flow below stays short.
        parser = argparse.ArgumentParser(
            description="数据集合并与4折交叉验证训练脚本",
            formatter_class=argparse.RawDescriptionHelpFormatter,
            epilog="""
示例用法:
  # 基本4折交叉验证
  python merge_and_train.py --malicious-csv maloss.csv --benign-csv robust.csv
  
  # 自定义输出目录和随机种子
  python merge_and_train.py --malicious-csv maloss.csv --benign-csv robust.csv --output my_results --random-state 42
  
  # 跳过消融实验（加快训练速度）
  python merge_and_train.py --malicious-csv maloss.csv --benign-csv robust.csv --no-ablation
        """
        )
        parser.add_argument('--malicious-csv', required=True, help='恶意样本CSV文件路径')
        parser.add_argument('--benign-csv', required=True, help='良性样本CSV文件路径')
        parser.add_argument('--output', '-o', default='merged_training_results', help='输出目录')
        parser.add_argument('--random-state', type=int, default=None, help='随机种子')
        parser.add_argument('--no-ablation', action='store_true', help='跳过消融实验')
        parser.add_argument('--only-merge', action='store_true', help='只进行数据合并，不训练')
        return parser

    args = _build_parser().parse_args()

    try:
        print("="*80)
        print("数据集合并与4折交叉验证训练脚本")
        print("="*80)

        # Pin a concrete seed even when the user did not supply one.
        if args.random_state is not None:
            random_state = args.random_state
        else:
            random_state = int(time.time() * 1000) % 2147483647
        print(f"随机种子: {random_state}")

        # Merge the two CSVs and persist the result.
        merger = DatasetMerger(random_state=random_state)
        merged_df = merger.load_and_merge_datasets(args.malicious_csv, args.benign_csv)

        output_path = Path(args.output)
        merged_csv = merger.save_merged_dataset(merged_df, str(output_path))

        # Merge-only mode stops here.
        if args.only_merge:
            print("✅ 数据合并完成，跳过训练")
            return

        # Train and evaluate with 4-fold cross-validation.
        results = run_4fold_cross_validation(
            csv_file=merged_csv,
            output_dir=str(output_path),
            random_state=random_state,
            run_ablation=not args.no_ablation
        )

        if results:
            # Persist the raw summary; default=str handles non-JSON types.
            summary_file = output_path / "experiment_summary.json"
            with open(summary_file, 'w') as f:
                json.dump(results, f, indent=2, default=str)

            # Human-readable report.
            generate_cv_report(results, str(output_path))

            print(f"\n📊 4折交叉验证实验摘要:")
            print(f"  数据集: {results['malicious_samples']} 恶意 + {results['benign_samples']} 良性")
            print(f"  最佳模型: {results['best_model']}")
            print(f"  最佳F1分数: {results['best_f1_mean']:.4f}")

            # Show the top-3 models ranked by mean F1.
            ranked = sorted(results['model_summary'].items(),
                            key=lambda item: item[1]['f1_mean'], reverse=True)
            print(f"\n🏆 模型排名:")
            for rank, (name, stats) in enumerate(ranked[:3], 1):
                print(f"  {rank}. {name}: {stats['f1_mean']:.4f} ± {stats['f1_std']:.4f}")

            print(f"\n📋 实验摘要已保存: {summary_file}")
            print("="*80)
            print("🎉 4折交叉验证完成!")

    except FileNotFoundError as e:
        print(f"❌ 错误: 文件不存在 - {e}")
    except Exception as e:
        print(f"❌ 错误: {e}")
        import traceback
        traceback.print_exc()

# Entry guard: lets the module be imported without running the pipeline.
if __name__ == "__main__":
    main()