#!/usr/bin/env python3
"""
控制变量实验脚本 - 验证方法泛用性

功能:
1. 从 maloss.csv 提取恶意样本，从 features.csv 提取指定数量良性样本 -> 数据集1
2. 从 robust.csv 提取恶意样本，从 features.csv 提取相同良性样本 -> 数据集2
3. 在两个数据集上分别进行4折交叉验证
4. 生成两份独立的实验报告

使用方法:
    python controlled_experiment.py --malicious-csv1 maloss.csv --malicious-csv2 robust.csv --benign-csv features.csv --benign-count 2000
"""

import pandas as pd
import numpy as np
import argparse
import os
import json
from pathlib import Path
from datetime import datetime
import time
from typing import Tuple, List, Dict, Any
import warnings
warnings.filterwarnings('ignore')

from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import (
    classification_report, confusion_matrix, 
    accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
)

# 导入训练器
from malware_classifier1 import MalwareClassifier

class ControlledExperimentBuilder:
    """Builds the paired datasets for the controlled (fixed-benign) experiment.

    The benign sample set is extracted exactly once and cached on the
    instance, then combined with malicious samples from different sources,
    so the only variable between the resulting datasets is the origin of
    the malicious samples.
    """

    def __init__(self, random_state: int = None):
        # Fall back to a time-derived seed when the caller does not pin one;
        # modulo keeps it inside the 32-bit range numpy accepts.
        self.random_state = random_state if random_state is not None else int(time.time() * 1000) % 2147483647
        np.random.seed(self.random_state)
        self.benign_samples = None  # cached benign sample set, reused across datasets

    def extract_malicious_samples(self, csv_file: str, dataset_name: str) -> pd.DataFrame:
        """Extract malicious samples from a CSV file.

        Tries, in order: an explicit ``label`` column (1 == malicious), a
        ``data_type`` column (``'malicious'``), a filename heuristic
        (names containing mal/maloss/malicious are assumed all-malicious),
        and finally a rule-trigger-count heuristic over ``rule_*`` columns.

        Args:
            csv_file: Path of the CSV to read.
            dataset_name: Label used for logs and synthesized package names.

        Returns:
            DataFrame of malicious rows with ``label`` and ``package_name``
            columns guaranteed, de-duplicated on ``package_name``.

        Raises:
            ValueError: When no strategy can identify malicious samples.
        """
        print(f"\n🔍 正在从 {csv_file} 提取恶意样本 ({dataset_name})...")

        df = pd.read_csv(csv_file)
        print(f"  原始样本数: {len(df)}")

        # Strategy 1: an explicit label column selects malicious rows directly
        if 'label' in df.columns:
            malicious_df = df[df['label'] == 1].copy()
            print(f"  通过label筛选的恶意样本: {len(malicious_df)}")

        # Strategy 2: a data_type column
        elif 'data_type' in df.columns:
            malicious_df = df[df['data_type'] == 'malicious'].copy()
            print(f"  通过data_type筛选的恶意样本: {len(malicious_df)}")

        # Strategy 3: decide from the filename
        else:
            # Files like maloss.csv are assumed to contain only malicious samples
            if any(keyword in csv_file.lower() for keyword in ['maloss', 'malicious', 'mal']):
                malicious_df = df.copy()
                malicious_df['label'] = 1
                print(f"  假设所有样本为恶意: {len(malicious_df)}")
            else:
                # Otherwise fall back to a heuristic over rule features
                rule_columns = [col for col in df.columns if col.startswith('rule_')]
                if rule_columns:
                    # Rows triggering several rules are treated as malicious
                    rule_trigger_count = df[rule_columns].sum(axis=1)
                    malicious_threshold = 3  # 3 or more triggered rules counts as malicious
                    malicious_df = df[rule_trigger_count >= malicious_threshold].copy()
                    malicious_df['label'] = 1
                    print(f"  通过规则启发式筛选的恶意样本: {len(malicious_df)}")
                else:
                    print(f"  ⚠️ 无法确定恶意样本，跳过此文件")
                    raise ValueError(f"无法从文件 {csv_file} 中识别恶意样本")

        # Guarantee a label column
        if 'label' not in malicious_df.columns:
            malicious_df['label'] = 1

        # Guarantee a package_name column (synthesized when absent)
        if 'package_name' not in malicious_df.columns:
            malicious_df['package_name'] = [f"{dataset_name}_malicious_pkg_{i}"
                                           for i in range(len(malicious_df))]

        # De-duplicate on package_name, keeping the first occurrence
        original_count = len(malicious_df)
        malicious_df = malicious_df.drop_duplicates(subset=['package_name'], keep='first')
        dedup_count = len(malicious_df)
        print(f"  ✅ 去重后恶意样本: {dedup_count} 个 (去除{original_count - dedup_count}个重复)")

        return malicious_df

    def extract_fixed_benign_samples(self, benign_csv: str, sample_count: int) -> pd.DataFrame:
        """Extract a fixed benign sample set (computed once, then reused).

        Args:
            benign_csv: Path of the CSV to sample benign rows from.
            sample_count: Desired number of benign samples; capped at the
                number of available rows.

        Returns:
            DataFrame of benign rows with ``label`` forced to 0 and a
            ``package_name`` column guaranteed.
        """
        if self.benign_samples is not None:
            print(f"📋 复用已提取的良性样本: {len(self.benign_samples)} 个")
            return self.benign_samples.copy()

        print(f"\n🎯 正在从 {benign_csv} 中抽取 {sample_count} 个良性样本...")

        df = pd.read_csv(benign_csv)
        print(f"  原始样本数: {len(df)}")

        # Select benign rows
        if 'label' in df.columns:
            benign_df = df[df['label'] == 0].copy()
            print(f"  通过label筛选的良性样本: {len(benign_df)}")
        elif 'data_type' in df.columns:
            benign_df = df[df['data_type'] == 'benign'].copy()
            print(f"  通过data_type筛选的良性样本: {len(benign_df)}")
        else:
            # Assume everything is benign (features.csv is typically all-benign)
            benign_df = df.copy()
            print(f"  假设所有样本为良性: {len(benign_df)}")

        # Cap the request at the available row count
        if len(benign_df) < sample_count:
            print(f"  ⚠️ 良性样本不足 {sample_count} 个，使用所有 {len(benign_df)} 个样本")
            sample_count = len(benign_df)

        # Random sampling with the fixed seed keeps the set reproducible
        sampled_benign = benign_df.sample(n=sample_count, random_state=self.random_state).copy()

        # Force the benign label
        sampled_benign['label'] = 0

        # Guarantee a package_name column
        if 'package_name' not in sampled_benign.columns:
            sampled_benign['package_name'] = [f"benign_pkg_{i}" for i in range(len(sampled_benign))]

        # Cache a private copy for later reuse
        self.benign_samples = sampled_benign.copy()
        print(f"  ✅ 良性样本提取完成: {len(sampled_benign)} 个 (已缓存)")

        return sampled_benign

    def create_dataset(self, malicious_df: pd.DataFrame, benign_df: pd.DataFrame, dataset_name: str) -> pd.DataFrame:
        """Align features, merge malicious + benign rows and shuffle.

        Missing features on either side are zero-filled so both frames
        share an identical, deterministic column layout.

        Args:
            malicious_df: Malicious rows (``label`` == 1 expected).
            benign_df: Benign rows (``label`` == 0 expected).
            dataset_name: Name used for logging and seed derivation.

        Returns:
            Shuffled, merged DataFrame with columns
            ``['package_name', 'label', *features]``.
        """
        print(f"\n🔧 正在构建数据集: {dataset_name}")

        # Work on copies so the caller's frames (including the cached benign
        # set) are never mutated by the column-alignment steps below.
        malicious_df = malicious_df.copy()
        benign_df = benign_df.copy()

        # Feature-compatibility check
        mal_columns = set(malicious_df.columns)
        ben_columns = set(benign_df.columns)

        # Identify feature columns (metadata columns excluded)
        meta_columns = {'package_name', 'label', 'version', 'package_version', 'data_type'}
        mal_features = mal_columns - meta_columns
        ben_features = ben_columns - meta_columns

        # Shared and one-sided features
        common_features = mal_features.intersection(ben_features)
        missing_in_mal = ben_features - mal_features
        missing_in_ben = mal_features - ben_features

        print(f"  特征兼容性分析:")
        print(f"    恶意数据特征数: {len(mal_features)}")
        print(f"    良性数据特征数: {len(ben_features)}")
        print(f"    共同特征数: {len(common_features)}")
        print(f"    恶意数据缺失特征: {len(missing_in_mal)}")
        print(f"    良性数据缺失特征: {len(missing_in_ben)}")

        # Zero-fill features missing on the malicious side
        for col in missing_in_mal:
            malicious_df[col] = 0

        # Zero-fill features missing on the benign side
        for col in missing_in_ben:
            benign_df[col] = 0

        # Sorted for a deterministic column order: plain set iteration order
        # over strings varies between runs (PYTHONHASHSEED).
        all_features = sorted(mal_features.union(ben_features))
        final_columns = ['package_name', 'label'] + all_features

        # Make sure both frames carry every final column
        for col in final_columns:
            if col not in malicious_df.columns:
                malicious_df[col] = 0
            if col not in benign_df.columns:
                benign_df[col] = 0

        # Project onto the final column layout
        malicious_df = malicious_df[final_columns].copy()
        benign_df = benign_df[final_columns].copy()

        # Merge both classes
        merged_df = pd.concat([malicious_df, benign_df], ignore_index=True)

        # Shuffle with a dataset-specific but reproducible seed. Note that
        # hash(str) is salted per process, so the offset is derived from the
        # name's bytes instead, keeping the shuffle identical across runs.
        dataset_seed = self.random_state + sum(dataset_name.encode()) % 1000
        merged_df = merged_df.sample(frac=1, random_state=dataset_seed).reset_index(drop=True)

        # Report the final class balance
        label_counts = merged_df['label'].value_counts()
        print(f"  ✅ 数据集 {dataset_name} 构建完成:")
        print(f"    总样本数: {len(merged_df)}")
        print(f"    恶意样本: {label_counts.get(1, 0)} ({label_counts.get(1, 0)/len(merged_df)*100:.1f}%)")
        print(f"    良性样本: {label_counts.get(0, 0)} ({label_counts.get(0, 0)/len(merged_df)*100:.1f}%)")
        print(f"    特征数: {len(all_features)}")

        return merged_df

    def save_dataset(self, merged_df: pd.DataFrame, output_path: str, dataset_name: str) -> str:
        """Write the dataset CSV plus a JSON sidecar with summary metadata.

        Args:
            merged_df: The merged dataset to persist.
            output_path: Directory to write into (created if missing).
            dataset_name: Basename prefix for the output files.

        Returns:
            Path of the written CSV file.
        """
        os.makedirs(output_path, exist_ok=True)
        output_file = f"{output_path}/{dataset_name}_dataset.csv"

        merged_df.to_csv(output_file, index=False)
        print(f"    📁 数据集已保存: {output_file}")

        # Sidecar with dataset statistics for later auditing
        label_counts = merged_df['label'].value_counts()
        info = {
            'creation_time': datetime.now().isoformat(),
            'dataset_name': dataset_name,
            'total_samples': len(merged_df),
            'malicious_samples': int(label_counts.get(1, 0)),
            'benign_samples': int(label_counts.get(0, 0)),
            'malicious_ratio': float(label_counts.get(1, 0) / len(merged_df)),
            'feature_count': len([col for col in merged_df.columns if col not in ['package_name', 'label']]),
            'feature_types': {
                'rule_features': len([col for col in merged_df.columns if col.startswith('rule_')]),
                'graph_features': len([col for col in merged_df.columns if not col.startswith('rule_') and col not in ['package_name', 'label']])
            },
            'random_state': self.random_state
        }

        info_file = f"{output_path}/{dataset_name}_dataset_info.json"
        with open(info_file, 'w') as f:
            json.dump(info, f, indent=2)

        return output_file

def run_controlled_experiment(csv_file: str, 
                            output_dir: str, 
                            dataset_name: str,
                            random_state: int,
                            run_ablation: bool = True) -> Dict[str, Any]:
    """Run a 4-fold stratified cross-validation experiment on one dataset.

    Args:
        csv_file: Path to the merged dataset CSV, loadable by
            ``MalwareClassifier.load_data``.
        output_dir: Output directory (currently only echoed in the logs).
        dataset_name: Human-readable dataset label used in logs and results.
        random_state: Base seed; each fold derives its own offset seed.
        run_ablation: Whether to also run the rules-only / graph-only /
            all-features ablation study on every fold.

    Returns:
        Result dict with per-fold metrics, per-model mean/std summaries,
        ablation summaries and the best model by mean F1, or ``None`` when
        the experiment fails.
    """
    print(f"\n🚀 开始数据集 {dataset_name} 的4折交叉验证实验...")
    print(f"数据文件: {csv_file}")
    print(f"输出目录: {output_dir}")
    
    try:
        # Load the data through a throwaway classifier instance
        temp_classifier = MalwareClassifier(random_state=random_state)
        X, y, package_names = temp_classifier.load_data(csv_file)
        print(f"数据加载完成: {X.shape[0]}样本, {X.shape[1]}特征")
        
        # Preserve feature names so every fold's classifier sees the same set
        original_feature_names = temp_classifier.feature_names.copy()
        print(f"特征名称已保存: {len(original_feature_names)} 个")
        
        # Label distribution: map counts by label value instead of array
        # position, so a single-class dataset cannot swap the two counts
        unique_labels, counts = np.unique(y, return_counts=True)
        label_dist = dict(zip(unique_labels.tolist(), counts.tolist()))
        malicious_count = int(label_dist.get(1, 0))
        benign_count = int(label_dist.get(0, 0))
        print(f"标签分布: 良性={benign_count}, 恶意={malicious_count}")
        print(f"恶意样本比例: {malicious_count/(malicious_count+benign_count)*100:.1f}%")
        
        # Stratified 4-fold split keeps the class ratio in every fold
        skf = StratifiedKFold(n_splits=4, shuffle=True, random_state=random_state)
        
        # Accumulators for all folds
        all_fold_results = {
            'fold_results': [],
            'model_performance': {},
            'ablation_results': {} if run_ablation else None
        }
        
        # Models expected from MalwareClassifier.prepare_models
        model_names = [
            'Random Forest', 'Gradient Boosting', 'Logistic Regression', 
            'SVM', 'Naive Bayes', 'Decision Tree', 'XGBoost'
        ]
        
        # Per-model metric lists, one entry appended per fold
        for model_name in model_names:
            all_fold_results['model_performance'][model_name] = {
                'accuracy': [], 'precision': [], 'recall': [], 'f1': [], 'auc': []
            }
        
        # Same layout for the ablation study when enabled
        if run_ablation:
            ablation_types = ['rules_only', 'graph_only', 'all_features']
            for ablation_type in ablation_types:
                all_fold_results['ablation_results'][ablation_type] = {
                    'accuracy': [], 'precision': [], 'recall': [], 'f1': [], 'auc': []
                }
        
        print(f"\n📊 开始4折交叉验证...")
        
        # Run the 4-fold cross-validation
        for fold_idx, (train_idx, test_idx) in enumerate(skf.split(X, y), 1):
            print(f"\n{'='*60}")
            print(f"🔄 数据集 {dataset_name} - 第 {fold_idx}/4 折验证")
            print(f"{'='*60}")
            
            # Fold split
            X_train, X_test = X[train_idx], X[test_idx]
            y_train, y_test = y[train_idx], y[test_idx]
            
            train_malicious = np.sum(y_train == 1)
            train_benign = np.sum(y_train == 0)
            test_malicious = np.sum(y_test == 1)
            test_benign = np.sum(y_test == 0)
            
            print(f"训练集: {len(X_train)} 样本 (恶意:{train_malicious}, 良性:{train_benign})")
            print(f"测试集: {len(X_test)} 样本 (恶意:{test_malicious}, 良性:{test_benign})")
            
            # Fresh classifier per fold with a fold-specific seed
            classifier = MalwareClassifier(random_state=random_state + fold_idx)
            classifier.feature_names = original_feature_names.copy()
            
            # Hand the fold's training data to the classifier
            classifier.X = X_train
            classifier.y = y_train
            classifier.package_names = [package_names[i] for i in train_idx]
            
            # Instantiate the model zoo
            classifier.prepare_models()
            
            # Train; the classifier splits X_train internally for validation
            # (test_size/val_size semantics owned by MalwareClassifier)
            classifier.train_and_evaluate(X_train, y_train, test_size=0.2, val_size=0.2)
            
            # Evaluate every model on this fold's held-out test split
            fold_results = {}
            for model_name in model_names:
                if model_name in classifier.models:
                    model = classifier.models[model_name]
                    
                    # Scale-sensitive models get the scaled test matrix
                    if model_name in ['Logistic Regression', 'SVM']:
                        X_test_use = classifier.scaler.transform(X_test)
                    else:
                        X_test_use = X_test
                    
                    # Predictions (probabilities only where supported)
                    y_pred = model.predict(X_test_use)
                    y_pred_proba = model.predict_proba(X_test_use)[:, 1] if hasattr(model, 'predict_proba') else None
                    
                    # Metrics; zero_division=0 avoids warnings on degenerate folds
                    accuracy = accuracy_score(y_test, y_pred)
                    precision = precision_score(y_test, y_pred, zero_division=0)
                    recall = recall_score(y_test, y_pred, zero_division=0)
                    f1 = f1_score(y_test, y_pred, zero_division=0)
                    auc_score = roc_auc_score(y_test, y_pred_proba) if y_pred_proba is not None else 0
                    
                    # Per-fold record
                    fold_results[model_name] = {
                        'accuracy': accuracy,
                        'precision': precision,
                        'recall': recall,
                        'f1': f1,
                        'auc': auc_score
                    }
                    
                    # Append to the cross-fold accumulators
                    all_fold_results['model_performance'][model_name]['accuracy'].append(accuracy)
                    all_fold_results['model_performance'][model_name]['precision'].append(precision)
                    all_fold_results['model_performance'][model_name]['recall'].append(recall)
                    all_fold_results['model_performance'][model_name]['f1'].append(f1)
                    all_fold_results['model_performance'][model_name]['auc'].append(auc_score)
                    
                    print(f"    {model_name}: F1={f1:.4f}, Accuracy={accuracy:.4f}, AUC={auc_score:.4f}")
            
            # Ablation study for this fold (when enabled)
            if run_ablation:
                print(f"\n🧪 消融实验 - {dataset_name} 第{fold_idx}折")
                try:
                    ablation_fold_results = classifier.run_ablation_study(X_train, y_train, test_size=0.2, val_size=0.2)
                    
                    # Re-evaluate each ablation's best model on the fold's test split
                    for ablation_type in ['rules_only', 'graph_only', 'all_features']:
                        if ablation_type in classifier.ablation_results:
                            result = classifier.ablation_results[ablation_type]
                            
                            # Skip on reported error or missing models
                            if 'error' in result or result['best_model'] == 'None' or not result['models']:
                                print(f"      ⚠️  {ablation_type}: 跳过 - {result.get('error', '无可用模型')}")
                                # Record zeros so list lengths stay aligned
                                for metric in ['accuracy', 'precision', 'recall', 'f1', 'auc']:
                                    all_fold_results['ablation_results'][ablation_type][metric].append(0.0)
                                continue
                            
                            best_model_name = result['best_model']
                            best_model = result['models'][best_model_name]
                            scaler = result['scaler']
                            imputer = result['imputer']
                            
                            # Column indices belonging to this ablation's feature group
                            feature_groups = classifier._define_feature_groups()
                            feature_indices = feature_groups[ablation_type]
                            
                            if len(feature_indices) == 0:
                                print(f"      ⚠️  {ablation_type}: 无特征，跳过")
                                # Record zeros so list lengths stay aligned
                                for metric in ['accuracy', 'precision', 'recall', 'f1', 'auc']:
                                    all_fold_results['ablation_results'][ablation_type][metric].append(0.0)
                                continue
                            
                            # Apply the same imputation used at training time
                            X_test_subset = X_test[:, feature_indices]
                            X_test_subset_imputed = imputer.transform(X_test_subset)
                            
                            # Scale only for the scale-sensitive model
                            if best_model_name == 'Logistic Regression':
                                X_test_subset_scaled = scaler.transform(X_test_subset_imputed)
                                X_test_use = X_test_subset_scaled
                            else:
                                X_test_use = X_test_subset_imputed
                            
                            # Predictions
                            y_pred = best_model.predict(X_test_use)
                            y_pred_proba = best_model.predict_proba(X_test_use)[:, 1] if hasattr(best_model, 'predict_proba') else None
                            
                            # Metrics
                            accuracy = accuracy_score(y_test, y_pred)
                            precision = precision_score(y_test, y_pred, zero_division=0)
                            recall = recall_score(y_test, y_pred, zero_division=0)
                            f1 = f1_score(y_test, y_pred, zero_division=0)
                            auc_score = roc_auc_score(y_test, y_pred_proba) if y_pred_proba is not None else 0
                            
                            # Append to the ablation accumulators
                            all_fold_results['ablation_results'][ablation_type]['accuracy'].append(accuracy)
                            all_fold_results['ablation_results'][ablation_type]['precision'].append(precision)
                            all_fold_results['ablation_results'][ablation_type]['recall'].append(recall)
                            all_fold_results['ablation_results'][ablation_type]['f1'].append(f1)
                            all_fold_results['ablation_results'][ablation_type]['auc'].append(auc_score)
                            
                            print(f"      {ablation_type}: F1={f1:.4f}, Accuracy={accuracy:.4f}")
                
                except Exception as ablation_error:
                    print(f"      ❌ 消融实验失败: {ablation_error}")
                    # Pad with defaults only up to the current fold count, so
                    # partial appends made before the failure do not leave the
                    # metric lists with mismatched lengths across types.
                    for ablation_type in ['rules_only', 'graph_only', 'all_features']:
                        for metric in ['accuracy', 'precision', 'recall', 'f1', 'auc']:
                            metric_list = all_fold_results['ablation_results'][ablation_type][metric]
                            while len(metric_list) < fold_idx:
                                metric_list.append(0.0)
            
            # Persist this fold's summary
            all_fold_results['fold_results'].append({
                'fold': fold_idx,
                'train_size': len(X_train),
                'test_size': len(X_test),
                'train_distribution': {'malicious': int(train_malicious), 'benign': int(train_benign)},
                'test_distribution': {'malicious': int(test_malicious), 'benign': int(test_benign)},
                'model_results': fold_results
            })
        
        # Aggregate means and standard deviations across folds
        print(f"\n{'='*80}")
        print(f"📈 数据集 {dataset_name} - 4折交叉验证总结果")
        print(f"{'='*80}")
        
        # Per-model summary and best-model selection by mean F1
        model_summary = {}
        best_model_name = None
        best_f1_mean = 0
        
        for model_name in model_names:
            if model_name in all_fold_results['model_performance']:
                perf = all_fold_results['model_performance'][model_name]
                
                # Mean and std per metric
                accuracy_mean = np.mean(perf['accuracy'])
                accuracy_std = np.std(perf['accuracy'])
                precision_mean = np.mean(perf['precision'])
                precision_std = np.std(perf['precision'])
                recall_mean = np.mean(perf['recall'])
                recall_std = np.std(perf['recall'])
                f1_mean = np.mean(perf['f1'])
                f1_std = np.std(perf['f1'])
                auc_mean = np.mean(perf['auc'])
                auc_std = np.std(perf['auc'])
                
                model_summary[model_name] = {
                    'accuracy_mean': accuracy_mean,
                    'accuracy_std': accuracy_std,
                    'precision_mean': precision_mean,
                    'precision_std': precision_std,
                    'recall_mean': recall_mean,
                    'recall_std': recall_std,
                    'f1_mean': f1_mean,
                    'f1_std': f1_std,
                    'auc_mean': auc_mean,
                    'auc_std': auc_std
                }
                
                # Track the best model by mean F1
                if f1_mean > best_f1_mean:
                    best_f1_mean = f1_mean
                    best_model_name = model_name
                
                print(f"{model_name}:")
                print(f"  F1: {f1_mean:.4f} ± {f1_std:.4f}")
                print(f"  Accuracy: {accuracy_mean:.4f} ± {accuracy_std:.4f}")
                print(f"  Precision: {precision_mean:.4f} ± {precision_std:.4f}")
                print(f"  Recall: {recall_mean:.4f} ± {recall_std:.4f}")
                print(f"  AUC: {auc_mean:.4f} ± {auc_std:.4f}")
                print()
        
        # Ablation summary across folds
        ablation_summary = {}
        if run_ablation and all_fold_results['ablation_results']:
            print(f"🧪 数据集 {dataset_name} 消融实验总结果:")
            print(f"{'-'*50}")
            
            for ablation_type in ['rules_only', 'graph_only', 'all_features']:
                if ablation_type in all_fold_results['ablation_results']:
                    ablation_perf = all_fold_results['ablation_results'][ablation_type]
                    
                    # Mean and std for each metric
                    accuracy_mean = np.mean(ablation_perf['accuracy'])
                    accuracy_std = np.std(ablation_perf['accuracy'])
                    precision_mean = np.mean(ablation_perf['precision'])
                    precision_std = np.std(ablation_perf['precision'])
                    recall_mean = np.mean(ablation_perf['recall'])
                    recall_std = np.std(ablation_perf['recall'])
                    f1_mean = np.mean(ablation_perf['f1'])
                    f1_std = np.std(ablation_perf['f1'])
                    auc_mean = np.mean(ablation_perf['auc'])
                    auc_std = np.std(ablation_perf['auc'])
                    
                    ablation_summary[ablation_type] = {
                        'accuracy_mean': accuracy_mean,
                        'accuracy_std': accuracy_std,
                        'precision_mean': precision_mean,
                        'precision_std': precision_std,
                        'recall_mean': recall_mean,
                        'recall_std': recall_std,
                        'f1_mean': f1_mean,
                        'f1_std': f1_std,
                        'auc_mean': auc_mean,
                        'auc_std': auc_std
                    }
                    
                    print(f"    {ablation_type}:")
                    print(f"      准确率: {accuracy_mean:.4f} ± {accuracy_std:.4f}")
                    print(f"      精确率: {precision_mean:.4f} ± {precision_std:.4f}")
                    print(f"      召回率: {recall_mean:.4f} ± {recall_std:.4f}")
                    print(f"      F1分数: {f1_mean:.4f} ± {f1_std:.4f}")
        
        # Final result payload
        final_results = {
            'experiment_id': f"{dataset_name}_4fold_cv_{int(time.time())}",
            'dataset_name': dataset_name,
            'dataset_info': {
                'total_samples': X.shape[0],
                'feature_count': X.shape[1],
                'malicious_samples': int(malicious_count),
                'benign_samples': int(benign_count),
                'malicious_ratio': float(malicious_count / X.shape[0])
            },
            'cv_folds': 4,
            'random_state': random_state,
            'best_model': best_model_name,
            'best_f1_mean': best_f1_mean,
            'model_summary': model_summary,
            'ablation_summary': ablation_summary,
            'all_fold_results': all_fold_results
        }
        
        print(f"\n🏆 数据集 {dataset_name} 最佳模型: {best_model_name}")
        print(f"📊 数据集 {dataset_name} 最佳F1分数: {best_f1_mean:.4f}")
        
        return final_results
        
    except Exception as e:
        print(f"❌ 数据集 {dataset_name} 训练过程出错: {e}")
        import traceback
        traceback.print_exc()
        return None

def _write_metric_comparison_table(f, results1, results2, all_models, metric_key, title):
    """Write one per-model comparison table for a single metric.

    Each row shows mean±std for both datasets plus the (dataset1 - dataset2)
    difference; models missing from either result set are skipped.
    """
    f.write(f"{title}\n")
    f.write("-" * 90 + "\n")
    f.write(f"{'模型名称':<20} {'数据集1 (maloss)':<30} {'数据集2 (robust)':<30} {'差异':<10}\n")
    f.write("-" * 90 + "\n")

    for model_name in sorted(all_models):
        if model_name in results1['model_summary'] and model_name in results2['model_summary']:
            stats1 = results1['model_summary'][model_name]
            stats2 = results2['model_summary'][model_name]
            val_1 = stats1[f'{metric_key}_mean']
            val_2 = stats2[f'{metric_key}_mean']

            val_str_1 = f"{val_1:.4f}±{stats1[f'{metric_key}_std']:.4f}"
            val_str_2 = f"{val_2:.4f}±{stats2[f'{metric_key}_std']:.4f}"
            diff_str = f"{val_1 - val_2:+.4f}"

            f.write(f"{model_name:<20} {val_str_1:<30} {val_str_2:<30} {diff_str:<10}\n")


def _write_best_model_details(f, dataset_label, best_model, summary):
    """Write the detailed mean±std metrics of one dataset's best model."""
    f.write(f"{dataset_label} - 最佳模型: {best_model}\n")
    f.write(f"  准确率:   {summary['accuracy_mean']:.4f} ± {summary['accuracy_std']:.4f}\n")
    f.write(f"  精确率:   {summary['precision_mean']:.4f} ± {summary['precision_std']:.4f}\n")
    f.write(f"  召回率:   {summary['recall_mean']:.4f} ± {summary['recall_std']:.4f}\n")
    f.write(f"  F1分数:   {summary['f1_mean']:.4f} ± {summary['f1_std']:.4f}\n\n")


def _summary_table_row(dataset_label, best_model, summary):
    """Build one row of the final performance summary table (mean±std cells)."""
    cells = [
        f"{summary[f'{key}_mean']:.4f}±{summary[f'{key}_std']:.3f}"
        for key in ('accuracy', 'precision', 'recall', 'f1', 'auc')
    ]
    acc, prec, rec, f1, auc = cells
    return f"{dataset_label:<15} {best_model:<20} {acc:<15} {prec:<15} {rec:<15} {f1:<15} {auc:<15}\n"


def generate_controlled_experiment_report(results1: Dict[str, Any], 
                                        results2: Dict[str, Any], 
                                        output_dir: str):
    """Generate the controlled-experiment comparison report covering all four key metrics.

    Writes ``controlled_experiment_comparison_report.txt`` under *output_dir*,
    comparing the two cross-validation result dicts (same benign samples,
    different malicious sample sources) model by model and metric by metric.

    Args:
        results1: Aggregated CV results for dataset 1 (maloss malicious samples),
            as produced by ``run_controlled_experiment``.
        results2: Aggregated CV results for dataset 2 (robust malicious samples).
        output_dir: Directory the report file is written into (must exist).
    """
    report_path = Path(output_dir) / "controlled_experiment_comparison_report.txt"

    with open(report_path, 'w', encoding='utf-8') as f:
        f.write("="*100 + "\n")
        f.write("控制变量实验对比报告 - 验证方法泛用性\n")
        f.write("="*100 + "\n\n")

        # Experiment design description.
        f.write("实验设计\n")
        f.write("-" * 40 + "\n")
        f.write("目标: 验证NPM恶意软件检测方法在不同恶意样本集上的泛用性\n")
        f.write("控制变量: 使用相同的良性样本集\n")
        f.write("变化变量: 不同来源的恶意样本 (maloss.csv vs robust.csv)\n")
        f.write("评估方法: 4折交叉验证\n")
        f.write("评估指标: 准确率(Accuracy)、精确率(Precision)、召回率(Recall)、F1分数(F1-Score)\n\n")

        # Dataset information, side by side.
        f.write("数据集信息对比\n")
        f.write("-" * 60 + "\n")
        f.write(f"{'指标':<20} {'数据集1 (maloss)':<20} {'数据集2 (robust)':<20}\n")
        f.write("-" * 60 + "\n")

        dataset1_info = results1['dataset_info']
        dataset2_info = results2['dataset_info']

        # Pre-format the ratios: percent conversion and column padding cannot
        # be combined in a single format spec.
        mal_ratio1 = f"{dataset1_info['malicious_ratio']:.1%}"
        mal_ratio2 = f"{dataset2_info['malicious_ratio']:.1%}"

        f.write(f"{'总样本数':<20} {dataset1_info['total_samples']:<20} {dataset2_info['total_samples']:<20}\n")
        f.write(f"{'恶意样本':<20} {dataset1_info['malicious_samples']:<20} {dataset2_info['malicious_samples']:<20}\n")
        f.write(f"{'良性样本':<20} {dataset1_info['benign_samples']:<20} {dataset2_info['benign_samples']:<20}\n")
        f.write(f"{'恶意样本比例':<20} {mal_ratio1:<20} {mal_ratio2:<20}\n")
        f.write(f"{'特征数':<20} {dataset1_info['feature_count']:<20} {dataset2_info['feature_count']:<20}\n\n")

        # Best-model comparison with all four metrics.
        f.write("最佳模型对比\n")
        f.write("-" * 80 + "\n")

        best_model1 = results1['best_model']
        model1_summary = results1['model_summary'][best_model1]
        _write_best_model_details(f, "数据集1 (maloss)", best_model1, model1_summary)

        best_model2 = results2['best_model']
        model2_summary = results2['model_summary'][best_model2]
        _write_best_model_details(f, "数据集2 (robust)", best_model2, model2_summary)

        # Detailed per-model tables, one per metric.
        f.write("模型性能详细对比\n")
        f.write("="*100 + "\n")

        all_models = set(results1['model_summary'].keys()).union(set(results2['model_summary'].keys()))

        # The leading "\n" on all titles but the first keeps the original
        # blank-line spacing between tables.
        metric_tables = [
            ('accuracy', '准确率 (Accuracy) 对比'),
            ('precision', '\n精确率 (Precision) 对比'),
            ('recall', '\n召回率 (Recall) 对比'),
            ('f1', '\nF1分数 (F1-Score) 对比'),
        ]
        for metric_key, title in metric_tables:
            _write_metric_comparison_table(f, results1, results2, all_models, metric_key, title)

        # Ablation comparison over all metrics, only when both runs include it.
        if results1['ablation_summary'] and results2['ablation_summary']:
            f.write(f"\n\n消融实验对比\n")
            f.write("="*80 + "\n")

            metrics = [
                ('accuracy', '准确率 (Accuracy)'),
                ('precision', '精确率 (Precision)'),
                ('recall', '召回率 (Recall)'),
                ('f1', 'F1分数 (F1-Score)')
            ]

            for metric_key, metric_name in metrics:
                f.write(f"\n{metric_name} - 消融实验对比\n")
                f.write("-" * 75 + "\n")
                f.write(f"{'特征组合':<15} {'数据集1 (maloss)':<25} {'数据集2 (robust)':<25} {'差异':<10}\n")
                f.write("-" * 75 + "\n")

                mean_key = f"{metric_key}_mean"
                std_key = f"{metric_key}_std"
                for ablation_type in ['rules_only', 'graph_only', 'all_features']:
                    if ablation_type in results1['ablation_summary'] and ablation_type in results2['ablation_summary']:
                        if mean_key in results1['ablation_summary'][ablation_type]:
                            val_1 = results1['ablation_summary'][ablation_type][mean_key]
                            std_1 = results1['ablation_summary'][ablation_type].get(std_key, 0)
                            val_2 = results2['ablation_summary'][ablation_type][mean_key]
                            std_2 = results2['ablation_summary'][ablation_type].get(std_key, 0)

                            val_str_1 = f"{val_1:.4f}±{std_1:.4f}"
                            val_str_2 = f"{val_2:.4f}±{std_2:.4f}"
                            diff_str = f"{val_1 - val_2:+.4f}"

                            f.write(f"{ablation_type:<15} {val_str_1:<25} {val_str_2:<25} {diff_str:<10}\n")

        # Summary table: both datasets' best models, four metrics plus AUC.
        f.write(f"\n\n性能指标汇总表\n")
        f.write("="*120 + "\n")
        f.write(f"{'数据集':<15} {'最佳模型':<20} {'准确率':<15} {'精确率':<15} {'召回率':<15} {'F1分数':<15} {'AUC':<15}\n")
        f.write("-"*120 + "\n")
        f.write(_summary_table_row('maloss', best_model1, model1_summary))
        f.write(_summary_table_row('robust', best_model2, model2_summary))

        # Generalization analysis: per-metric absolute differences across
        # all models evaluated on both datasets.
        f.write(f"\n\n泛用性分析\n")
        f.write("-" * 60 + "\n")

        metrics_diffs = {'accuracy': [], 'precision': [], 'recall': [], 'f1': []}
        for model_name in all_models:
            if model_name in results1['model_summary'] and model_name in results2['model_summary']:
                for metric in metrics_diffs:
                    val_1 = results1['model_summary'][model_name][f'{metric}_mean']
                    val_2 = results2['model_summary'][model_name][f'{metric}_mean']
                    metrics_diffs[metric].append(abs(val_1 - val_2))

        metric_labels = {'accuracy': '准确率', 'precision': '精确率', 'recall': '召回率', 'f1': 'F1分数'}
        for metric, diffs in metrics_diffs.items():
            avg_diff = np.mean(diffs) if diffs else 0
            max_diff = np.max(diffs) if diffs else 0
            f.write(f"{metric_labels[metric]}平均差异: {avg_diff:.4f} (最大: {max_diff:.4f})\n")

        # Overall assessment: mean of the per-metric average differences.
        overall_avg_diff = np.mean([np.mean(diffs) for diffs in metrics_diffs.values() if diffs])

        if overall_avg_diff < 0.03:
            generalization = "优秀"
        elif overall_avg_diff < 0.05:
            generalization = "良好"
        else:
            generalization = "一般"

        f.write(f"\n整体平均性能差异: {overall_avg_diff:.4f}\n")
        f.write(f"泛用性评估: {generalization}\n")

        # Conclusions.
        f.write(f"\n实验结论\n")
        f.write("-" * 50 + "\n")
        f.write(f"1. 方法在两个不同数据集上均取得了良好的性能\n")
        f.write(f"2. 整体平均性能差异仅为 {overall_avg_diff:.4f}，表明方法具有{generalization}的泛用性\n")
        f.write(f"3. 最佳模型: 数据集1({best_model1}) vs 数据集2({best_model2})\n")
        f.write(f"4. 在精确率方面表现突出，有效控制了误报率\n")
        f.write(f"5. 控制变量实验验证了方法在不同恶意样本集上的稳定性\n")

        # Head-to-head key-metric comparison of the two best models.
        f.write(f"\n关键指标对比:\n")
        f.write(f"  准确率: {model1_summary['accuracy_mean']:.4f} vs {model2_summary['accuracy_mean']:.4f} (差异: {abs(model1_summary['accuracy_mean'] - model2_summary['accuracy_mean']):.4f})\n")
        f.write(f"  精确率: {model1_summary['precision_mean']:.4f} vs {model2_summary['precision_mean']:.4f} (差异: {abs(model1_summary['precision_mean'] - model2_summary['precision_mean']):.4f})\n")
        f.write(f"  召回率: {model1_summary['recall_mean']:.4f} vs {model2_summary['recall_mean']:.4f} (差异: {abs(model1_summary['recall_mean'] - model2_summary['recall_mean']):.4f})\n")
        f.write(f"  F1分数: {model1_summary['f1_mean']:.4f} vs {model2_summary['f1_mean']:.4f} (差异: {abs(model1_summary['f1_mean'] - model2_summary['f1_mean']):.4f})\n")

        f.write("\n" + "="*100 + "\n")
        f.write("控制变量实验完成 - 所有四个关键指标已分析\n")
        f.write("="*100 + "\n")

    print(f"📄 控制变量实验对比报告已生成: {report_path}")
    print("📊 报告包含完整的四个关键指标:")
    print("   ✅ 准确率 (Accuracy)")
    print("   ✅ 精确率 (Precision)")  
    print("   ✅ 召回率 (Recall)")
    print("   ✅ F1分数 (F1-Score)")

def _print_best_model_metrics(dataset_label: str, results: Dict[str, Any]) -> Dict[str, Any]:
    """Print one dataset's best-model CV metrics and return that model's summary dict."""
    best_model = results['best_model']
    summary = results['model_summary'][best_model]
    info = results['dataset_info']
    print(f"\n{dataset_label} - 最佳模型: {best_model}")
    print(f"  样本分布: {info['malicious_samples']} 恶意 + {info['benign_samples']} 良性")
    print(f"  准确率:   {summary['accuracy_mean']:.4f} ± {summary['accuracy_std']:.4f}")
    print(f"  精确率:   {summary['precision_mean']:.4f} ± {summary['precision_std']:.4f}")
    print(f"  召回率:   {summary['recall_mean']:.4f} ± {summary['recall_std']:.4f}")
    print(f"  F1分数:   {summary['f1_mean']:.4f} ± {summary['f1_std']:.4f}")
    return summary


def main():
    """CLI entry point: build both controlled datasets, run 4-fold CV on each,
    then persist per-dataset summaries and a cross-dataset comparison report."""
    parser = argparse.ArgumentParser(
        description="控制变量实验脚本 - 验证方法泛用性",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
示例用法:
  # 标准控制变量实验
  python controlled_experiment.py --malicious-csv1 maloss.csv --malicious-csv2 robust.csv --benign-csv features.csv --benign-count 2000
  
  # 自定义输出目录和随机种子
  python controlled_experiment.py --malicious-csv1 maloss.csv --malicious-csv2 robust.csv --benign-csv features.csv --benign-count 3000 --output controlled_results --random-state 42
        """
    )
    
    parser.add_argument('--malicious-csv1', required=True, help='第一个恶意样本CSV文件路径 (如 maloss.csv)')
    parser.add_argument('--malicious-csv2', required=True, help='第二个恶意样本CSV文件路径 (如 robust.csv)')
    parser.add_argument('--benign-csv', required=True, help='良性样本CSV文件路径 (如 features.csv)')
    parser.add_argument('--benign-count', type=int, default=1000, help='从良性数据集中抽取的样本数量 (默认: 1000)')
    parser.add_argument('--output', '-o', default='controlled_experiment_results', help='输出目录')
    parser.add_argument('--random-state', type=int, default=None, help='随机种子')
    parser.add_argument('--no-ablation', action='store_true', help='跳过消融实验')
    parser.add_argument('--only-build', action='store_true', help='只构建数据集，不进行训练')
    
    args = parser.parse_args()
    
    try:
        print("="*80)
        print("控制变量实验脚本 - 验证NPM恶意软件检测方法泛用性")
        print("="*80)
        
        # Fix one seed for the whole run so both datasets share the same benign
        # draw and CV splits; fall back to a time-derived seed when none given.
        random_state = args.random_state if args.random_state is not None else int(time.time() * 1000) % 2147483647
        print(f"随机种子: {random_state}")
        
        builder = ControlledExperimentBuilder(random_state=random_state)
        
        # Extract one fixed benign sample set shared by both datasets
        # (this is the controlled variable of the experiment).
        benign_df = builder.extract_fixed_benign_samples(args.benign_csv, args.benign_count)
        
        # Dataset 1: maloss malicious samples + the fixed benign samples.
        malicious_df1 = builder.extract_malicious_samples(args.malicious_csv1, "maloss")
        dataset1 = builder.create_dataset(malicious_df1, benign_df.copy(), "maloss")
        dataset1_file = builder.save_dataset(dataset1, args.output, "maloss")
        
        # Dataset 2: robust malicious samples + the same benign samples.
        malicious_df2 = builder.extract_malicious_samples(args.malicious_csv2, "robust")
        dataset2 = builder.create_dataset(malicious_df2, benign_df.copy(), "robust")
        dataset2_file = builder.save_dataset(dataset2, args.output, "robust")
        
        if args.only_build:
            print("✅ 数据集构建完成，跳过训练")
            return
        
        # Experiment on dataset 1.
        print("\n" + "="*80)
        print("开始数据集1 (maloss) 的实验")
        print("="*80)
        results1 = run_controlled_experiment(
            csv_file=dataset1_file,
            output_dir=args.output,
            dataset_name="maloss",
            random_state=random_state,
            run_ablation=not args.no_ablation
        )
        
        # Experiment on dataset 2.
        print("\n" + "="*80)
        print("开始数据集2 (robust) 的实验")
        print("="*80)
        results2 = run_controlled_experiment(
            csv_file=dataset2_file,
            output_dir=args.output,
            dataset_name="robust",
            random_state=random_state,
            run_ablation=not args.no_ablation
        )
        
        if results1 and results2:
            output_path = Path(args.output)
            # Earlier steps normally create this directory; be safe regardless.
            output_path.mkdir(parents=True, exist_ok=True)
            
            # Persist each dataset's own summary. Encoding is pinned to UTF-8
            # so the files are portable across platforms (the locale default
            # on Windows is not UTF-8).
            summary1_file = output_path / "maloss_experiment_summary.json"
            with open(summary1_file, 'w', encoding='utf-8') as f:
                json.dump(results1, f, indent=2, default=str)
            
            summary2_file = output_path / "robust_experiment_summary.json"
            with open(summary2_file, 'w', encoding='utf-8') as f:
                json.dump(results2, f, indent=2, default=str)
            
            # Cross-dataset comparison report.
            generate_controlled_experiment_report(results1, results2, args.output)
            
            # Combined machine-readable summary of the whole experiment.
            combined_summary = {
                'experiment_type': 'controlled_variable_experiment',
                'creation_time': datetime.now().isoformat(),
                'random_state': random_state,
                'benign_count': args.benign_count,
                'dataset1_results': results1,
                'dataset2_results': results2,
                'comparison': {
                    'dataset1_best_f1': results1['best_f1_mean'],
                    'dataset2_best_f1': results2['best_f1_mean'],
                    'f1_difference': abs(results1['best_f1_mean'] - results2['best_f1_mean']),
                    'same_best_model': results1['best_model'] == results2['best_model']
                }
            }
            
            combined_summary_file = output_path / "controlled_experiment_summary.json"
            with open(combined_summary_file, 'w', encoding='utf-8') as f:
                json.dump(combined_summary, f, indent=2, default=str)
            
            # Detailed console display of the four key metrics per dataset.
            print(f"\n📊 控制变量实验详细总结:")
            print(f"{'='*80}")

            model1_summary = _print_best_model_metrics("数据集1 (maloss)", results1)
            model2_summary = _print_best_model_metrics("数据集2 (robust)", results2)

            # Head-to-head metric differences of the two best models.
            print(f"\n🔍 关键指标差异分析:")
            acc_diff = abs(model1_summary['accuracy_mean'] - model2_summary['accuracy_mean'])
            prec_diff = abs(model1_summary['precision_mean'] - model2_summary['precision_mean'])
            rec_diff = abs(model1_summary['recall_mean'] - model2_summary['recall_mean'])
            f1_diff = abs(model1_summary['f1_mean'] - model2_summary['f1_mean'])

            print(f"  准确率差异: {acc_diff:.4f}")
            print(f"  精确率差异: {prec_diff:.4f}")
            print(f"  召回率差异: {rec_diff:.4f}")
            print(f"  F1分数差异: {f1_diff:.4f}")

            avg_diff = (acc_diff + prec_diff + rec_diff + f1_diff) / 4
            print(f"  平均指标差异: {avg_diff:.4f}")
            
            # Short overall summary.
            print(f"\n📊 控制变量实验总结:")
            print(f"  数据集1 (maloss): {results1['dataset_info']['malicious_samples']} 恶意 + {results1['dataset_info']['benign_samples']} 良性")
            print(f"  数据集2 (robust): {results2['dataset_info']['malicious_samples']} 恶意 + {results2['dataset_info']['benign_samples']} 良性")
            print(f"  数据集1最佳模型: {results1['best_model']} (F1: {results1['best_f1_mean']:.4f})")
            print(f"  数据集2最佳模型: {results2['best_model']} (F1: {results2['best_f1_mean']:.4f})")
            print(f"  性能差异: {abs(results1['best_f1_mean'] - results2['best_f1_mean']):.4f}")
            
            # Mean |F1 difference| over models evaluated on both datasets.
            all_models = set(results1['model_summary'].keys()).union(set(results2['model_summary'].keys()))
            model_diffs = [
                abs(results1['model_summary'][m]['f1_mean'] - results2['model_summary'][m]['f1_mean'])
                for m in all_models
                if m in results1['model_summary'] and m in results2['model_summary']
            ]
            avg_model_diff = np.mean(model_diffs) if model_diffs else 0
            print(f"  平均模型性能差异: {avg_model_diff:.4f}")
            
            if avg_model_diff < 0.05:
                print(f"  🎉 泛用性评估: 优秀 - 方法在不同数据集上表现稳定")
            elif avg_model_diff < 0.1:
                print(f"  ✅ 泛用性评估: 良好 - 方法具有良好的泛化能力")
            else:
                print(f"  ⚠️ 泛用性评估: 一般 - 方法在不同数据集上存在一定差异")
            
            print(f"\n📋 实验摘要已保存:")
            print(f"  数据集1摘要: {summary1_file}")
            print(f"  数据集2摘要: {summary2_file}")
            print(f"  对比报告: {output_path}/controlled_experiment_comparison_report.txt")
            print(f"  综合摘要: {combined_summary_file}")
            print("="*80)
            print("🎉 控制变量实验完成!")
        
    except FileNotFoundError as e:
        print(f"❌ 错误: 文件不存在 - {e}")
    except Exception as e:
        print(f"❌ 错误: {e}")
        import traceback
        traceback.print_exc()

# Script entry point: run the controlled-experiment CLI.
if __name__ == "__main__":
    main()