"""
批量实验运行脚本 - 自动运行多个配置组合的对比实验
"""

import os
import json
import time
import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
from concurrent.futures import ProcessPoolExecutor
import argparse

from advanced_training import create_advanced_environment, create_advanced_agent, AdvancedTrainingManager
from parallel_upgrades_env import ParallelUpgradeEnv, generate_feasible_dependency_matrix, up_fill_dependency_matrix
from experiment_configs import (
    get_experiment_config, get_environment_config, get_training_config,
    get_recommended_experiments, validate_config_combination, print_all_configs
)

class BatchExperimentRunner:
    """Batch experiment runner.

    Runs every (experiment, environment, training) configuration combination
    handed to it — optionally across several seeds and in parallel worker
    processes — and persists per-run configs, a JSON results file, a CSV
    summary, and analysis plots under ``base_log_dir``.
    """

    def __init__(self, base_log_dir="./batch_experiments"):
        """Create the runner and the root output directory.

        Args:
            base_log_dir: Directory that will hold one subdirectory per run
                plus the aggregate results files.
        """
        self.base_log_dir = base_log_dir
        self.experiment_results = []
        os.makedirs(base_log_dir, exist_ok=True)

        # Aggregate record files shared by all runs.
        self.results_file = os.path.join(base_log_dir, "experiment_results.json")
        self.summary_file = os.path.join(base_log_dir, "experiment_summary.csv")

    def run_single_experiment(self, exp_name, env_name, train_name, seed=42, gpu_id=None):
        """Run a single experiment for one config combination and seed.

        Args:
            exp_name: Name of the agent/experiment config (resolved via
                ``get_experiment_config``).
            env_name: Name of the environment config.
            train_name: Name of the training config.
            seed: Random seed applied to torch and numpy.
            gpu_id: If given, restrict the run to this GPU via
                ``CUDA_VISIBLE_DEVICES``.

        Returns:
            A result dict with ``status == 'completed'`` (timing, final
            evaluation, detailed metrics) or ``status == 'failed'``
            (including the error message). Never raises on training failure.
        """
        print(f"\n{'='*80}")
        print(f"开始实验: {exp_name} + {env_name} + {train_name} (seed={seed})")
        print(f"{'='*80}")

        # Seed both frameworks for reproducibility.
        torch.manual_seed(seed)
        np.random.seed(seed)

        # NOTE(review): torch is imported at module level, so if CUDA has
        # already been initialized in this process, setting
        # CUDA_VISIBLE_DEVICES here may have no effect — confirm before
        # relying on gpu_id in serial mode.
        if gpu_id is not None:
            os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)

        # Resolve the three named configs.
        exp_config = get_experiment_config(exp_name)
        env_config = get_environment_config(env_name)
        train_config = get_training_config(train_name)

        # Surface (but do not abort on) questionable config combinations.
        warnings = validate_config_combination(exp_config, env_config, train_config)
        if warnings:
            print("配置警告:")
            for warning in warnings:
                print(f"  ⚠️  {warning}")

        # One directory per (configs, seed) combination.
        exp_dir = os.path.join(
            self.base_log_dir, 
            f"{exp_name}_{env_name}_{train_name}_seed{seed}"
        )
        os.makedirs(exp_dir, exist_ok=True)

        # Persist the full configuration next to the run's outputs.
        config_info = {
            'experiment_config': exp_config,
            'environment_config': env_config,
            'training_config': train_config,
            'seed': seed,
            'timestamp': datetime.now().isoformat(),
            'warnings': warnings
        }

        # default=str keeps the dump robust to non-JSON-serializable config
        # values (consistent with _save_intermediate_results below).
        with open(os.path.join(exp_dir, 'config.json'), 'w') as f:
            json.dump(config_info, f, indent=2, default=str)

        try:
            # Build environment and agent on the available device.
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            env = self._create_environment(env_config, device)

            agent = self._create_agent(env, exp_config)

            training_manager = AdvancedTrainingManager(env, agent, exp_dir)

            # Wall-clock timing of the full training run.
            start_time = time.time()

            final_eval = training_manager.train(
                num_episodes=train_config['num_episodes'],
                eval_interval=train_config['eval_interval'],
                save_interval=train_config['save_interval']
            )

            end_time = time.time()
            training_time = end_time - start_time

            # Assemble the success record.
            result = {
                'experiment_name': exp_name,
                'environment_name': env_name,
                'training_name': train_name,
                'seed': seed,
                'training_time_seconds': training_time,
                'training_time_hours': training_time / 3600,
                'final_evaluation': final_eval,
                'experiment_dir': exp_dir,
                'status': 'completed',
                'error': None,
                'config': config_info
            }

            # Attach training-curve and learning metrics in place.
            self._extract_detailed_metrics(result, training_manager, agent)

            print(f"\n实验完成！用时: {training_time/3600:.2f}小时")
            print(f"最终性能: 平均奖励={final_eval.get('avg_reward', 0):.2f}, "
                  f"成功率={final_eval.get('success_rate', 0):.2%}")

            return result

        except Exception as e:
            # A failed run is recorded, not propagated, so the batch continues.
            print(f"实验失败: {str(e)}")
            result = {
                'experiment_name': exp_name,
                'environment_name': env_name,
                'training_name': train_name,
                'seed': seed,
                'status': 'failed',
                'error': str(e),
                'experiment_dir': exp_dir,
                'config': config_info
            }
            return result

    def _create_environment(self, env_config, device):
        """Build a ParallelUpgradeEnv from an environment config dict.

        Generates a feasible (and upward-filled) dependency matrix plus a
        random jump matrix whose jump probability decays with version
        distance, then constructs the environment on ``device``.
        """
        # Dependency structure: feasible, then upward-filled.
        dependency_matrix = generate_feasible_dependency_matrix(
            env_config['num_components'],
            env_config['max_versions'],
            env_config['dep_prob']
        )
        dependency_matrix = up_fill_dependency_matrix(
            env_config['num_components'],
            env_config['max_versions'],
            dependency_matrix
        )

        # Jump matrix: jump_matrix[i, v1, v2] == 1 iff component i may jump
        # directly from version v1 to v2 (v2 > v1).
        jump_matrix = np.zeros((
            env_config['num_components'],
            env_config['max_versions'],
            env_config['max_versions']
        ))

        for i in range(env_config['num_components']):
            for v1 in range(env_config['max_versions']):
                for v2 in range(v1 + 1, env_config['max_versions']):
                    # Longer jumps are less likely, floored at 0.1.
                    distance = v2 - v1
                    jump_prob = max(0.1, 0.8 - 0.1 * distance)
                    jump_matrix[i, v1, v2] = np.random.choice([0, 1], p=[1-jump_prob, jump_prob])

        env = ParallelUpgradeEnv(
            num_components=env_config['num_components'],
            time_windows=env_config['time_windows'],
            dependency_matrix=dependency_matrix,
            jump_matrix=jump_matrix,
            max_parallel=env_config['max_parallel'],
            device=device
        )

        return env

    def _create_agent(self, env, exp_config):
        """Build an AdvancedGNNDQNAgent for ``env`` from an experiment config."""
        # Imported lazily so the module loads without the agent package.
        from advanced_dqn_agent import AdvancedGNNDQNAgent

        agent = AdvancedGNNDQNAgent(
            dependency_matrix=env.dependency_matrix.cpu().numpy(),
            jump_matrix=env.jump_matrix.cpu().numpy(),
            upgrade_time=env.upgrade_time.cpu().numpy(),
            device=str(env.device),
            graph_hid_dim=exp_config['graph_hid_dim'],
            q_net_hid_dim=exp_config['q_net_hid_dim'],
            agent_feat_dim=exp_config['agent_feat_dim'],
            lr=exp_config['lr'],
            gamma=exp_config['gamma'],
            eps_start=exp_config['eps_start'],
            eps_end=exp_config['eps_end'],
            eps_decay=exp_config['eps_decay'],
            target_update=exp_config['target_update'],
            use_double_dqn=exp_config['use_double_dqn'],
            use_dueling=exp_config['use_dueling'],
            use_noisy=exp_config['use_noisy'],
            use_priority_buffer=exp_config['use_priority_buffer']
        )

        return agent

    def _extract_detailed_metrics(self, result, training_manager, agent):
        """Add training-curve and network-learning metrics to ``result`` in place.

        Reads ``episode_rewards``, ``success_rates`` and ``best_reward`` from
        the training manager, and ``training_metrics['loss_history']`` /
        ``['q_value_history']`` from the agent.
        """
        # Training-curve metrics (zeros when the history is too short).
        result['training_metrics'] = {
            'final_reward': training_manager.episode_rewards[-1] if training_manager.episode_rewards else 0,
            'best_reward': training_manager.best_reward,
            'avg_reward_last_100': np.mean(training_manager.episode_rewards[-100:]) if len(training_manager.episode_rewards) >= 100 else 0,
            'reward_std_last_100': np.std(training_manager.episode_rewards[-100:]) if len(training_manager.episode_rewards) >= 100 else 0,
            'success_rate_final': training_manager.success_rates[-1] if training_manager.success_rates else 0,
            'avg_success_rate': np.mean(training_manager.success_rates) if training_manager.success_rates else 0,
            'convergence_episode': self._find_convergence_point(training_manager.episode_rewards),
            'training_stability': self._compute_training_stability(training_manager.episode_rewards)
        }

        # Network learning metrics, only when any loss was recorded.
        if agent.training_metrics['loss_history']:
            result['learning_metrics'] = {
                'final_loss': agent.training_metrics['loss_history'][-1],
                'avg_loss': np.mean(agent.training_metrics['loss_history']),
                'loss_std': np.std(agent.training_metrics['loss_history']),
                'final_q_value': agent.training_metrics['q_value_history'][-1] if agent.training_metrics['q_value_history'] else 0,
                'avg_q_value': np.mean(agent.training_metrics['q_value_history']) if agent.training_metrics['q_value_history'] else 0
            }

    def _find_convergence_point(self, rewards, window=200, threshold=0.1):
        """Return the first episode index where the reward curve flattens.

        Convergence is declared at the first ``i`` where the mean of
        ``rewards[i:i+window]`` and ``rewards[i+window:i+2*window]`` differ
        (relatively) by less than ``threshold``. Falls back to
        ``len(rewards)`` when the curve is too short or never flattens.
        """
        if len(rewards) < window * 2:
            return len(rewards)

        for i in range(window, len(rewards) - window):
            recent_mean = np.mean(rewards[i:i+window])
            future_mean = np.mean(rewards[i+window:i+2*window])

            # max(..., 1) guards against division by a near-zero mean.
            if abs(recent_mean - future_mean) / max(abs(recent_mean), 1) < threshold:
                return i

        return len(rewards)

    def _compute_training_stability(self, rewards, window=100):
        """Return the coefficient of variation over the last ``window`` rewards.

        Lower is more stable; returns 0 when the history is shorter than the
        window, and ``inf`` when the recent mean is exactly zero.
        """
        if len(rewards) < window:
            return 0

        # Coefficient of variation of the last `window` episodes.
        recent_rewards = rewards[-window:]
        mean_reward = np.mean(recent_rewards)
        std_reward = np.std(recent_rewards)

        if mean_reward == 0:
            return float('inf')

        return std_reward / abs(mean_reward)

    def run_batch_experiments(self, experiment_list, num_seeds=3, parallel=False, max_workers=None):
        """Run every configuration in ``experiment_list`` across several seeds.

        Args:
            experiment_list: List of dicts with keys 'experiment',
                'environment', 'training' and optionally 'purpose'.
            num_seeds: Number of seeds per configuration (seeds 42, 43, ...).
            parallel: Run via ProcessPoolExecutor instead of serially.
            max_workers: Worker-process cap; defaults to min(4, CPU count).

        Returns:
            List of per-run result dicts (also stored on
            ``self.experiment_results`` and written to disk).
        """
        print(f"准备运行 {len(experiment_list)} 个实验配置，每个配置运行 {num_seeds} 次")
        print(f"总共将运行 {len(experiment_list) * num_seeds} 个实验")

        if parallel and max_workers is None:
            # os.cpu_count() may return None; fall back to one worker.
            max_workers = min(4, os.cpu_count() or 1)

        # Expand (config, seed) cross product into a flat work list.
        all_experiments = []
        for exp_config in experiment_list:
            for seed in range(42, 42 + num_seeds):
                all_experiments.append({
                    'exp_name': exp_config['experiment'],
                    'env_name': exp_config['environment'],
                    'train_name': exp_config['training'],
                    'seed': seed,
                    'purpose': exp_config.get('purpose', '')
                })

        print(f"实验列表已生成，共 {len(all_experiments)} 个实验")

        results = []
        if parallel:
            print(f"使用并行处理，最大工作进程数: {max_workers}")
            # NOTE(review): parallel workers may contend for GPU memory;
            # prefer a serial dry run first.
            with ProcessPoolExecutor(max_workers=max_workers) as executor:
                futures = []
                for exp in all_experiments:
                    future = executor.submit(
                        self.run_single_experiment,
                        exp['exp_name'], exp['env_name'], exp['train_name'], exp['seed']
                    )
                    futures.append((future, exp))

                for future, exp in futures:
                    try:
                        # Cap each run at 4 hours of wall-clock time.
                        result = future.result(timeout=3600*4)
                        result['purpose'] = exp['purpose']
                        results.append(result)
                        self._save_intermediate_results(results)
                    except Exception as e:
                        print(f"实验失败: {exp}, 错误: {e}")
                        results.append({
                            'experiment_name': exp['exp_name'],
                            'environment_name': exp['env_name'],
                            'training_name': exp['train_name'],
                            'seed': exp['seed'],
                            'status': 'timeout_or_error',
                            'error': str(e),
                            'purpose': exp['purpose']
                        })
        else:
            # Serial path: one experiment at a time, checkpoint after each.
            for i, exp in enumerate(all_experiments):
                print(f"\n进度: {i+1}/{len(all_experiments)}")
                try:
                    result = self.run_single_experiment(
                        exp['exp_name'], exp['env_name'], exp['train_name'], exp['seed']
                    )
                    result['purpose'] = exp['purpose']
                    results.append(result)
                except Exception as e:
                    print(f"实验失败: {exp}, 错误: {e}")
                    results.append({
                        'experiment_name': exp['exp_name'],
                        'environment_name': exp['env_name'],
                        'training_name': exp['train_name'],
                        'seed': exp['seed'],
                        'status': 'failed',
                        'error': str(e),
                        'purpose': exp['purpose']
                    })

                # Persist after every experiment so a crash loses at most one run.
                self._save_intermediate_results(results)

        self.experiment_results = results
        self._save_final_results()

        return results

    def _save_intermediate_results(self, results):
        """Checkpoint the results collected so far to the JSON results file."""
        with open(self.results_file, 'w') as f:
            json.dump(results, f, indent=2, default=str)

    def _save_final_results(self):
        """Write the final JSON results and a flat CSV summary.

        Completed runs get performance columns; failed runs get
        status/error columns instead (pandas fills the gaps with NaN).
        """
        with open(self.results_file, 'w') as f:
            json.dump(self.experiment_results, f, indent=2, default=str)

        summary_data = []
        for result in self.experiment_results:
            if result['status'] == 'completed':
                row = {
                    'experiment': result['experiment_name'],
                    'environment': result['environment_name'],
                    'training': result['training_name'],
                    'seed': result['seed'],
                    'purpose': result.get('purpose', ''),
                    'training_time_hours': result.get('training_time_hours', 0),
                    'final_avg_reward': result['final_evaluation'].get('avg_reward', 0),
                    'final_success_rate': result['final_evaluation'].get('success_rate', 0),
                    'final_avg_makespan': result['final_evaluation'].get('avg_makespan', 0),
                    'best_reward': result.get('training_metrics', {}).get('best_reward', 0),
                    'convergence_episode': result.get('training_metrics', {}).get('convergence_episode', 0),
                    'training_stability': result.get('training_metrics', {}).get('training_stability', 0)
                }
            else:
                row = {
                    'experiment': result['experiment_name'],
                    'environment': result['environment_name'],
                    'training': result['training_name'],
                    'seed': result['seed'],
                    'purpose': result.get('purpose', ''),
                    'status': result['status'],
                    'error': result.get('error', '')
                }

            summary_data.append(row)

        summary_df = pd.DataFrame(summary_data)
        summary_df.to_csv(self.summary_file, index=False)

        print(f"\n实验结果已保存:")
        print(f"  详细结果: {self.results_file}")
        print(f"  摘要表格: {self.summary_file}")

    def analyze_results(self):
        """Analyze the batch results recorded in the summary CSV.

        Prints aggregate statistics grouped by experiment config and by
        environment, saves analysis plots, and reports the single best
        configuration by final average reward.
        """
        if not self.experiment_results:
            print("没有实验结果可供分析")
            return

        # Reload from the CSV written by _save_final_results.
        df = pd.read_csv(self.summary_file)
        # If every run failed, the performance columns were never written and
        # df['final_avg_reward'] would raise KeyError — bail out early.
        if 'final_avg_reward' not in df.columns:
            print("没有成功完成的实验")
            return
        successful_df = df[df['final_avg_reward'].notna()].copy()

        if successful_df.empty:
            print("没有成功完成的实验")
            return

        print("\n" + "="*80)
        print("实验结果分析")
        print("="*80)

        # Headline counts.
        print(f"成功完成的实验: {len(successful_df)}")
        print(f"失败的实验: {len(df) - len(successful_df)}")

        # Performance grouped by agent/experiment configuration.
        exp_analysis = successful_df.groupby('experiment').agg({
            'final_avg_reward': ['mean', 'std', 'max'],
            'final_success_rate': ['mean', 'std', 'max'],
            'training_time_hours': ['mean', 'std'],
            'convergence_episode': ['mean', 'std']
        }).round(4)

        print("\n按实验配置分组的性能对比:")
        print("-" * 60)
        print(exp_analysis)

        # Performance grouped by environment complexity.
        env_analysis = successful_df.groupby('environment').agg({
            'final_avg_reward': ['mean', 'std'],
            'final_success_rate': ['mean', 'std'],
            'training_time_hours': ['mean', 'std']
        }).round(4)

        print("\n按环境复杂度的性能对比:")
        print("-" * 60)
        print(env_analysis)

        self._create_analysis_plots(successful_df)

        # Single best run by final average reward.
        best_config = successful_df.loc[successful_df['final_avg_reward'].idxmax()]
        print("\n最佳配置:")
        print("-" * 30)
        print(f"实验: {best_config['experiment']}")
        print(f"环境: {best_config['environment']}")
        print(f"训练: {best_config['training']}")
        print(f"种子: {best_config['seed']}")
        print(f"平均奖励: {best_config['final_avg_reward']:.4f}")
        print(f"成功率: {best_config['final_success_rate']:.2%}")
        print(f"训练时间: {best_config['training_time_hours']:.2f}小时")

    def _create_analysis_plots(self, df):
        """Save a 2x2 grid of analysis plots to batch_analysis.png and show it.

        NOTE(review): titles are Chinese — rendering correctly requires a
        matplotlib font with CJK glyphs; confirm the runtime font config.
        """
        fig, axes = plt.subplots(2, 2, figsize=(16, 12))
        fig.suptitle('批量实验结果分析', fontsize=16, fontweight='bold')

        # 1. Reward distribution per experiment configuration.
        sns.boxplot(data=df, x='experiment', y='final_avg_reward', ax=axes[0,0])
        axes[0,0].set_title('不同实验配置的奖励分布')
        axes[0,0].tick_params(axis='x', rotation=45)

        # 2. Success-rate distribution per experiment configuration.
        sns.boxplot(data=df, x='experiment', y='final_success_rate', ax=axes[0,1])
        axes[0,1].set_title('不同实验配置的成功率分布')
        axes[0,1].tick_params(axis='x', rotation=45)

        # 3. Training time per experiment configuration.
        sns.boxplot(data=df, x='experiment', y='training_time_hours', ax=axes[1,0])
        axes[1,0].set_title('不同实验配置的训练时间')
        axes[1,0].tick_params(axis='x', rotation=45)

        # 4. Reward vs. success rate scatter.
        sns.scatterplot(data=df, x='final_avg_reward', y='final_success_rate', 
                       hue='experiment', style='environment', ax=axes[1,1])
        axes[1,1].set_title('奖励与成功率的关系')

        plt.tight_layout()
        plt.savefig(os.path.join(self.base_log_dir, 'batch_analysis.png'), 
                   dpi=300, bbox_inches='tight')
        plt.show()

def main():
    """Command-line entry point: parse args, pick an experiment set, run it."""
    parser = argparse.ArgumentParser(description='批量实验运行器')
    parser.add_argument('--mode', choices=['quick', 'ablation', 'full', 'custom'],
                        default='quick', help='实验模式')
    parser.add_argument('--seeds', type=int, default=3, help='每个配置的随机种子数量')
    parser.add_argument('--parallel', action='store_true', help='是否并行运行')
    parser.add_argument('--max-workers', type=int, help='最大并行工作进程数')
    parser.add_argument('--list-configs', action='store_true', help='列出所有可用配置')

    args = parser.parse_args()

    # Listing configs is a standalone action — print and exit.
    if args.list_configs:
        print_all_configs()
        return

    runner = BatchExperimentRunner()

    # Static experiment sets per mode; 'full' is resolved dynamically below.
    preset_experiments = {
        'quick': [
            {'experiment': 'baseline', 'environment': 'simple', 'training': 'quick_test'},
            {'experiment': 'rainbow', 'environment': 'simple', 'training': 'quick_test'},
        ],
        'ablation': [
            {'experiment': name, 'environment': 'medium', 'training': 'ablation_study'}
            for name in ('baseline', 'double_dqn', 'dueling', 'noisy', 'priority', 'rainbow')
        ],
        'custom': [
            {'experiment': 'rainbow', 'environment': 'medium', 'training': 'standard'},
        ],
    }
    mode_banner = {
        'quick': "快速测试模式: 运行基础配置验证",
        'ablation': "消融研究模式: 测试各个组件的贡献",
        'full': "完整实验模式: 运行所有推荐的实验组合",
        'custom': "自定义模式: 请修改代码中的experiments列表",
    }

    if args.mode == 'full':
        experiments = get_recommended_experiments()
    else:
        experiments = preset_experiments[args.mode]
    print(mode_banner[args.mode])

    print(f"将运行 {len(experiments)} 个实验配置")

    runner.run_batch_experiments(
        experiments,
        num_seeds=args.seeds,
        parallel=args.parallel,
        max_workers=args.max_workers
    )

    runner.analyze_results()

    print(f"\n批量实验完成！结果保存在: {runner.base_log_dir}")

# Run the CLI only when executed as a script, so importing this module
# has no side effects beyond definitions.
if __name__ == "__main__":
    main()