import torch
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from collections import defaultdict
import time
import os
from tqdm import tqdm

from advanced_dqn_agent import AdvancedGNNDQNAgent
from parallel_upgrades_env import ParallelUpgradeEnv, generate_feasible_dependency_matrix, up_fill_dependency_matrix

class AdvancedTrainingManager:
    """Advanced training manager.

    Drives the episode loop for a GNN-DQN agent on the parallel-upgrade
    environment, tracks per-episode statistics, periodically evaluates and
    checkpoints the agent, and renders training curves when training ends.
    """
    def __init__(self, env, agent, log_dir="./logs"):
        self.env = env
        self.agent = agent
        self.log_dir = log_dir
        os.makedirs(log_dir, exist_ok=True)
        
        # Per-episode training statistics
        self.episode_rewards = []
        self.episode_lengths = []
        self.episode_makespans = []
        self.episode_skip_counts = []
        self.episode_parallel_utils = []
        self.success_rates = []  # completion rate per evaluation round
        
        # Best-performance bookkeeping
        self.best_reward = float('-inf')
        self.best_makespan = float('inf')
        self.best_model_path = None
        
        # Sliding window used to smooth the learning curve
        self.reward_window = []
        self.window_size = 100
        
    def train(self, num_episodes=2000, eval_interval=100, save_interval=500):
        """Main training loop.

        Runs `num_episodes` training episodes, evaluating every
        `eval_interval` episodes and checkpointing every `save_interval`
        episodes. Returns the final evaluation statistics dict.
        """
        print(f"开始训练，总回合数: {num_episodes}")
        print(f"评估间隔: {eval_interval}, 保存间隔: {save_interval}")
        
        start_time = time.time()
        
        for episode in tqdm(range(num_episodes), desc="训练进度"):
            episode_reward, episode_length, episode_info = self._run_episode(is_training=True)
            
            # Record statistics
            self.episode_rewards.append(episode_reward)
            self.episode_lengths.append(episode_length)
            
            if 'makespan' in episode_info:
                self.episode_makespans.append(episode_info['makespan'])
            
            # Sliding-window average reward
            self.reward_window.append(episode_reward)
            if len(self.reward_window) > self.window_size:
                self.reward_window.pop(0)
            
            avg_reward = np.mean(self.reward_window)
            
            # Save a new best model whenever the episode reward improves
            if episode_reward > self.best_reward:
                self.best_reward = episode_reward
                self.best_model_path = os.path.join(self.log_dir, f"best_model_episode_{episode}.pt")
                self.agent.save(self.best_model_path)
            
            # Periodic evaluation
            if episode > 0 and episode % eval_interval == 0:
                eval_stats = self._evaluate(num_eval_episodes=10)
                self._log_progress(episode, avg_reward, eval_stats)
                
                # Track the success-rate trend across evaluation rounds
                self.success_rates.append(eval_stats['success_rate'])
            
            # Periodic checkpointing
            if episode > 0 and episode % save_interval == 0:
                checkpoint_path = os.path.join(self.log_dir, f"checkpoint_episode_{episode}.pt")
                self.agent.save(checkpoint_path)
                self._save_training_stats()
            
            # Network update (once the replay buffer holds a full batch)
            if len(self.agent.buffer) > self.agent.batch_size:
                loss = self.agent.update()
                if loss is not None and episode % 100 == 0:
                    tqdm.write(f"Episode {episode}, Loss: {loss:.4f}, Avg Reward: {avg_reward:.2f}")
        
        total_time = time.time() - start_time
        print(f"\n训练完成！总用时: {total_time:.2f}秒")
        print(f"最佳奖励: {self.best_reward:.2f}")
        print(f"最佳模型保存在: {self.best_model_path}")
        
        # Final evaluation and visualization
        final_eval = self._evaluate(num_eval_episodes=50)
        self._plot_training_curves()
        self._save_training_stats()
        
        return final_eval
    
    def _run_episode(self, is_training=True, max_steps=1000):
        """Run a single episode; returns (total_reward, steps, info).

        When `is_training` is True, each transition is stored in the
        agent's replay buffer.
        """
        obs = self.env.reset()
        total_reward = 0
        step_count = 0
        episode_info = {}
        
        while step_count < max_steps:
            # Current state/action information
            valid_indices, action_features = self.env.get_state_action_features()
            
            if len(valid_indices) == 0:
                break
            
            # Capture the PRE-step agent context. Bug fix: previously
            # `remaining_time`/`active_upgrades` for store_experience were
            # read AFTER env.step, making them identical to the "next_*"
            # values and corrupting the stored transitions.
            remaining_time = self.env.remaining_time / self.env.max_time_window
            active_upgrades = self.env.active_upgrades.copy()
            
            # Select an action
            action = self.agent.select_action(
                state=self.env.state,
                valid_action_indices=valid_indices,
                action_features=action_features,
                remaining_time=remaining_time,
                active_upgrades=self.env.active_upgrades,
                is_training=is_training
            )
            
            # Execute the action
            next_obs, reward, done, info = self.env.step(action, is_need_visualization=False)
            
            # Store experience (training mode only)
            if is_training:
                next_valid_indices, next_action_features = self.env.get_state_action_features()
                
                self.agent.store_experience(
                    state=obs,
                    valid_indices=valid_indices,
                    action_features=action_features,
                    action_idx=action,
                    reward=reward,
                    next_state=next_obs,
                    next_valid_indices=next_valid_indices,
                    next_action_features=next_action_features,
                    done=done,
                    remaining_time=remaining_time,               # pre-step value
                    active_upgrades=active_upgrades,             # pre-step copy
                    next_remaining_time=self.env.remaining_time / self.env.max_time_window,
                    next_active_upgrades=self.env.active_upgrades.copy()
                )
            
            obs = next_obs
            total_reward += reward
            step_count += 1
            
            if done:
                episode_info = info
                break
        
        return total_reward, step_count, episode_info
    
    def _evaluate(self, num_eval_episodes=10):
        """Evaluate the agent greedily and return aggregate statistics."""
        print(f"\n开始评估，评估回合数: {num_eval_episodes}")
        
        eval_rewards = []
        eval_makespans = []
        eval_success = []
        eval_skip_counts = []
        eval_parallel_utils = []
        
        for _ in range(num_eval_episodes):
            reward, length, info = self._run_episode(is_training=False)
            eval_rewards.append(reward)
            
            if 'makespan' in info:
                eval_makespans.append(info['makespan'])
                # "Success" = all upgrades finished within the total time budget
                success = reward > 0 and info['makespan'] < sum(self.env.time_windows)
                eval_success.append(success)
            else:
                eval_success.append(False)
            
            # Collect optional environment-side statistics
            eval_skip_counts.append(getattr(self.env, 'skip_count', 0))
            if hasattr(self.env, 'parallel_util_timeline') and self.env.parallel_util_timeline:
                avg_util = np.mean(self.env.parallel_util_timeline)
                eval_parallel_utils.append(avg_util)
        
        eval_stats = {
            'avg_reward': np.mean(eval_rewards),
            'std_reward': np.std(eval_rewards),
            'avg_makespan': np.mean(eval_makespans) if eval_makespans else 0,
            'std_makespan': np.std(eval_makespans) if eval_makespans else 0,
            'success_rate': np.mean(eval_success),
            'avg_skip_count': np.mean(eval_skip_counts) if eval_skip_counts else 0,
            'avg_parallel_util': np.mean(eval_parallel_utils) if eval_parallel_utils else 0
        }
        
        return eval_stats
    
    def _log_progress(self, episode, avg_reward, eval_stats):
        """Print a progress report for the current evaluation round."""
        agent_info = self.agent.get_training_info()
        
        print(f"\n=== Episode {episode} 评估结果 ===")
        print(f"平均训练奖励: {avg_reward:.2f}")
        print(f"评估平均奖励: {eval_stats['avg_reward']:.2f} ± {eval_stats['std_reward']:.2f}")
        print(f"成功率: {eval_stats['success_rate']:.2%}")
        print(f"平均完成时间: {eval_stats['avg_makespan']:.1f} ± {eval_stats['std_makespan']:.1f}")
        print(f"平均跳过次数: {eval_stats['avg_skip_count']:.1f}")
        print(f"平均并行利用率: {eval_stats['avg_parallel_util']:.2%}")
        print(f"当前Epsilon: {agent_info['current_epsilon']:.3f}")
        print(f"经验池大小: {agent_info['buffer_size']}")
        if agent_info['recent_loss']:
            print(f"最近损失: {np.mean(agent_info['recent_loss']):.4f}")
    
    def _plot_training_curves(self):
        """Plot a 2x3 grid of training diagnostics and save it as PNG."""
        fig, axes = plt.subplots(2, 3, figsize=(18, 12))
        fig.suptitle('训练过程分析', fontsize=16, fontweight='bold')
        
        # 1. Episode reward curve
        axes[0, 0].plot(self.episode_rewards, alpha=0.6, color='blue', linewidth=0.8)
        if len(self.episode_rewards) > 100:
            smoothed = self._smooth_curve(self.episode_rewards, window=100)
            axes[0, 0].plot(smoothed, color='red', linewidth=2, label='平滑曲线')
        axes[0, 0].set_title('回合奖励')
        axes[0, 0].set_xlabel('回合')
        axes[0, 0].set_ylabel('奖励')
        axes[0, 0].grid(True, alpha=0.3)
        axes[0, 0].legend()
        
        # 2. Makespan (task completion time)
        if self.episode_makespans:
            axes[0, 1].plot(self.episode_makespans, alpha=0.6, color='green')
            if len(self.episode_makespans) > 50:
                smoothed = self._smooth_curve(self.episode_makespans, window=50)
                axes[0, 1].plot(smoothed, color='darkgreen', linewidth=2)
            axes[0, 1].set_title('任务完成时间')
            axes[0, 1].set_xlabel('回合')
            axes[0, 1].set_ylabel('时间步')
            axes[0, 1].grid(True, alpha=0.3)
        
        # 3. Success rate trend
        if self.success_rates:
            axes[0, 2].plot(self.success_rates, marker='o', markersize=4, color='purple')
            axes[0, 2].set_title('成功率趋势')
            axes[0, 2].set_xlabel('评估轮次')
            axes[0, 2].set_ylabel('成功率')
            axes[0, 2].set_ylim([0, 1])
            axes[0, 2].grid(True, alpha=0.3)
        
        # 4. Training loss (log scale)
        if self.agent.training_metrics['loss_history']:
            loss_history = self.agent.training_metrics['loss_history']
            axes[1, 0].plot(loss_history, alpha=0.6, color='orange')
            if len(loss_history) > 50:
                smoothed = self._smooth_curve(loss_history, window=50)
                axes[1, 0].plot(smoothed, color='darkorange', linewidth=2)
            axes[1, 0].set_title('训练损失')
            axes[1, 0].set_xlabel('更新步数')
            axes[1, 0].set_ylabel('损失')
            axes[1, 0].set_yscale('log')
            axes[1, 0].grid(True, alpha=0.3)
        
        # 5. Mean Q-value trend
        if self.agent.training_metrics['q_value_history']:
            q_history = self.agent.training_metrics['q_value_history']
            axes[1, 1].plot(q_history, alpha=0.6, color='teal')
            if len(q_history) > 50:
                smoothed = self._smooth_curve(q_history, window=50)
                axes[1, 1].plot(smoothed, color='darkcyan', linewidth=2)
            axes[1, 1].set_title('平均Q值')
            axes[1, 1].set_xlabel('更新步数')
            axes[1, 1].set_ylabel('Q值')
            axes[1, 1].grid(True, alpha=0.3)
        
        # 6. Epsilon (exploration-rate) decay
        if self.agent.training_metrics['epsilon_history']:
            axes[1, 2].plot(self.agent.training_metrics['epsilon_history'], color='red')
            axes[1, 2].set_title('探索率衰减')
            axes[1, 2].set_xlabel('更新步数')
            axes[1, 2].set_ylabel('Epsilon')
            axes[1, 2].grid(True, alpha=0.3)
        
        plt.tight_layout()
        plt.savefig(os.path.join(self.log_dir, 'training_curves.png'), dpi=300, bbox_inches='tight')
        plt.show()
    
    def _smooth_curve(self, data, window=50):
        """Return a centered moving average of `data` (unchanged if shorter
        than `window`)."""
        if len(data) < window:
            return data
        
        smoothed = []
        for i in range(len(data)):
            start = max(0, i - window // 2)
            end = min(len(data), i + window // 2 + 1)
            smoothed.append(np.mean(data[start:end]))
        return smoothed
    
    def _save_training_stats(self):
        """Persist training statistics to `<log_dir>/training_stats.pt`."""
        stats = {
            'episode_rewards': self.episode_rewards,
            'episode_lengths': self.episode_lengths,
            'episode_makespans': self.episode_makespans,
            'success_rates': self.success_rates,
            'best_reward': self.best_reward,
            'agent_metrics': self.agent.training_metrics
        }
        
        stats_path = os.path.join(self.log_dir, 'training_stats.pt')
        torch.save(stats, stats_path)
        print(f"训练统计信息已保存到: {stats_path}")

def create_advanced_environment():
    """Build the enhanced parallel-upgrade environment.

    Generates a feasible dependency matrix, a distance-weighted random
    jump matrix (skipping far-away versions is less likely), and wraps
    both in a ParallelUpgradeEnv.
    """
    # Environment parameters
    num_components = 8   # more components -> harder instances
    max_versions = 12    # more versions per component
    time_windows = [15, 20, 18, 15, 22]  # five maintenance windows
    max_parallel = 4     # concurrent upgrade slots

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    # Richer dependency structure (higher dependency probability)
    dependency_matrix = generate_feasible_dependency_matrix(
        num_components, max_versions, dep_prob=0.25
    )
    dependency_matrix = up_fill_dependency_matrix(num_components, max_versions, dependency_matrix)

    # Jump matrix: allow cross-version upgrades, with a probability that
    # decays as the version gap grows (floored at 0.1).
    jump_matrix = np.zeros((num_components, max_versions, max_versions))
    for comp in range(num_components):
        for src in range(max_versions):
            for dst in range(src + 1, max_versions):
                gap = dst - src
                allow_prob = max(0.1, 0.8 - 0.1 * gap)
                jump_matrix[comp, src, dst] = np.random.choice([0, 1], p=[1 - allow_prob, allow_prob])

    # Assemble the environment
    return ParallelUpgradeEnv(
        num_components=num_components,
        time_windows=time_windows,
        dependency_matrix=dependency_matrix,
        jump_matrix=jump_matrix,
        max_parallel=max_parallel,
        device=device,
    )

def create_advanced_agent(env, config=None):
    """Build the enhanced GNN-DQN agent for *env*.

    If *config* is None, a default hyper-parameter set sized for the
    larger environment is used instead.
    """
    if config is None:
        # Default hyper-parameters for the enlarged problem instance
        config = dict(
            graph_hid_dim=256,       # larger network capacity
            q_net_hid_dim=512,
            agent_feat_dim=15,
            lr=5e-4,                 # tuned learning rate
            gamma=0.99,
            eps_start=1.0,
            eps_end=0.01,
            eps_decay=0.9995,        # slower exploration decay
            target_update=200,       # target-network update period
            use_double_dqn=True,
            use_dueling=True,
            use_noisy=True,
            use_priority_buffer=True,
        )

    # Matrices are moved to CPU numpy arrays for the agent constructor
    return AdvancedGNNDQNAgent(
        dependency_matrix=env.dependency_matrix.cpu().numpy(),
        jump_matrix=env.jump_matrix.cpu().numpy(),
        upgrade_time=env.upgrade_time.cpu().numpy(),
        device=str(env.device),
        **config,
    )

def main():
    """Entry point: build environment + agent, then run training.

    Handles Ctrl-C by saving the current model; on any other exception the
    model is saved and the exception is re-raised.
    """
    print("=" * 60)
    print("高级软件升级规划训练系统")
    print("=" * 60)
    
    # Build environment and agent
    env = create_advanced_environment()
    agent = create_advanced_agent(env)
    
    # Build the training manager
    training_manager = AdvancedTrainingManager(
        env=env, 
        agent=agent, 
        log_dir="./advanced_training_logs"
    )
    
    print("环境信息:")
    print(f"  组件数量: {env.num_components}")
    print(f"  最大版本数: {max(env.max_versions)}")
    print(f"  时间窗口: {env.time_windows}")
    print(f"  最大并行数: {env.max_parallel}")
    print(f"  动作空间大小: {len(env.valid_actions)}")
    
    print("\n智能体配置:")
    print(f"  图隐藏维度: {agent.gnn.hidden_dim}")
    print(f"  Q网络隐藏维度: {agent.q_hid_dim}")
    print(f"  使用Double DQN: {agent.use_double_dqn}")
    print(f"  使用Dueling网络: {agent.use_dueling}")
    print(f"  使用Noisy Networks: {agent.use_noisy}")
    print(f"  使用优先级回放: {agent.use_priority_buffer}")
    
    # Start training
    try:
        final_eval = training_manager.train(
            num_episodes=3000,    # extended episode budget
            eval_interval=100,
            save_interval=500
        )
        
        print("\n" + "=" * 60)
        print("训练完成！最终评估结果:")
        print("=" * 60)
        for key, value in final_eval.items():
            if isinstance(value, float):
                print(f"{key}: {value:.4f}")
            else:
                print(f"{key}: {value}")
        
    except KeyboardInterrupt:
        print("\n训练被用户中断")
        print("保存当前模型状态...")
        agent.save("./advanced_training_logs/interrupted_model.pt")
        print("模型已保存")
    
    except Exception as e:
        print(f"\n训练过程中出现错误: {e}")
        print("保存当前模型状态...")
        # Bug fix: the save call was commented out while the code still
        # printed "model saved" — actually persist the model before re-raising.
        agent.save("./advanced_training_logs/error_model.pt")
        print("模型已保存")
        raise

# Run training only when executed as a script (not on import).
if __name__ == "__main__":
    main()