from tqdm import tqdm
from hierarchical_agent import HierarchicalAgent
from environment.hierarchical_encirclement_env import HierarchicalEncirclementEnv
from common.replay_buffer import Buffer
from common.data_logger import DataLogger
import torch
import os
import numpy as np
import matplotlib.pyplot as plt
import random
import logging

class HierarchicalRunner:
    """分层决策训练器"""
    
    def __init__(self, args, env):
        """Set up training hyper-parameters, agents, replay buffer and logging.

        Args:
            args: parsed experiment arguments (mutated in place to fill in
                defaults for ``num_targets`` / ``reassignment_interval``).
            env: the hierarchical encirclement environment instance.
        """
        self.args = args
        self.args.num_targets = getattr(args, 'num_targets', 3)
        self.args.reassignment_interval = getattr(args, 'reassignment_interval', 10)
        
        self._set_seed(args.seed)
        self.noise = args.noise_rate
        self.epsilon = args.epsilon
        self.exploration_delay = 0.0002
        self.episode_limit = args.max_episode_len
        self.min_exploration_rate = 0.05
        self.episodes = 500  # number of training episodes
        
        # Fix: build_hierarchical_state reads this flag but it was never
        # initialised anywhere; default to full observability.
        self.partial_observability = getattr(args, 'partial_observability', False)
        
        self.env = env
        self.agents = self._init_hierarchical_agents()
        self.buffer = Buffer(args)
        
        self.save_path = os.path.join(self.args.save_dir,
                                      self.args.scenario_name + '_hierarchical')
        # exist_ok avoids the check-then-create race of os.path.exists().
        os.makedirs(self.save_path, exist_ok=True)
        
        self.data_logger = DataLogger()
        
        # Share a single data logger across every agent's assignment policy.
        for agent in self.agents:
            agent.target_assignment_policy.data_logger = self.data_logger
            
        # Action-space sizes: high level picks a target, low level a move direction.
        self.high_level_action_space = self.args.num_targets
        self.low_level_action_space = self.args.num_position_action
        
        # Whether plot_training_curves should display figures interactively.
        self.show_plots = getattr(args, 'show_plots', False)
    
    def _set_seed(self, seed):
        """设置随机种子"""
        if seed is not None:
            np.random.seed(seed)
            random.seed(seed)
            torch.manual_seed(seed)
            if torch.cuda.is_available():
                torch.cuda.manual_seed(seed)
    
    def _init_hierarchical_agents(self):
        """Create one HierarchicalAgent per UAV, indexed by UAV id."""
        return [
            HierarchicalAgent(uav_id, self.args, self.args.num_position_action)
            for uav_id in range(self.args.num_uavs)
        ]
    
    def build_hierarchical_state(self, uav_id):
        """Build the per-UAV hierarchical state vector.

        Concatenates a 4-dim individual state (normalised position, energy and
        current target assignment) with the normalised environment state.
        Under partial observability, unknown entries keep the -1 sentinel
        produced by ``env.get_partial_state``.

        Args:
            uav_id: index of the UAV in ``self.env.uavs``.

        Returns:
            np.ndarray: the combined, normalised state vector.
        """
        # NOTE(review): partial_observability may not be set by __init__ in
        # older configs; default to the fully-observable path in that case.
        if getattr(self, 'partial_observability', False):
            partial_state = self.env.get_partial_state(uav_id)
            individual_state = self._individual_state(uav_id)

            normalized_partial_state = []
            state_idx = 0

            # Normalise observable UAV entries (x, y, energy); -1 marks unknown.
            for _ in range(self.args.num_uavs):
                if partial_state[state_idx] == -1:
                    normalized_partial_state.extend([-1, -1, -1])
                else:
                    normalized_partial_state.extend([
                        partial_state[state_idx] / self.env.grid_size,      # x position
                        partial_state[state_idx + 1] / self.env.grid_size,  # y position
                        partial_state[state_idx + 2] / 100.0,               # energy
                    ])
                state_idx += 3

            # Normalise observable target entries (x, y); -1 marks unknown.
            for _ in range(self.args.num_targets):
                if partial_state[state_idx] == -1:
                    normalized_partial_state.extend([-1, -1])
                else:
                    normalized_partial_state.extend([
                        partial_state[state_idx] / self.env.grid_size,      # x position
                        partial_state[state_idx + 1] / self.env.grid_size,  # y position
                    ])
                state_idx += 2

            # Remaining entries (assignment / observability flags) are already in [0, 1].
            if state_idx < len(partial_state):
                normalized_partial_state.extend(partial_state[state_idx:])

            return np.concatenate([individual_state, normalized_partial_state])

        # Fully-observable path: normalise the global environment state.
        global_state = self.env.get_global_state()
        individual_state = self._individual_state(uav_id)

        normalized_global_state = []
        state_idx = 0

        # Normalise every UAV entry (x, y, energy).
        for _ in range(self.args.num_uavs):
            normalized_global_state.extend([
                global_state[state_idx] / self.env.grid_size,      # x position
                global_state[state_idx + 1] / self.env.grid_size,  # y position
                global_state[state_idx + 2] / 100.0,               # energy
            ])
            state_idx += 3

        # Normalise every target entry (x, y).
        for _ in range(self.args.num_targets):
            normalized_global_state.extend([
                global_state[state_idx] / self.env.grid_size,      # x position
                global_state[state_idx + 1] / self.env.grid_size,  # y position
            ])
            state_idx += 2

        # Assignment entries are already in [0, 1].
        if state_idx < len(global_state):
            normalized_global_state.extend(global_state[state_idx:])

        return np.concatenate([individual_state, normalized_global_state])

    def _individual_state(self, uav_id):
        """Return the 4-dim normalised individual state for one UAV."""
        uav = self.env.uavs[uav_id]
        return [
            uav.position[0] / self.env.grid_size,  # normalised x
            uav.position[1] / self.env.grid_size,  # normalised y
            uav.energy / 100.0,                    # normalised energy
            # -1 (unassigned) deliberately maps below 0 so it stays distinguishable.
            self.env.target_assignments.get(uav_id, -1) / max(1, self.args.num_targets - 1),
        ]

    def get_action_space_info(self):
        """Return a description of the discrete two-level action space."""
        return {
            'high_level': {
                'type': 'discrete',
                'size': self.high_level_action_space,
                'description': '目标分配决策'
            },
            'low_level': {
                'type': 'discrete',
                'size': self.low_level_action_space,
                'description': '移动方向决策（8方向）'
            }
        }

    def hierarchical_train(self):
        """Run the full hierarchical training loop.

        Returns:
            tuple[list, list]: per-episode total rewards and per-episode best
            success rates.
        """
        print("开始分层决策训练...")
        print(f"动作空间信息: {self.get_action_space_info()}")

        reward_all = []
        success_rate_all = []
        target_assignment_efficiency = []

        for episode in range(self.episodes):
            step = 0
            total_reward = 0
            self.env.reset()
            episode_success_rate = 0

            # Target assignment chosen at every step of this episode.
            episode_assignments = []

            while step < self.episode_limit:
                # Linearly anneal exploration down to the configured floor.
                self.epsilon = max(self.min_exploration_rate,
                                   self.epsilon - self.exploration_delay)

                global_state = self.env.get_global_state()

                # Per-step transition components for every UAV.
                states = []
                actions = []
                target_assignments = []
                rewards = []
                next_states = []

                for uav_id, (uav, agent) in enumerate(zip(self.env.uavs, self.agents)):
                    state = self.build_hierarchical_state(uav_id)
                    states.append(state)

                    # Hierarchical decision: high level picks a target,
                    # low level picks a movement action.
                    action, assigned_target = agent.select_action(
                        state, self.epsilon, uav, global_state, step
                    )

                    # Fix: clamp the high-level action into the valid target
                    # range BEFORE recording it, so efficiency statistics and
                    # the replay buffer never see out-of-range assignments.
                    if not (0 <= assigned_target < self.high_level_action_space):
                        assigned_target = assigned_target % self.high_level_action_space

                    # Collapse an accidental action vector of the wrong size
                    # to a single discrete action.
                    if isinstance(action, (list, np.ndarray)):
                        if len(action) != self.low_level_action_space:
                            action = np.argmax(action) if len(action) > 1 else 0

                    # Fix: record the *executed* (validated) values, not the
                    # raw ones returned by select_action.
                    actions.append(action)
                    target_assignments.append(assigned_target)

                    reward = self.env.perform_hierarchical_action(
                        uav_id, action, assigned_target, step, self.episode_limit
                    )
                    rewards.append(reward)
                    total_reward += reward

                    next_states.append(self.build_hierarchical_state(uav_id))

                episode_assignments.append(target_assignments.copy())

                # Fix: store the transition before the termination check so the
                # final (successful) step is not dropped from the replay buffer.
                self.buffer.store_episode(states, actions, rewards, next_states)

                current_success_rate = self.env.get_success_rate()
                episode_success_rate = max(episode_success_rate, current_success_rate)

                if self.env.is_done():
                    print(f"Episode {episode+1}: 所有目标围捕成功! Step: {step+1}")
                    logging.info(f"Episode {episode+1}: 所有目标围捕成功! Step: {step+1}")
                    break

                # Train every agent once enough samples are buffered.
                if self.buffer.current_size >= self.args.batch_size:
                    transitions = self.buffer.sample(self.args.batch_size)
                    for agent, uav in zip(self.agents, self.env.uavs):
                        other_agents = self.agents.copy()
                        other_agents.remove(agent)
                        agent.learn(transitions, other_agents, uav, episode)

                step += 1

            assignment_efficiency = self.calculate_assignment_efficiency(episode_assignments)

            reward_all.append(total_reward)
            success_rate_all.append(episode_success_rate)
            target_assignment_efficiency.append(assignment_efficiency)

            # Progress report every 50 episodes.
            if (episode + 1) % 50 == 0:
                avg_reward = np.mean(reward_all[-50:])
                avg_success = np.mean(success_rate_all[-50:])
                avg_efficiency = np.mean(target_assignment_efficiency[-50:])
                print(f"Episode {episode+1}: Avg Reward: {avg_reward:.2f}, "
                      f"Success Rate: {avg_success:.2f}, Assignment Efficiency: {avg_efficiency:.2f}")

        self.save_training_results(reward_all, success_rate_all, target_assignment_efficiency)

        return reward_all, success_rate_all

    def calculate_assignment_efficiency(self, episode_assignments):
        """Score target-assignment quality for one episode.

        Combines two components, each mapped into (0, 1]:
        - uniformity: low variance of UAV counts per target;
        - stability: few assignment switches between consecutive steps.

        Args:
            episode_assignments: list of per-step assignment lists.

        Returns:
            float: average of the two scores; higher is better (0 if empty).
        """
        if not episode_assignments:
            return 0

        variance_sum = 0
        total_assignments = 0
        change_ratio_sum = 0

        for i, step_assignments in enumerate(episode_assignments):
            total_assignments += len(step_assignments)

            # Count how many UAVs chose each valid target.
            target_counts = {}
            for target_id in step_assignments:
                if target_id >= 0:  # skip invalid assignments
                    target_counts[target_id] = target_counts.get(target_id, 0) + 1

            # Variance across targets: lower means a more even spread.
            if target_counts:
                counts = list(target_counts.values())
                if len(counts) > 1:
                    variance_sum += np.var(counts)

            # Fraction of UAVs that switched target since the previous step.
            if i > 0:
                prev_assignments = episode_assignments[i - 1]
                changes = sum(1 for curr, prev in zip(step_assignments, prev_assignments)
                              if curr != prev)
                change_ratio_sum += changes / len(step_assignments)

        if total_assignments > 0:
            uniformity_score = 1.0 / (1.0 + variance_sum / total_assignments)
            stability_score = 1.0 / (1.0 + change_ratio_sum / len(episode_assignments))
            return (uniformity_score + stability_score) / 2
        return 0

    def save_training_results(self, rewards, success_rates, efficiencies):
        """Persist training curves to disk and render the summary plots."""
        results = {
            'rewards': rewards,
            'success_rates': success_rates,
            'assignment_efficiencies': efficiencies,
            'action_space_info': self.get_action_space_info()
        }

        # NOTE: saving a dict yields an object array; reload with
        # np.load(..., allow_pickle=True).item().
        np.save(os.path.join(self.save_path, 'hierarchical_training_results.npy'), results)

        self.plot_training_curves(rewards, success_rates, efficiencies)

    def plot_training_curves(self, rewards, success_rates, efficiencies):
        """Plot reward / success-rate / efficiency curves and save them as PNG.

        Shows the figure only when ``self.show_plots`` is set; otherwise it is
        closed to avoid accumulating open matplotlib figures.
        """
        fig, axes = plt.subplots(2, 2, figsize=(15, 10))

        # Raw episode rewards.
        axes[0, 0].plot(rewards)
        axes[0, 0].set_title('Training Rewards')
        axes[0, 0].set_xlabel('Episode')
        axes[0, 0].set_ylabel('Total Reward')
        axes[0, 0].grid(True, alpha=0.3)

        # Best per-episode success rate.
        axes[0, 1].plot(success_rates)
        axes[0, 1].set_title('Success Rate')
        axes[0, 1].set_xlabel('Episode')
        axes[0, 1].set_ylabel('Success Rate')
        axes[0, 1].grid(True, alpha=0.3)

        # Target-assignment efficiency.
        axes[1, 0].plot(efficiencies)
        axes[1, 0].set_title('Assignment Efficiency')
        axes[1, 0].set_xlabel('Episode')
        axes[1, 0].set_ylabel('Efficiency')
        axes[1, 0].grid(True, alpha=0.3)

        # Smoothed rewards (only once enough episodes exist).
        window = 50
        if len(rewards) >= window:
            moving_avg_rewards = np.convolve(rewards, np.ones(window) / window, mode='valid')
            axes[1, 1].plot(moving_avg_rewards)
            axes[1, 1].set_title(f'Moving Average Rewards (window={window})')
            axes[1, 1].set_xlabel('Episode')
            axes[1, 1].set_ylabel('Average Reward')
            axes[1, 1].grid(True, alpha=0.3)

        plt.tight_layout()
        plt.savefig(os.path.join(self.save_path, 'hierarchical_training_curves.png'),
                    dpi=300, bbox_inches='tight')

        if self.show_plots:
            plt.show()
        else:
            plt.close()