import heapq
import os
import time
from collections import deque

import numpy as np
import torch

from maddpg.maddpg import MADDPG

# Problematic planner imports removed; the only planner used (A*) is defined in this module instead.
# from path_planning.traditional_planners import HybridPathPlanner, RRTPathPlanner, PotentialFieldPlanner, DijkstraPlanner

# Run policy inference on GPU when available; tensors fed to the actor are moved here.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class HierarchicalAgent:
    """Hierarchical decision agent: high-level target assignment + low-level path planning.

    The high level periodically picks which target to pursue with a MADDPG
    policy (epsilon-greedy exploration); the low level plans a waypoint path
    to that target with A* and converts each waypoint into a discrete
    8-direction move action.
    """

    def __init__(self, agent_id, args, num_action):
        """Build the agent.

        Args:
            agent_id: index of this UAV among ``args.num_uavs`` agents.
            args: experiment configuration (num_targets, grid_size,
                reassignment_interval, num_position_action, ...).
            num_action: size of the low-level action space (kept for
                interface compatibility; the policy output size used here
                is ``args.num_targets``).
        """
        self.args = args
        self.agent_id = agent_id

        # High-level decision: target-assignment network (one logit per target).
        self.target_assignment_policy = MADDPG(args, agent_id, args.num_targets)

        # Low-level decision: path planner selection.
        self.path_planning_method = getattr(args, 'path_planning_method', 'astar')
        self._init_path_planners(args.grid_size)

        # Currently assigned target and the waypoint path being followed.
        self.assigned_target_id = None
        self.current_path = []
        self.path_index = 0

        # Bounded history of recent target assignments.
        self.assignment_history = deque(maxlen=10)
        self.last_assignment_step = 0

        # Per-method planning statistics; avg_time is a running mean in seconds.
        self.planning_stats = {
            'astar': {'count': 0, 'success': 0, 'avg_time': 0}
        }

    def _init_path_planners(self, grid_size):
        """Initialise available planners (A* only, to avoid optional imports)."""
        self.planners = {
            'astar': AStarPathPlanner(grid_size)
        }

        # Planner currently in use.
        self.current_planner = self.planners['astar']

    def select_target(self, global_state, epsilon, step):
        """High-level decision: choose which target to pursue.

        A new assignment is made only every ``args.reassignment_interval``
        steps; otherwise the previous assignment is kept.

        Returns:
            (target_id, is_new_assignment)
        """
        if (step - self.last_assignment_step) >= self.args.reassignment_interval:
            if np.random.uniform() < epsilon:
                # Explore: pick a random target.
                target_id = np.random.randint(0, self.args.num_targets)
            else:
                # Exploit: pick the target with the highest actor score.
                inputs = torch.tensor(global_state, dtype=torch.float32).unsqueeze(0)
                target_probs = self.target_assignment_policy.actor_network(inputs.to(device), None)
                target_id = torch.argmax(target_probs).item()

            self.assigned_target_id = target_id
            self.assignment_history.append(target_id)
            self.last_assignment_step = step

            return target_id, True  # new assignment

        return self.assigned_target_id, False  # keep current assignment

    def plan_path_to_target(self, uav_position, target_position, obstacles):
        """Low-level decision: plan a waypoint path to the target.

        Falls back to a straight start->goal segment when the planner raises.
        Every attempt (including failures) updates the per-method stats:
        attempt count, success count (a path longer than one waypoint) and a
        running mean of the planning time in seconds.
        """
        stats = self.planning_stats['astar']
        start_time = time.perf_counter()
        try:
            path = self.current_planner.find_path(uav_position, target_position, obstacles)
            success = len(path) > 1
        except Exception as e:
            print(f"路径规划失败 (Agent {self.agent_id}): {e}")
            # Degrade to a straight-line path; counted as a failed attempt.
            path = [uav_position, target_position]
            success = False

        elapsed = time.perf_counter() - start_time
        # Count failures too (previously skipped) so the success rate is unbiased,
        # and keep avg_time as an incremental running mean (it was never updated before).
        stats['count'] += 1
        if success:
            stats['success'] += 1
        stats['avg_time'] += (elapsed - stats['avg_time']) / stats['count']

        return path

    def select_action(self, state, epsilon, uav, global_state, step):
        """Hierarchical action selection: assign a target, then follow a planned path.

        Returns:
            (one_hot_action_vector, target_id)
        """
        # High level: target assignment.
        target_id, is_new_assignment = self.select_target(global_state, epsilon, step)

        # Locate the assigned target (fall back to the first target, then own position).
        target_positions = self.extract_target_positions(global_state)
        if target_id < len(target_positions):
            target_pos = target_positions[target_id]
        else:
            target_pos = target_positions[0] if target_positions else uav.position

        # Other UAVs act as obstacles.
        obstacles = self.extract_obstacle_positions(global_state, uav.position)

        # Low level: (re)plan when newly assigned or the current path is exhausted.
        if is_new_assignment or len(self.current_path) == 0 or self.path_index >= len(self.current_path):
            self.current_path = self.plan_path_to_target(uav.position, target_pos, obstacles)
            self.path_index = 0

        # Step along the path, or move randomly when no path is available.
        if self.current_path and self.path_index < len(self.current_path):
            next_position = self.current_path[self.path_index]
            action = self.position_to_action(uav.position, next_position)
            self.path_index += 1
        else:
            action = np.random.randint(0, self.args.num_position_action)

        # One-hot encode; assumes num_position_action >= 8 directions — TODO confirm.
        action_vector = np.zeros(self.args.num_position_action)
        action_vector[action] = 1

        return action_vector.copy(), target_id

    def extract_target_positions(self, global_state):
        """Extract (x, y) target positions from the flat global state.

        Assumes the layout [3 features per UAV (x, y, energy)] followed by
        (x, y) pairs per target — confirm against the environment encoder.
        """
        num_uav_features = 3 * self.args.num_uavs
        target_features = global_state[num_uav_features:]

        positions = []
        num_targets = getattr(self.args, 'num_targets', 3)
        # Two coordinates per target.
        for i in range(0, min(len(target_features), num_targets * 2), 2):
            if i + 1 < len(target_features):
                positions.append((target_features[i], target_features[i+1]))

        return positions

    def extract_obstacle_positions(self, global_state, own_position):
        """Collect the other UAVs' (x, y) positions to treat them as obstacles."""
        obstacles = []
        for i in range(self.args.num_uavs):
            if i != self.agent_id:
                start_idx = i * 3  # 3 features per UAV (x, y, energy)
                if start_idx + 1 < len(global_state):
                    uav_pos = (global_state[start_idx], global_state[start_idx + 1])
                    obstacles.append(uav_pos)

        return obstacles

    def position_to_action(self, current_pos, target_pos):
        """Convert a position delta into one of 8 compass actions.

        Direction 0 covers the 45-degree sector centred on the +x axis and
        indices increase counter-clockwise (0:+x, 2:+y, 4:-x, 6:-y).
        NOTE(review): 0 is also returned for "no movement", so the action is
        overloaded — confirm against the environment's action map.
        """
        dx = target_pos[0] - current_pos[0]
        dy = target_pos[1] - current_pos[1]

        if abs(dx) < 1e-6 and abs(dy) < 1e-6:
            return 0  # no movement

        angle = np.arctan2(dy, dx)
        angle_deg = np.degrees(angle)
        if angle_deg < 0:
            angle_deg += 360

        # Bucket the angle into 8 sectors of 45 degrees each.
        direction = int((angle_deg + 22.5) // 45) % 8
        return direction

    def learn(self, transitions, other_agents, uav, episode=0):
        """Train only the high-level target-assignment network."""
        self.target_assignment_policy.train(transitions, other_agents, uav, episode)


class AStarPathPlanner:
    """A* path search over a coarse 8-connected grid.

    Nodes are spaced ``step`` units apart; the search succeeds as soon as a
    node comes within ``goal_tolerance`` of the goal.
    """

    def __init__(self, grid_size, step=10, goal_tolerance=10):
        """Args:
            grid_size: side length of the square world; valid coordinates
                are in [0, grid_size).
            step: spacing between neighbouring waypoints (was hard-coded 10).
            goal_tolerance: distance at which a node counts as "at the goal"
                (was hard-coded 10).
        """
        self.grid_size = grid_size
        self.step = step
        self.goal_tolerance = goal_tolerance
        # 8-connected neighbourhood (unit offsets, scaled by `step` when expanding).
        self.directions = [
            (-1, -1), (-1, 0), (-1, 1),
            (0, -1),           (0, 1),
            (1, -1),  (1, 0),  (1, 1)
        ]

    def heuristic(self, pos1, pos2):
        """Euclidean distance between two points (admissible for 8-connected moves)."""
        return np.sqrt((pos1[0] - pos2[0])**2 + (pos1[1] - pos2[1])**2)

    def is_valid_position(self, pos, obstacles, safe_distance=20):
        """Return True if `pos` is inside the grid and keeps `safe_distance` from every obstacle."""
        x, y = pos

        # Boundary check.
        if x < 0 or x >= self.grid_size or y < 0 or y >= self.grid_size:
            return False

        # Obstacle clearance check.
        for obs_pos in obstacles:
            if np.linalg.norm(np.array(pos) - np.array(obs_pos)) < safe_distance:
                return False

        return True

    def find_path(self, start, goal, obstacles, max_iterations=1000):
        """Run A* from `start` towards `goal`, avoiding `obstacles`.

        Returns the list of waypoints after `start` (excluding `start`
        itself; empty when `start` is already within tolerance), or
        ``[goal]`` when no path is found within `max_iterations` pops.
        """
        start = (int(start[0]), int(start[1]))
        goal = (int(goal[0]), int(goal[1]))

        open_heap = [(self.heuristic(start, goal), start)]
        came_from = {}
        g_score = {start: 0}
        # Closed set prevents re-expansion; the old version re-expanded nodes
        # and its O(n) "in open_set" scan blocked priority updates entirely.
        closed = set()

        iterations = 0
        while open_heap and iterations < max_iterations:
            iterations += 1
            current = heapq.heappop(open_heap)[1]

            # Lazy deletion: skip entries made stale by a later, cheaper re-push.
            if current in closed:
                continue
            closed.add(current)

            if self.heuristic(current, goal) < self.goal_tolerance:
                # Reconstruct the path back to (but excluding) the start node.
                path = []
                while current in came_from:
                    path.append(current)
                    current = came_from[current]
                path.reverse()
                return path

            for dx, dy in self.directions:
                neighbor = (current[0] + dx * self.step, current[1] + dy * self.step)

                if neighbor in closed or not self.is_valid_position(neighbor, obstacles):
                    continue

                tentative_g = g_score[current] + np.hypot(dx * self.step, dy * self.step)

                if neighbor not in g_score or tentative_g < g_score[neighbor]:
                    came_from[neighbor] = current
                    g_score[neighbor] = tentative_g
                    # Always re-push; any stale duplicate is skipped on pop.
                    heapq.heappush(open_heap, (tentative_g + self.heuristic(neighbor, goal), neighbor))

        # No path found: fall back to the goal alone (callers treat len <= 1 as failure).
        return [goal]