from environment.uav import UAV
from environment.target import Target
import numpy as np
import logging

# Process-wide flag: remembers whether the reward-breakdown CSV header has
# already been written (used by compute_encirclement_reward below).
reward_log_header_written = False
# Energy consumed per action — not referenced in this file; presumably read
# by callers elsewhere. TODO confirm.
energy_consume = 1

# Configure logging: append INFO-and-above records to training.log.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    filename='training.log',
    filemode='a'
)

class MultiUAVEnv:
    """Multi-UAV encirclement environment.

    Holds a fleet of UAVs and one or more moving targets on a square grid.
    The team's task is to surround every target; this class owns episode
    reset, per-step simulation, and the joint (team-level) reward.
    """

    def __init__(self, num_uavs, grid_size, num_targets=1, num_position_action=4):
        self.grid_size = grid_size
        self.num_uavs = num_uavs

        self.targets = [Target(grid_size, idx) for idx in range(num_targets)]
        self.uavs = [UAV(grid_size, idx, num_position_action) for idx in range(num_uavs)]

        # Reward shaping knobs (not referenced in this file; kept for callers).
        self.reward_scale = 0.01
        self.reward_clip = 10.0

        # Encirclement-task parameters.
        self.encirclement_radius = 2.0
        self.min_encirclement_uavs = 3
        self.max_episode_steps = 500
        self.current_step = 0

    def reset(self):
        """Reset episode state: targets to the grid center, UAVs on a ring around it."""
        self.current_step = 0

        center = (self.grid_size // 2, self.grid_size // 2)

        # Reset every target to the center with a clean surround state.
        for tgt in self.targets:
            tgt.position = center
            tgt.is_surrounded = False
            tgt.surrounding_uavs.clear()
            tgt.escape_attempts = 0

        # Spread the UAVs evenly on a circle of radius 5 around the center.
        ring_radius = 5
        fleet_size = len(self.uavs)
        for idx, drone in enumerate(self.uavs):
            theta = 2 * np.pi * idx / fleet_size
            drone.position = (
                int(center[0] + ring_radius * np.cos(theta)),
                int(center[1] + ring_radius * np.sin(theta)),
            )
            drone.history_position = []
            drone.energy = 100

    def get_global_state(self):
        """Return one flat vector: every UAV state followed by every target position."""
        pieces = [drone.get_state() for drone in self.uavs]
        pieces += [np.array(tgt.position) for tgt in self.targets]
        return np.concatenate(pieces)

    def get_uav_positions(self):
        """Return the current position of every UAV, in UAV order."""
        return [drone.position for drone in self.uavs]

    def find_nearest_target(self, position):
        """Return (nearest_target, distance) from `position`; (None, inf) if no targets."""
        best = None
        best_dist = float('inf')
        origin = np.array(position)

        for tgt in self.targets:
            d = np.linalg.norm(origin - np.array(tgt.position))
            if d < best_dist:
                best_dist = d
                best = tgt

        return best, best_dist

    def perform_action(self, uav, cruise_action):
        """Apply a cruise action to one UAV and refresh all surround states.

        Returns (move_reward, served_flag) where served_flag is True when
        this UAV is currently listed as surrounding some target.
        """
        move_reward, _ = uav.execute_cruise_action(cruise_action)

        for tgt in self.targets:
            tgt.update_surrounding_status(self.get_uav_positions())

        # NOTE(review): surrounding_uavs is unpacked here as (uav_id, ...)
        # pairs, but compute_encirclement_reward treats it as a flat id list —
        # confirm the Target contract; one of the two call sites is wrong.
        served_flag = any(
            uav.id in (entry_id for entry_id, _ in tgt.surrounding_uavs)
            for tgt in self.targets
        )

        return move_reward, served_flag

    def step(self):
        """Advance one tick: move targets, refresh surround status, report done."""
        self.current_step += 1

        for tgt in self.targets:
            tgt.move()
            tgt.update_surrounding_status(self.get_uav_positions())

        # Episode ends when time runs out or every target is surrounded.
        if self.current_step >= self.max_episode_steps:
            return True
        return all(tgt.is_surrounded for tgt in self.targets)

    def compute_joint_reward(self):
        """Team reward: 100 per surrounded target, else 10 per UAV in its ring."""
        return sum(
            100 if tgt.is_surrounded else len(tgt.surrounding_uavs) * 10
            for tgt in self.targets
        )
def compute_encirclement_reward(uav, target, all_uav_positions, action_direction, episode, step):
    """Shaped per-UAV reward for the target-encirclement task.

    Combines eight components — distance shaping, encirclement membership,
    formation uniformity, movement penalty, collision penalty, cooperation
    bonus, completion bonus and efficiency bonus — then clips the total.

    Args:
        uav: UAV being rewarded; must expose `id`, `position`, `history_position`.
        target: target being encircled; must expose `position`,
            `surrounding_uavs`, `is_surrounded`.
        all_uav_positions: every UAV's position, indexed by UAV id.
        action_direction: movement action taken; currently unused, kept for
            interface compatibility.
        episode: episode index (used only for CSV logging).
        step: step index within the episode.

    Returns:
        Total reward, clipped to [-20, 100].

    Side effects:
        Appends a per-component breakdown row to
        logs/encirclement_reward_breakdown.csv (header written only when the
        file is empty) and sets the module-level flag
        `reward_log_header_written`.
    """
    reward = 0
    logs = {
        "distance_reward": 0,
        "encirclement_reward": 0,
        "formation_reward": 0,
        "movement_penalty": 0,
        "collision_penalty": 0,
        "cooperation_bonus": 0,
        "completion_bonus": 0,
        "efficiency_bonus": 0,
        "total_reward": 0
    }

    # 1. Distance shaping — the optimal band adapts to the encirclement state.
    distance_to_target = np.linalg.norm(np.array(uav.position) - np.array(target.position))
    optimal_distance = 2.0

    if len(target.surrounding_uavs) >= 2:
        # Encirclement already forming: reward holding the optimal band.
        if optimal_distance - 0.5 <= distance_to_target <= optimal_distance + 0.5:
            distance_reward = 15  # inside the optimal band
        elif distance_to_target < optimal_distance - 0.5:
            distance_reward = 10 - (optimal_distance - distance_to_target) * 2  # too close
        else:
            distance_reward = max(0, 10 - (distance_to_target - optimal_distance) * 1.5)
    else:
        # No encirclement yet: reward plain approach.
        if distance_to_target <= optimal_distance:
            distance_reward = 15 - distance_to_target * 2
        else:
            distance_reward = max(0, 15 - distance_to_target)

    reward += distance_reward
    logs["distance_reward"] = distance_reward

    # 2. Layered encirclement reward: base bonus plus a capped count bonus.
    # NOTE(review): membership here treats surrounding_uavs as a flat list of
    # UAV ids, but MultiUAVEnv.perform_action unpacks it as (uav_id, ...)
    # pairs — one of the two call sites contradicts the Target contract;
    # confirm which format Target actually stores.
    if uav.id in target.surrounding_uavs:
        base_encirclement_reward = 20
        count_bonus = min(len(target.surrounding_uavs) * 5, 25)
        encirclement_reward = base_encirclement_reward + count_bonus
        reward += encirclement_reward
        logs["encirclement_reward"] = encirclement_reward

    # 3. Formation reward: score how uniformly the surrounding UAVs are
    #    spread in angle around the target.
    if len(target.surrounding_uavs) >= 2:
        angles = []
        for other_uav_id in target.surrounding_uavs:
            if other_uav_id != uav.id and other_uav_id < len(all_uav_positions):
                other_pos = all_uav_positions[other_uav_id]
                dx = other_pos[0] - target.position[0]
                dy = other_pos[1] - target.position[1]
                angles.append(np.arctan2(dy, dx))

        if angles and uav.id in target.surrounding_uavs:
            # Include this UAV's own bearing to the target.
            dx = uav.position[0] - target.position[0]
            dy = uav.position[1] - target.position[1]
            angles.append(np.arctan2(dy, dx))

            # Angular gaps between consecutive UAVs, wrapping around 2*pi.
            angles.sort()
            angle_diffs = []
            for i in range(len(angles)):
                diff = angles[(i + 1) % len(angles)] - angles[i]
                if diff < 0:
                    diff += 2 * np.pi
                angle_diffs.append(diff)

            # 1.0 when gaps are perfectly even, 0.0 when highly uneven.
            ideal_diff = 2 * np.pi / len(angles)
            uniformity_score = 1.0 - min(1.0, np.std(angle_diffs) / ideal_diff)
            formation_reward = uniformity_score * 15
            reward += formation_reward
            logs["formation_reward"] = formation_reward

    # 4. Movement penalty — halved once the encirclement (>= 3 UAVs) has
    #    formed, to encourage holding position rather than churning.
    base_movement_penalty = 0.1
    if len(target.surrounding_uavs) >= 3:
        movement_penalty = base_movement_penalty * 0.5
    else:
        movement_penalty = base_movement_penalty

    reward -= movement_penalty
    logs["movement_penalty"] = movement_penalty

    # 5. Collision penalty.  Fixed: the old check skipped other UAVs at the
    #    *exact same* position (`other_uav_pos != uav.position`), so a full
    #    overlap — the worst possible collision — went unpenalized.
    collision_penalty = 0
    for i, other_uav_pos in enumerate(all_uav_positions):
        if i != uav.id:
            dist = np.linalg.norm(np.array(uav.position) - np.array(other_uav_pos))
            if dist < 1.0:  # hard minimum safe separation
                collision_penalty += 10
            elif dist < 1.5:
                collision_penalty += 3

    reward -= collision_penalty
    logs["collision_penalty"] = collision_penalty

    # 6. Cooperation bonus: scales with how many UAVs are surrounding, plus
    #    an extra bonus when this UAV is one of them.
    cooperation_bonus = 0
    if len(target.surrounding_uavs) >= 2:
        cooperation_bonus = len(target.surrounding_uavs) * 8
        if uav.id in target.surrounding_uavs:
            cooperation_bonus += 10

    reward += cooperation_bonus
    logs["cooperation_bonus"] = cooperation_bonus

    # 7. Completion bonus, larger the earlier the target was surrounded.
    if target.is_surrounded:
        completion_bonus = 150
        if step < 100:
            completion_bonus += 50
        elif step < 200:
            completion_bonus += 25

        reward += completion_bonus
        logs["completion_bonus"] = completion_bonus

    # 8. Efficiency bonus for reaching an encircling position early.
    efficiency_bonus = 0
    if uav.id in target.surrounding_uavs:
        if len(uav.history_position) > 0:
            if step < 50:
                efficiency_bonus = 10
            elif step < 100:
                efficiency_bonus = 5

    reward += efficiency_bonus
    logs["efficiency_bonus"] = efficiency_bonus

    # Clip so a single step cannot dominate learning.
    reward = np.clip(reward, -20, 100)
    logs["total_reward"] = reward

    # Append the breakdown to a CSV for offline analysis.
    import os, csv
    os.makedirs("logs", exist_ok=True)
    csv_path = os.path.join("logs", "encirclement_reward_breakdown.csv")

    # Fieldnames must be fixed before the episode/step/uav_id keys are added.
    fieldnames = ["episode", "step", "uav_id"] + list(logs.keys())
    logs["episode"] = episode
    logs["step"] = step
    logs["uav_id"] = uav.id

    global reward_log_header_written
    with open(csv_path, "a", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        # Write the header only when the file is empty.  Fixes the old dead
        # `"reward_log_header_written" not in globals()` check (the flag is
        # always defined at module level) and avoids duplicate headers when
        # appending across process restarts.
        if f.tell() == 0:
            writer.writeheader()
        reward_log_header_written = True
        writer.writerow(logs)

    return reward