import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)

from agent import Agent
from common.replay_buffer import Buffer

import os
import numpy as np
# torch
import torch
torch.manual_seed(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# logging
import logging
import shutil
import random

from pathlib import Path
from has import calculate_model_hash
from torch.utils.tensorboard import SummaryWriter

# New code: encirclement reward shaping helper.
from environment.environment import compute_encirclement_reward

def _to_float_list(value):
    """Flatten *value* into a flat list of Python floats.

    numpy arrays are flattened, lists/tuples are converted element-wise,
    and scalars are wrapped in a single-element list.
    """
    if isinstance(value, np.ndarray):
        return [float(x) for x in value.flatten()]
    if isinstance(value, (list, tuple)):
        return [float(x) for x in value]
    return [float(value)]


def trans_to_float(info):
    """Convert a per-UAV info dict into a flat list of floats.

    Concatenates the flattened 'position', 'action', 'served_user' and
    'reward' fields, in that fixed order, so the result can be fed into
    a network input vector.

    Args:
        info: dict with keys 'position', 'action', 'served_user' and
            'reward'; each value may be a numpy array, a list/tuple of
            numbers, or a numeric scalar (bools count as numbers).

    Returns:
        list[float]: the flattened numeric representation.
    """
    result = []
    # The order of keys defines the layout of the flattened vector and
    # must match what consumers (state builders) expect.
    for key in ('position', 'action', 'served_user', 'reward'):
        result.extend(_to_float_list(info[key]))
    return result

def bit_reverse(data):
    """Flip binary values: 0 <-> 1; everything non-binary passes through.

    Lists and tuples are processed element-wise and always returned as a
    list; binary scalars (int/float equal to 0 or 1) are flipped; any
    other value (strings, arrays, ...) is returned unchanged.
    """
    def flip(value):
        if isinstance(value, (int, float)) and value in (0, 1):
            return 1 - value
        return value

    if isinstance(data, (list, tuple)):
        return [flip(item) for item in data]
    if isinstance(data, (int, float)):
        return flip(data)
    return data

class Runner:
    def __init__(self, args, env):
        """Initialize the training runner.

        Args:
            args: experiment configuration; must provide noise_rate,
                epsilon, max_episode_len, num_uavs, save_dir and
                scenario_name (plus whatever Agent/Buffer consume).
            env: the multi-UAV environment instance.
        """
        self.args = args
        self.noise = args.noise_rate
        self.epsilon = args.epsilon
        self.exploration_delay = 0.0003  # per-step linear epsilon decay
        self.episode_limit = args.max_episode_len
        self.min_exploration_rate = 0
        self.episodes = 1000  # total training episodes used by uav_train1
        self.env = env
        self.position_agents = self._init_agents(0, env)
        self.squares = self._init_squares(args)
        self.buffer = Buffer(args)
        self.save_path = self.args.save_dir + '/' + self.args.scenario_name
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists()/os.makedirs() pair.
        os.makedirs(self.save_path, exist_ok=True)

        # Encirclement-task bookkeeping.
        self.encirclement_success_count = 0
        self.total_episodes = 0
        self.best_completion_time = float('inf')
        
    def _init_squares(self, args):
        """Build a 200x200 grid of independent [False, 0] cells.

        Args:
            args: experiment configuration (currently unused; kept for
                interface compatibility).

        Returns:
            list: 200 rows of 200 mutable [flag, value] cells.
        """
        grid = []
        for _row in range(200):
            grid.append([[False, 0] for _col in range(200)])
        return grid

    def _init_agents(self, flag, env):
        """Create one Agent per UAV.

        Args:
            flag: agent family selector — 0 gives 4-action (position)
                agents, 1 gives 2-action agents.
            env: environment instance handed to every Agent.

        Returns:
            list: one Agent per UAV, indexed by UAV id.

        Raises:
            NotImplementedError: for any flag other than 0 or 1.
        """
        num_action = {0: 4, 1: 2}.get(flag)
        if num_action is None:
            raise NotImplementedError
        return [Agent(agent_id, self.args, num_action, env)
                for agent_id in range(self.args.num_uavs)]

    def evaluate_encirclement_performance(self, episode):
        """Snapshot encirclement metrics for the first target.

        Args:
            episode: current episode index (unused; kept for interface
                compatibility).

        Returns:
            dict with keys 'surrounding_count' (number of UAVs listed as
            surrounding the target), 'is_surrounded' (the target's own
            flag) and 'uniformity' (float in [0, 1]; 1.0 means the
            surrounding UAVs are perfectly evenly spaced in angle).
        """
        empty = {'surrounding_count': 0, 'is_surrounded': False, 'uniformity': 0.0}
        targets = getattr(self.env, 'targets', None)
        if not targets:
            return empty

        target = targets[0]
        n_surrounding = len(target.surrounding_uavs)

        uniformity = 0.0
        if n_surrounding >= 2:
            # Bearing of each surrounding UAV as seen from the target.
            bearings = [
                np.arctan2(self.env.uavs[uid].position[1] - target.position[1],
                           self.env.uavs[uid].position[0] - target.position[0])
                for uid in target.surrounding_uavs
                if uid < len(self.env.uavs)
            ]
            if len(bearings) > 1:
                bearings.sort()
                # Circular gaps between consecutive bearings; the modulo
                # folds the single negative wrap-around gap into [0, 2pi).
                gaps = [
                    (bearings[(k + 1) % len(bearings)] - bearings[k]) % (2 * np.pi)
                    for k in range(len(bearings))
                ]
                ideal_gap = 2 * np.pi / len(bearings)
                uniformity = 1.0 - min(1.0, np.std(gaps) / ideal_gap)

        return {
            'surrounding_count': n_surrounding,
            'is_surrounded': target.is_surrounded,
            'uniformity': uniformity
        }

    def uav_train1(self):
        """Run the main training loop for the multi-UAV encirclement task.

        For each of self.episodes episodes: resets the environment, rolls
        out up to self.episode_limit steps, shapes rewards with
        compute_encirclement_reward, stores transitions in the replay
        buffer, trains every positional agent once enough samples exist,
        and logs metrics to a log file and TensorBoard. Models are
        checkpointed every 100 episodes and saved again at the end.

        Returns:
            list: average step reward of each episode, in order.
        """
        # Logging setup: wipe any previous run's log directory.
        # NOTE(review): the path uses a Windows backslash separator and a
        # relative directory — breaks on POSIX; confirm target platform.
        log_file_path = r'log\uav_log.log'
        log_dir = os.path.dirname(log_file_path)
        if os.path.exists(log_dir):
            for filename in os.listdir(log_dir):
                file_path = os.path.join(log_dir, filename)
                try:
                    if os.path.isfile(file_path) or os.path.islink(file_path):
                        os.unlink(file_path)
                    elif os.path.isdir(file_path): 
                        shutil.rmtree(file_path)
                except Exception as e:
                    print(f'Failed to delete {file_path}. Reason: {e}')
        else:
            os.makedirs(log_dir)

        # Configure file logging.
        logging.basicConfig(
            filename=log_file_path, 
            level=logging.INFO, 
            format='%(asctime)s - %(levelname)s - %(message)s'
        )
        
        # Initialize the TensorBoard writer.
        tensorboard_dir = './tensorboard_logs'
        if not os.path.exists(tensorboard_dir):
            os.makedirs(tensorboard_dir)
        self.writer = SummaryWriter(log_dir=tensorboard_dir)
        
        self.train_step_count = 0
        reward_all = []
        
        # Previous-round info per UAV id; consumed when building the
        # other-UAV portion of each agent's state input.
        prev_round_info = {uav.id: {
            'position': None,
            'action': None,
            'served_user': False,
            'reward': 0
        } for uav in self.env.uavs}
        
        for episode in range(self.episodes):
            # Reset the environment at the start of every episode.
            self.env.reset()
            
            # NOTE(review): leftover debug print at a hard-coded episode.
            if episode == 810:
                print("exploration_delay:", self.exploration_delay)
            
            i = 0  # step index within the episode
            total_reward = 0
            episode_rewards = []
            
            # NOTE(review): the replay buffer is re-created every episode,
            # discarding all past experience — confirm this is intentional.
            self.buffer = Buffer(self.args)
            continue_training = True
            
            # Snapshot encirclement metrics at episode start.
            start_performance = self.evaluate_encirclement_performance(episode)
            
            while continue_training and i < self.episode_limit:
                # Linear epsilon decay, floored at min_exploration_rate.
                self.epsilon = max(self.min_exploration_rate, self.epsilon - self.exploration_delay)
                
                # Per-step accumulators; the *2 lists hold bit-reversed
                # copies of the corresponding *1 lists.
                u1, u2, u = [], [], []
                s_next1, s_next2, s_next = [], [], []
                s, r1, r2, r = [], [], [], []
                s1, s2 = [], []
                
                current_info = {}
                current_err_other_uav_states = []
                step_rewards = []
                
                for uav in self.env.uavs:
                    # Target position and peer positions for the enhanced state.
                    target_pos = self.env.targets[0].position if self.env.targets else None
                    other_positions = [other_uav.position for other_uav in self.env.uavs if other_uav.id != uav.id]
                    
                    # Base observation for this UAV.
                    state = uav.get_state()
                    
                    # Refresh this UAV's cooperation metrics against the target.
                    if self.env.targets:
                        uav.update_cooperation_metrics(self.env.targets[0], self.env.get_uav_positions())
                    
                    if state is None:
                        print("state is None")
                        continue
                    
                    other_uav_states = []
                    current_other_uav_states = []

                    if i == 0:
                        # First step: take a uniformly random one-hot action.
                        index = np.random.randint(0, self.args.num_position_action)
                        action = [0, 0, 0, 0]
                        action[index] = 1
                        action_position = action
                        print("i = 0")
                    else:
                        # Flatten the previous-round info of every other UAV.
                        for uav1 in self.env.uavs:
                            if uav1.id == uav.id:
                                continue
                            info = prev_round_info[uav1.id]
                            other_uav_states.extend(trans_to_float(info))
                        
                        # Positional history: zeros until 5 past positions exist.
                        if i < 6:
                            history_input = np.zeros(10)
                        else:
                            history_input = np.concatenate(uav.history_position[-5:])
                        
                        # Relative target features: offset, distance, bearing.
                        target_info = []
                        if target_pos:
                            dx = target_pos[0] - state[0]
                            dy = target_pos[1] - state[1]
                            distance = np.sqrt(dx**2 + dy**2)
                            angle = np.arctan2(dy, dx)
                            target_info = [dx, dy, distance, angle]
                        else:
                            target_info = [0, 0, 0, 0]
                        
                        # Full network input: history + base state + target + peers.
                        state_input = np.concatenate((
                            history_input, 
                            state, 
                            target_info,
                            other_uav_states
                        )).astype(np.float32)
                        
                        # Epsilon-greedy action selection via the agent.
                        action_position, flag = self.position_agents[uav.id].select_action(
                            state_input, self.epsilon, uav, episode
                        )
                        
                        s1.append(state_input)
                        s2.append(bit_reverse(state_input))
                        u1.append(action_position)
                        u2.append(bit_reverse(action_position))
                    
                    # Apply the chosen action in the environment.
                    reward_raw, served_flag = self.env.perform_action(uav, action_position)
                    
                    # Encirclement-shaped reward for this UAV at this step.
                    nearest_target, _ = self.env.find_nearest_target(uav.position)
                    action_direction = np.argmax(action_position)
                    reward = compute_encirclement_reward(
                        uav, nearest_target, self.env.get_uav_positions(), 
                        action_direction, episode, i
                    )
                    
                    # NOTE(review): int() truncates toward zero, discarding
                    # the fractional reward — confirm this is intended.
                    int_reward = int(reward)
                    next_state = uav.get_state()
                    step_rewards.append(reward)
                    
                    # Bit-reversed copy of this round's info, shared with
                    # peers on the next step via prev_round_info.
                    current_info[uav.id] = {
                        'position': bit_reverse(state),
                        'action': bit_reverse(action_position),
                        'served_user': bit_reverse(served_flag),
                        'reward': bit_reverse(int_reward)
                    }
                    
                    info_current = {
                        'position': state,
                        'action': action_position,
                        'served_user': served_flag,
                        'reward': int_reward
                    }
                    
                    current_err_info = current_info[uav.id]
                    current_err_other_uav_states.append(trans_to_float(current_err_info))
                    current_other_uav_states.append(trans_to_float(info_current))

                    if i != 0:
                        s_next1.append(next_state)
                        s_next2.append(bit_reverse(next_state))
                        r1.append(int_reward)
                        r2.append(bit_reverse(int_reward))
                    
                    total_reward += reward
                    
                    # Detailed per-UAV logging every 10 steps.
                    if i % 10 == 0:
                        logging.info(
                            f"Episode {episode}, Step {i}, UAV {uav.id}: "
                            f"Position {state}, Action {action_position}, Reward {int_reward:.2f}"
                        )
                
                # Roll this round's info into prev_round_info for the next step.
                for uav_id, info in current_info.items():
                    prev_round_info[uav_id] = {
                        'position': info['position'],
                        'action': info['action'],
                        'served_user': info['served_user'],
                        'reward': info['reward']
                    }
                
                # Advance the environment; a truthy return signals completion.
                env_done = self.env.step()
                
                # Encirclement completed: stop the episode and log success.
                if env_done:
                    continue_training = False
                    self.encirclement_success_count += 1
                    if i < self.best_completion_time:
                        self.best_completion_time = i
                    
                    logging.info(f"Episode {episode}: Encirclement task completed at step {i}")
                    
                    # Success metrics.
                    self.writer.add_scalar('Task/Completion_Step', i, episode)
                    self.writer.add_scalar('Task/Success', 1, episode)
                    self.writer.add_scalar('Task/Success_Rate', 
                                         self.encirclement_success_count / (episode + 1), episode)
                
                # Train the agents on this step's transitions.
                if i > 0 and len(s1) > 0:
                    for agent_id in range(self.args.num_uavs):
                        if agent_id < len(s1):
                            # Store one transition per agent.
                            # NOTE(review): s_next1 holds the raw next state
                            # while s1 holds the full concatenated input —
                            # verify the buffer expects mismatched shapes.
                            self.buffer.store_episode(
                                s1[agent_id], u1[agent_id], r1[agent_id], s_next1[agent_id], agent_id=agent_id
                            )
                    
                    # Update networks once enough samples are buffered.
                    if self.buffer.current_size >= self.args.batch_size:
                        for agent_id in range(self.args.num_uavs):
                            self.position_agents[agent_id].learn(self.buffer, self.position_agents)
                            self.train_step_count += 1
                
                # Periodic training metrics.
                if episode % 10 == 0 and i % 20 == 0:
                    avg_step_reward = np.mean(step_rewards) if step_rewards else 0
                    self.writer.add_scalar('Training/Average_Step_Reward', avg_step_reward, 
                                         episode * self.episode_limit + i)
                    self.writer.add_scalar('Training/Epsilon', self.epsilon, 
                                         episode * self.episode_limit + i)
                    
                    # Encirclement performance metrics.
                    performance = self.evaluate_encirclement_performance(episode)
                    self.writer.add_scalar('Encirclement/Surrounding_Count', 
                                         performance['surrounding_count'], 
                                         episode * self.episode_limit + i)
                    self.writer.add_scalar('Encirclement/Uniformity', 
                                         performance['uniformity'], 
                                         episode * self.episode_limit + i)
                
                episode_rewards.append(np.mean(step_rewards) if step_rewards else 0)
                i += 1
            
            # End-of-episode statistics.
            avg_episode_reward = np.mean(episode_rewards) if episode_rewards else 0
            reward_all.append(avg_episode_reward)
            
            # Episode-level metrics.
            self.writer.add_scalar('Episode/Average_Reward', avg_episode_reward, episode)
            self.writer.add_scalar('Episode/Total_Reward', total_reward, episode)
            self.writer.add_scalar('Episode/Length', i, episode)
            
            # Encirclement-task-specific metrics.
            end_performance = self.evaluate_encirclement_performance(episode)
            self.writer.add_scalar('Episode/Final_Surrounding_Count', 
                                 end_performance['surrounding_count'], episode)
            self.writer.add_scalar('Episode/Final_Uniformity', 
                                 end_performance['uniformity'], episode)
            self.writer.add_scalar('Episode/Task_Success', 
                                 1 if end_performance['is_surrounded'] else 0, episode)
            
            # Periodic model checkpointing.
            if episode % 100 == 0 and episode > 0:
                for agent_id in range(self.args.num_uavs):
                    model_path = os.path.join(self.save_path, f'agent_{agent_id}_episode_{episode}.pth')
                    self.position_agents[agent_id].save_model(model_path)
                
                logging.info(f"Episode {episode}: Models saved. "
                           f"Success rate: {self.encirclement_success_count / (episode + 1):.3f}, "
                           f"Best completion time: {self.best_completion_time}")
            
            # Console progress report.
            if episode % 50 == 0:
                success_rate = self.encirclement_success_count / (episode + 1)
                print(f"Episode {episode}: Avg Reward: {avg_episode_reward:.2f}, "
                      f"Success Rate: {success_rate:.3f}, "
                      f"Best Time: {self.best_completion_time}")
            
            self.total_episodes += 1
        
        # Training finished: save the final models.
        for agent_id in range(self.args.num_uavs):
            final_model_path = os.path.join(self.save_path, f'agent_{agent_id}_final.pth')
            self.position_agents[agent_id].save_model(final_model_path)
        
        # Close the TensorBoard writer.
        self.writer.close()
        
        # Final statistics.
        final_success_rate = self.encirclement_success_count / self.total_episodes
        print(f"\nTraining completed!")
        print(f"Total episodes: {self.total_episodes}")
        print(f"Successful encirclements: {self.encirclement_success_count}")
        print(f"Success rate: {final_success_rate:.3f}")
        print(f"Best completion time: {self.best_completion_time} steps")
        
        logging.info(f"Training completed. Success rate: {final_success_rate:.3f}, "
                    f"Best completion time: {self.best_completion_time}")
        
        return reward_all

    def evaluate(self, num_episodes=10):
        """Run greedy-policy rollouts and report success statistics.

        Args:
            num_episodes: number of evaluation episodes to run.

        Returns:
            tuple: (success_rate, avg_completion_time) — the fraction of
            episodes where the environment signalled completion, and the
            mean step count of the successful ones (inf if none).
        """
        print("Starting evaluation...")

        successes = 0
        finish_steps = []

        for ep in range(num_episodes):
            self.env.reset()
            step = 0

            while step < self.episode_limit:
                for uav in self.env.uavs:
                    obs = uav.get_state()
                    target_pos = self.env.targets[0].position if self.env.targets else None

                    # Target features (offset, distance, bearing), using the
                    # same layout as training.
                    if target_pos:
                        dx = target_pos[0] - obs[0]
                        dy = target_pos[1] - obs[1]
                        distance = np.sqrt(dx**2 + dy**2)
                        angle = np.arctan2(dy, dx)
                        target_feats = [dx, dy, distance, angle]
                    else:
                        target_feats = [0, 0, 0, 0]

                    if step < 6:
                        history_feats = np.zeros(10)
                    else:
                        history_feats = np.concatenate(uav.history_position[-5:])
                    # Simplified placeholder for the other UAVs' state.
                    peer_feats = [0] * (4 * (self.args.num_uavs - 1))

                    net_input = np.concatenate(
                        (history_feats, obs, target_feats, peer_feats)
                    ).astype(np.float32)

                    # epsilon = 0.0 -> purely greedy action selection.
                    chosen_action, _ = self.position_agents[uav.id].select_action(
                        net_input, 0.0, uav, ep
                    )
                    self.env.perform_action(uav, chosen_action)

                if self.env.step():
                    successes += 1
                    finish_steps.append(step)
                    print(f"Evaluation episode {ep + 1}: Success in {step} steps")
                    break

                step += 1

            if step >= self.episode_limit:
                print(f"Evaluation episode {ep + 1}: Failed (timeout)")

        success_rate = successes / num_episodes
        avg_completion_time = np.mean(finish_steps) if finish_steps else float('inf')

        print(f"\nEvaluation Results:")
        print(f"Success rate: {success_rate:.3f} ({successes}/{num_episodes})")
        print(f"Average completion time: {avg_completion_time:.1f} steps")

        return success_rate, avg_completion_time

    def build_simplified_state(self, uav, target_pos, other_uavs):
        """构建简化的10维状态"""
        state = []
        
        # 1. 当前位置（归一化到[0,1]）
        pos = np.array(uav.position, dtype=np.float32) / self.args.grid_size
        state.extend(pos)
        
        # 2. 目标相对位置（归一化）
        if target_pos:
            rel_target = (np.array(target_pos, dtype=np.float32) - np.array(uav.position, dtype=np.float32)) / self.args.grid_size
            state.extend(rel_target)
        else:
            state.extend([0.0, 0.0])
        
        # 3. 其他UAV相对位置（最多2个，不足填0）
        other_positions = []
        for other_uav in other_uavs:
            if other_uav.id != uav.id:
                rel_pos = (np.array(other_uav.position, dtype=np.float32) - np.array(uav.position, dtype=np.float32)) / self.args.grid_size
                other_positions.extend(rel_pos)
        
        # 确保正好4维（2个其他UAV的相对位置）
        while len(other_positions) < 4:
            other_positions.append(0.0)
        state.extend(other_positions[:4])
        
        # 4. 历史移动方向（上一步的移动向量）
        if len(uav.history_position) >= 2:
            last_move = np.array(uav.history_position[-1], dtype=np.float32) - np.array(uav.history_position[-2], dtype=np.float32)
            last_move = last_move / self.args.grid_size  # 归一化
            state.extend(last_move)
        else:
            state.extend([0.0, 0.0])
        
        return np.array(state, dtype=np.float32)

    def build_simplified_next_state(self, uav, next_position, target_pos, other_uavs):
        """构建简化的下一个状态"""
        state = []
        
        # 1. 下一个位置（归一化）
        pos = np.array(next_position, dtype=np.float32) / self.args.grid_size
        state.extend(pos)
        
        # 2. 目标相对位置
        if target_pos:
            rel_target = (np.array(target_pos, dtype=np.float32) - np.array(next_position, dtype=np.float32)) / self.args.grid_size
            state.extend(rel_target)
        else:
            state.extend([0.0, 0.0])
        
        # 3. 其他UAV相对位置
        other_positions = []
        for other_uav in other_uavs:
            if other_uav.id != uav.id:
                rel_pos = (np.array(other_uav.position, dtype=np.float32) - np.array(next_position, dtype=np.float32)) / self.args.grid_size
                other_positions.extend(rel_pos)
        
        while len(other_positions) < 4:
            other_positions.append(0.0)
        state.extend(other_positions[:4])
        
        # 4. 当前移动方向（从当前位置到下一个位置）
        current_move = np.array(next_position, dtype=np.float32) - np.array(uav.position, dtype=np.float32)
        current_move = current_move / self.args.grid_size
        state.extend(current_move)
        
        return np.array(state, dtype=np.float32)