from tqdm import tqdm
from agent import Agent
from common.replay_buffer import Buffer
from common.data_logger import DataLogger
import torch
import os
import numpy as np
import matplotlib.pyplot as plt
import random
# torch
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# logging
import logging
import shutil



class Runner:
    """Drives multi-UAV encirclement training and evaluation.

    Wires together the environment, one Agent per UAV, a shared replay
    buffer and a DataLogger, and can inject bit-flip transmission errors
    into the inter-UAV position messages used to build each agent's state.
    """

    def __init__(self, args, env):
        self.args = args
        # Re-apply the random seed here so Runner construction itself is
        # reproducible, regardless of what the caller seeded earlier.
        self._set_seed(args.seed)
        self.noise = args.noise_rate
        self.epsilon = args.epsilon
        self.exploration_delay = 0.0002          # per-step epsilon decay
        self.episode_limit = args.max_episode_len
        self.min_exploration_rate = 0.05         # epsilon floor
        self.episodes = 300                      # number of training episodes
        self.env = env
        self.agents = self._init_agents()
        self.buffer = Buffer(args)
        self.save_path = self.args.save_dir + '/' + self.args.scenario_name
        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path)

        # Shared per-episode metrics logger (Excel + TensorBoard); each
        # agent's policy writes through the same instance.
        self.data_logger = DataLogger()
        for agent in self.agents:
            agent.policy.data_logger = self.data_logger

        # Transmission-error model: probability that any single bit of a
        # quantized coordinate flips during transfer; 0 disables corruption.
        self.bit_flip_probability = 0
        # Scale factor used to quantize float coordinates to integers before
        # bit manipulation (i.e. 3 decimal places of positional precision).
        self.position_precision = 1000

    def _init_agents(self):
        """Create one Agent per UAV, indexed by UAV id."""
        return [Agent(i, self.args, self.args.num_position_action)
                for i in range(self.args.num_uavs)]

    def add_transmission_error(self, positions, flip_probability=None):
        """Corrupt position data with independent per-bit flip errors.

        Each coordinate is quantized to a signed 32-bit integer (floats are
        scaled by ``self.position_precision``), every bit of the 32-bit
        two's-complement representation is flipped independently with
        probability ``flip_probability``, and the result is converted back
        to the coordinate's original type.

        Args:
            positions: list of (x, y) tuples; coordinates may be int or float.
            flip_probability: per-bit flip probability in [0, 1]; defaults to
                ``self.bit_flip_probability`` when None.

        Returns:
            List of (x, y) tuples with the same element types as the input.
        """
        if flip_probability is None:
            flip_probability = self.bit_flip_probability

        # Fast path: no corruption requested — return the input unchanged.
        if flip_probability == 0:
            return positions

        corrupted_positions = []
        for pos in positions:
            corrupted_coords = []
            for coord in pos:  # x then y
                # Remember the input type so we can restore it afterwards.
                # (Uses isinstance consistently; the original mixed
                # isinstance() with a type() == int comparison.)
                coord_is_int = isinstance(coord, int)
                if coord_is_int:
                    quantized = coord
                else:
                    quantized = int(coord * self.position_precision)

                # 32-bit two's-complement representation; the mask also
                # handles negative values correctly.
                bits = list(format(quantized & 0xFFFFFFFF, '032b'))

                # Flip each bit independently with the given probability.
                for i in range(len(bits)):
                    if np.random.random() < flip_probability:
                        bits[i] = '1' if bits[i] == '0' else '0'

                corrupted_int = int(''.join(bits), 2)
                # Reinterpret the unsigned result as a signed 32-bit value.
                if corrupted_int > 0x7FFFFFFF:
                    corrupted_int -= 0x100000000

                # Restore the original data type.
                if coord_is_int:
                    corrupted_coords.append(corrupted_int)
                else:
                    corrupted_coords.append(corrupted_int / self.position_precision)

            # Keep tuples so output mirrors the input container type.
            corrupted_positions.append(tuple(corrupted_coords))

        return corrupted_positions

    def build_state(self, uav_id):
        """Build the extended observation for one UAV.

        The positions of all *other* UAVs are passed through the
        transmission-error model before being handed to the UAV, simulating
        a noisy communication channel.
        """
        uav = self.env.uavs[uav_id]
        target_pos = self.env.target.position
        other_uav_positions = [other_uav.position
                               for i, other_uav in enumerate(self.env.uavs)
                               if i != uav_id]
        corrupted_other_uav_positions = self.add_transmission_error(other_uav_positions)
        return uav.get_extended_state(target_pos, corrupted_other_uav_positions)

    def encirclement_train(self):
        """Train all agents on the encirclement task.

        Runs ``self.episodes`` episodes, decaying epsilon per step, storing
        joint transitions in the shared replay buffer and training every
        agent once the buffer holds at least one batch.
        """
        print("开始训练，详细日志将保存到文件中...")

        reward_all = []
        success_rate = []

        for episode in range(self.episodes):
            step = 0
            total_reward = 0
            self.env.reset()
            episode_success = False

            while step < self.episode_limit:
                # Linear epsilon decay with a hard floor.
                self.epsilon = max(self.min_exploration_rate,
                                   self.epsilon - self.exploration_delay)

                # Collect states, actions, rewards and next states for all UAVs.
                states = []
                actions = []
                rewards = []
                next_states = []

                for uav_id, (uav, agent) in enumerate(zip(self.env.uavs, self.agents)):
                    state = self.build_state(uav_id)
                    states.append(state)

                    action, is_random = agent.select_action(state, self.epsilon, uav)
                    actions.append(action)

                    reward = self.env.perform_action(uav, action, step, self.episode_limit, episode, use_optimized=True)
                    rewards.append(reward)
                    total_reward += reward

                    next_state = self.build_state(uav_id)
                    next_states.append(next_state)

                    logging.info(f"Episode {episode+1}, Step {step+1}, UAV{uav_id}: "
                               f"pos={uav.position}, action={action}, reward={reward:.2f}")

                # Check whether the UAVs have successfully encircled the target.
                if self.env.check_encirclement():
                    episode_success = True
                    print(f"Episode {episode+1}: 合围成功! Step: {step+1}")
                    logging.info(f"Episode {episode+1}: 合围成功! Step: {step+1}")
                    # Grant every UAV a terminal bonus reward.
                    for i in range(len(rewards)):
                        rewards[i] += 100
                    # BUGFIX: store the terminal transition BEFORE leaving the
                    # loop — previously the break skipped store_episode, so the
                    # bonus-bearing successful transitions never reached the
                    # replay buffer and were never learned from.
                    self.buffer.store_episode(states, actions, rewards, next_states)
                    break

                # Store the joint experience for this step.
                self.buffer.store_episode(states, actions, rewards, next_states)

                # Train every agent once a full batch is available.
                if self.buffer.current_size >= self.args.batch_size:
                    transitions = self.buffer.sample(self.args.batch_size)
                    for agent, uav in zip(self.agents, self.env.uavs):
                        other_agents = self.agents.copy()
                        other_agents.remove(agent)
                        agent.learn(transitions, other_agents, uav, episode)

                step += 1

            # Record per-episode results.
            reward_all.append(total_reward)
            success_rate.append(1 if episode_success else 0)

            # Rolling success rate over the last 10 episodes.
            recent_success_rate = np.mean(success_rate[-10:]) if len(success_rate) >= 10 else np.mean(success_rate)

            self.data_logger.log_episode(episode + 1, total_reward, episode_success, step, recent_success_rate)

            # Print a summary every 50 episodes.
            if (episode + 1) % 50 == 0:
                self.data_logger.print_summary(episode + 1, 50)

        # Training finished — flush loggers and tell the user where to look.
        self.data_logger.close()
        print("\n训练完成！详细数据已保存到Excel文件和TensorBoard日志中。")
        print("使用以下命令查看TensorBoard：")
        print("tensorboard --logdir=runs")

    def evaluate(self):
        """Evaluate the trained agents greedily (epsilon = 0).

        Returns:
            The mean episode return over ``args.evaluate_episodes`` episodes.
        """
        print("开始模型评估...")

        returns = []
        successes = 0  # count of episodes that actually achieved encirclement

        for episode in range(self.args.evaluate_episodes):
            step = 0
            total_reward = 0
            self.env.reset()
            episode_success = False

            print(f"评估 Episode {episode + 1}/{self.args.evaluate_episodes}...", end=' ')

            while step < self.args.evaluate_episode_len:
                for uav_id, (uav, agent) in enumerate(zip(self.env.uavs, self.agents)):
                    state = self.build_state(uav_id)

                    # Greedy action selection — no exploration during evaluation.
                    action, _ = agent.select_action(state, epsilon=0.0, uav=uav)

                    reward = self.env.perform_action(uav, action, step, self.args.evaluate_episode_len, episode, use_optimized=True)
                    total_reward += reward

                if self.env.check_encirclement():
                    episode_success = True
                    print(f"成功! 步数: {step + 1}, 奖励: {total_reward:.2f}")
                    break

                step += 1

            if episode_success:
                successes += 1
            else:
                print(f"失败, 步数: {step}, 奖励: {total_reward:.2f}")

            returns.append(total_reward)

        # Aggregate statistics. BUGFIX: success rate now reflects actual
        # encirclement outcomes instead of the former "return > 0" proxy.
        avg_return = np.mean(returns)
        std_return = np.std(returns)
        success_rate = successes / self.args.evaluate_episodes

        print(f"\n评估结果:")
        print(f"平均回报: {avg_return:.2f} ± {std_return:.2f}")
        print(f"成功率: {success_rate:.2%}")

        return avg_return

    def _set_seed(self, seed):
        """Seed Python, NumPy and PyTorch RNGs for reproducibility."""
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed(seed)
            torch.cuda.manual_seed_all(seed)
