import gym
import logging
import traceback
import sys
from gym.utils import seeding
import numpy as np
from typing import Dict, Any, Tuple
from ..core.simulatior import AircraftSimulator, BaseSimulator
from ..tasks.task_base import BaseTask
from ..utils.utils import parse_config



class BaseEnv(gym.Env):
    """
    A base reinforcement-learning environment for simulating flight dynamics,
    following the OpenAI Gym interface.

    This base environment is meant to be combined with a concrete Task that
    implements a specific flight-control problem: the task provides the
    observation/action spaces and variables, and the methods used to compute
    the agents' rewards and termination conditions.
    """
    # Render modes supported by this environment: "human" and "txt".
    metadata = {"render.modes": ["human", "txt"]}

    def __init__(self, config_name: str):
        # Basic settings parsed from the named configuration file.
        self.config = parse_config(config_name)
        # Maximum number of environment steps; 1000 is the fallback used when
        # self.config does not define a 'max_steps' attribute.
        self.max_steps = getattr(self.config, 'max_steps', 1000)
        # Underlying simulator tick frequency (Hz).
        self.sim_freq = getattr(self.config, 'sim_freq', 60)
        # Number of simulator ticks executed per agent step (frame-skip).
        self.agent_interaction_steps = getattr(self.config, 'agent_interaction_steps', 12)
        # Geographic center of the battle field: (longitude, latitude, altitude).
        self.center_lon, self.center_lat, self.center_alt = \
            getattr(self.config, 'battle_field_center', (120.0, 60.0, 0.0))
        self._create_records = False
        self.load()  # load the task and simulators, and seed the RNG

    @property
    def num_agents(self) -> int:
        return self.task.num_agents

    @property
    def observation_space(self) -> gym.Space:
        return self.task.observation_space

    @property
    def action_space(self) -> gym.Space:
        return self.task.action_space

    @property
    def agents(self) -> Dict[str, AircraftSimulator]:
        return self._jsbsims

    @property
    def time_interval(self) -> float:
        # Simulated seconds per agent step (default 12/60 = 0.2s).
        # NOTE: true division always yields a float, hence the float annotation
        # (the original `-> int` annotation was incorrect).
        return self.agent_interaction_steps / self.sim_freq

    def load(self):
        self.load_task()
        self.load_simulator()
        self.seed()

    def load_task(self):
        # BaseTask contains @abstractmethod methods, so it is an abstract base
        # class and cannot be instantiated directly. Concrete subclasses are
        # expected to override this method and assign self.task, e.g.:
        # self.task = BaseTask(self.config)
        pass

    def load_simulator(self):
        """Create one AircraftSimulator per configured aircraft and link teams."""
        self._jsbsims = {}  # type: Dict[str, AircraftSimulator]
        for uid, config in self.config.aircraft_configs.items():
            self._jsbsims[uid] = AircraftSimulator(
                uid=uid,
                color=config.get("color", "Red"),
                model=config.get("model", "f16"),
                init_state=config.get("init_state"),
                origin=getattr(self.config, 'battle_field_center', (120.0, 60.0, 0.0)),
                sim_freq=self.sim_freq,
                num_missiles=config.get("missile", 0))
        # The first character of a uid encodes its team: different teams use
        # different leading characters.
        _default_team_uid = list(self._jsbsims.keys())[0][0]
        self.ego_ids = [uid for uid in self._jsbsims.keys() if uid[0] == _default_team_uid]
        self.enm_ids = [uid for uid in self._jsbsims.keys() if uid[0] != _default_team_uid]

        # Link each simulator to its teammates (partners) and opponents (enemies).
        for key, sim in self._jsbsims.items():
            for k, s in self._jsbsims.items():
                if k == key:
                    continue
                elif k[0] == key[0]:
                    sim.partners.append(s)
                else:
                    sim.enemies.append(s)

        self._tempsims = {}  # type: Dict[str, BaseSimulator]

    def add_temp_simulator(self, sim: BaseSimulator):
        self._tempsims[sim.uid] = sim

    def reset(self) -> np.ndarray:
        """Reset the environment state and return an initial observation.

        Returns:
            obs (np.ndarray): initial observation
        """
        # reset simulators
        self.current_step = 0
        for sim in self._jsbsims.values():
            sim.reload()
        self._tempsims.clear()
        # reset task
        self.task.reset(self)
        obs = self.get_obs()
        return self._pack(obs)

    def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray, dict]:
        """Run one timestep of the environment's dynamics.

        This is the core RL method: it applies the given actions (opponents'
        actions included), advances the simulation, and returns the new
        observations, rewards, termination flags and extra info.

        Args:
            action (np.ndarray): actions of all agents, opponents included.

        Returns:
            A tuple (observation, rewards, dones, info) where:
                observation: agents' observations of the current environment
                rewards: rewards returned after the previous actions
                dones: whether each agent has reached a terminal state
                info: auxiliary diagnostic information (dict)
        """
        self.current_step += 1
        info = {"current_step": self.current_step}
        # apply actions
        action = self._unpack(action)
        for agent_id in self.agents.keys():
            a_action = self.task.normalize_action(self, agent_id, action[agent_id])
            self.agents[agent_id].set_property_value(self.task.action_var, a_action)
        # run simulation: `agent_interaction_steps` simulator ticks per agent step
        for _ in range(self.agent_interaction_steps):
            for sim in self._jsbsims.values():
                sim.run()
            for sim in self._tempsims.values():
                sim.run()
        self.task.step(self)

        obs = self.get_obs()

        dones = {}
        for agent_id in self.agents.keys():
            done, info = self.task.get_termination(self, agent_id, info)
            dones[agent_id] = [done]

        rewards = {}
        for agent_id in self.agents.keys():
            reward, info = self.task.get_reward(self, agent_id, info)
            rewards[agent_id] = [reward]

        return self._pack(obs), self._pack(rewards), self._pack(dones), info

    def get_obs(self):
        """Return the observations of all agents as a dict keyed by agent id."""
        return {agent_id: self.task.get_obs(self, agent_id) for agent_id in self.agents.keys()}

    def get_state(self):
        """Return the global state.

        NOTE: this method should not be used during decentralized execution.
        It currently mirrors get_obs(); override it to provide a true global
        state.
        """
        return {agent_id: self.task.get_obs(self, agent_id) for agent_id in self.agents.keys()}

    def close(self):
        """Clean up simulator objects.

        Environments automatically close when they are garbage collected or
        when the program exits.
        """
        for sim in self._jsbsims.values():
            sim.close()
        for sim in self._tempsims.values():
            sim.close()
        self._jsbsims.clear()
        self._tempsims.clear()

    def render(self, mode="txt", filepath='./JSBSimRecording.txt.acmi'):
        """Write simulation data to the given file depending on the mode.

        Only the text mode ("txt") is currently implemented.

        - human: print on the terminal (not implemented)
        - txt: output to txt.acmi files (Tacview ACMI text format)

        Note:
            Make sure that your class's metadata 'render.modes' key includes
            the list of supported modes. It's recommended to call super()
            in implementations to use the functionality of this method.

        :param mode: str, the mode to render with
        """
        if mode == "txt":
            if not self._create_records:
                # First call: write the ACMI file header once.
                with open(filepath, mode='w', encoding='utf-8-sig') as f:
                    f.write("FileType=text/acmi/tacview\n")
                    f.write("FileVersion=2.1\n")
                    f.write("0,ReferenceTime=2020-04-01T00:00:00Z\n")
                self._create_records = True
            with open(filepath, mode='a', encoding='utf-8-sig') as f:
                timestamp = self.current_step * self.time_interval
                f.write(f"{timestamp:.2f}\n")
                for sim in self._jsbsims.values():
                    log_msg = sim.log()
                    if log_msg is not None:
                        f.write(log_msg + "\n")
                for sim in self._tempsims.values():
                    log_msg = sim.log()
                    if log_msg is not None:
                        f.write(log_msg + "\n")
            # TODO: real-time rendering via the high-level Tacview socket interface
        else:
            raise NotImplementedError

    def seed(self, seed=None):
        """
        Sets the seed for this env's random number generator(s).
        Note:
            Some environments use multiple pseudorandom number generators.
            We want to capture all such seeds used in order to ensure that
            there aren't accidental correlations between multiple generators.
        Returns:
            list<bigint>: Returns the list of seeds used in this env's random
              number generators. The first value in the list should be the
              "main" seed, or the value which a reproducer should pass to
              'seed'. Often, the main seed equals the provided 'seed', but
              this won't be true if seed=None, for example.
        """
        # NOTE: an identical duplicate of this method used to be defined lower
        # in the class (silently shadowing this one); the duplicate was removed.
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def _pack(self, data: Dict[str, Any]) -> np.ndarray:
        """Pack a per-agent dict into a grouped np.ndarray (egos first, then enemies).

        Used to batch observations, rewards, or other per-agent values for
        downstream processing / network input. Exits the program if the packed
        array contains NaN values.
        """
        ego_data = np.array([data[uid] for uid in self.ego_ids])
        enm_data = np.array([data[uid] for uid in self.enm_ids])
        # `match` on a boolean was an anti-idiom here; a plain if/else is clearer.
        if enm_data.shape[0] > 0:
            data = np.concatenate((ego_data, enm_data))
        else:
            data = ego_data
        if np.isnan(data).any():
            logging.error("Data contains NaN values.")
            traceback.print_stack()
            sys.exit("Error: Data contains NaN values.")
        return data[:self.num_agents, ...]

    def _unpack(self, data: np.ndarray) -> Dict[str, Any]:
        """Unpack a grouped np.ndarray into a per-agent dict.

        Mainly used to convert network outputs (or other grouped data) into a
        format keyed by agent id, suitable for the environment.
        """
        # Validate that `data` is an np.ndarray, list or tuple whose length
        # equals the number of agents in the environment (self.num_agents).
        all_ids = self.ego_ids + self.enm_ids
        if not isinstance(data, (np.ndarray, list, tuple)) or len(data) != self.num_agents:
            logging.error("data参数数据类型不正确（必须为np.ndarray, list, tuple中任一种） 或 data数量不等于智能体数量.")
            traceback.print_stack()  # print the call stack for debugging
            sys.exit("Error: data参数数据类型不正确（必须为np.ndarray, list, tuple中任一种） 或 data数量不等于智能体数量.")  # exit the program
        # Sanity check so the loops below cannot go wrong.
        if not len(all_ids) == self.num_agents:
            logging.warning("敌我所有智能体的ID列表数量 ！= self.num_agents.")
            traceback.print_stack()  # print the call stack for debugging
            sys.exit("Error: 敌我所有智能体的ID列表数量 ！= self.num_agents.")  # exit the program
        # Pair the first self.num_agents ids (egos followed by enemies) with the
        # corresponding entries of `data` and build a dict from those pairs.
        unpack_data = dict(zip(all_ids[:self.num_agents], data))
        # Any ids beyond self.num_agents (agents without data this step) map to
        # None. TODO(review): with the sanity check above this loop never runs —
        # confirm whether it is still needed.
        for agent_id in all_ids[self.num_agents:]:
            unpack_data[agent_id] = None
        return unpack_data
        
    
if __name__ == "__main__":
    # Quick smoke test: build the environment from the 'test' config and show it.
    environment = BaseEnv('test')
    print(environment)