import argparse
import copy
import logging
import os  # BUGFIX: was the corrupted (non-existent) `import os.patorch`
import random
import sys
import time
from collections import Counter
from dataclasses import dataclass, field
from logging.handlers import RotatingFileHandler
from typing import Any, Generator, NamedTuple, Optional, Tuple, Union

import cv2
import numpy as np
import ptan
import torch
from PIL import Image

import gym  # legacy gym API (used by most wrappers below)
from gym import spaces
import gymnasium  # gymnasium API, needed for ptan compatibility
from gymnasium import spaces  # NOTE: rebinds `spaces` to gymnasium.spaces



class GymToGymnasiumWrapper(gymnasium.Env):
    """Adapter exposing a legacy ``gym.Env`` through the gymnasium API (for ptan)."""

    def __init__(self, env):
        self.env = env
        self.observation_space = env.observation_space
        self.action_space = env.action_space

    def reset(self, seed=None, options=None):
        """Reset the wrapped env, normalizing its return to ``(obs, info)``."""
        if seed is not None:
            self.env.seed(seed)
        outcome = self.env.reset()
        # Legacy envs may return either a bare `obs` or an `(obs, ...)` tuple.
        if not isinstance(outcome, tuple):
            return outcome, {}
        info = outcome[1] if len(outcome) > 1 else {}
        return outcome[0], info

    def step(self, action):
        """Step the wrapped env, normalizing 4-tuple returns to the 5-tuple API."""
        outcome = self.env.step(action)
        if len(outcome) == 5:
            # Already (obs, reward, done, truncated, info).
            return outcome
        if len(outcome) == 4:
            obs, reward, done, info = outcome
            return obs, reward, done, False, info
        raise ValueError(f"Unexpected number of return values from env.step: {len(outcome)}")

    def render(self):
        return self.env.render()

    def close(self):
        return self.env.close()

    def seed(self, seed=None):
        return self.env.seed(seed)



def save_best_model(score, state, save_dir, save_name, keep_best=5):
    """Save a "best" model file and keep only the `keep_best` highest-scoring ones.

    The score is embedded in the file name (`{save_name}_{score}.pth`) and is
    parsed back out to rank existing files; only files whose name contains
    "best" and an underscore are considered for pruning.

    :param score: numeric score of this model (used for ranking)
    :param state: object to pass to ``torch.save`` (e.g. a state_dict)
    :param save_dir: target directory (created if missing)
    :param save_name: file-name prefix
    :param keep_best: how many top-scoring files to keep
    """
    os.makedirs(save_dir, exist_ok=True)

    # BUGFIX: `os.patorch` / `.ptorch` were corrupted forms of `os.path` / `.pth`.
    save_path = os.path.join(save_dir, f'{save_name}_{score}.pth')
    torch.save(state, save_path)

    def _score_of(name):
        # Parse the score back out of `{prefix}_{score}{ext}`; float() also
        # handles integer scores, so fractional scores no longer crash the sort.
        return float(os.path.splitext(name)[0].split('_')[-1])

    candidates = sorted(
        (f for f in os.listdir(save_dir) if "best" in f and "_" in f),
        key=_score_of,
    )
    if len(candidates) > keep_best:
        for old_model in candidates[:-keep_best]:
            os.remove(os.path.join(save_dir, old_model))
    
def save_checkpoints(iter, state, checkpoint_dir, save_name, keep_last=5):
    """Save a training checkpoint and keep only the `keep_last` most recent ones.

    Files are named `{save_name}_epoch_{iter}.pth`; the epoch number is parsed
    back out of any file containing "epoch" to decide which ones to delete.

    :param iter: epoch/iteration number (embedded in the file name)
    :param state: object to pass to ``torch.save``
    :param checkpoint_dir: target directory (created if missing)
    :param save_name: file-name prefix
    :param keep_last: how many recent checkpoints to keep
    """
    # BUGFIX: `os.patorch` / `.ptorch` were corrupted forms of `os.path` / `.pth`;
    # also use makedirs(exist_ok=True) for consistency with save_best_model.
    os.makedirs(checkpoint_dir, exist_ok=True)

    checkpoint_path = os.path.join(checkpoint_dir, f'{save_name}_epoch_{iter}.pth')
    torch.save(state, checkpoint_path)

    all_checkpoints = sorted(
        (f for f in os.listdir(checkpoint_dir) if "epoch" in f),
        key=lambda f: int(os.path.splitext(f)[0].split('_')[-1]),
    )
    if len(all_checkpoints) > keep_last:
        for old_checkpoint in all_checkpoints[:-keep_last]:
            os.remove(os.path.join(checkpoint_dir, old_checkpoint))


def count_boundary(c_rate):
    """Return (scale, offset) mapping a value in [-1, 1] into [c_rate[1], c_rate[0]]."""
    half_range = (c_rate[0] - c_rate[1]) / 2  # half the width of the interval
    midpoint = c_rate[0] - half_range         # center of the interval
    return half_range, midpoint


def true_parameter_action(parameter_action, c_rate):
    """Map normalized parameter-embedding values back to real action-parameter ranges.

    Each component i is transformed as ``p[i] * scale_i + offset_i``, where the
    per-dimension bounds come from ``c_rate[i]``.
    TODO: revisit how c_rate is produced by the VAE part.
    """
    rescaled = copy.deepcopy(parameter_action)
    for idx in range(len(rescaled)):
        scale, offset = count_boundary(c_rate[idx])
        rescaled[idx] = rescaled[idx] * scale + offset
    return rescaled


"""
该类就是用来跟踪、记录、判断激励的追踪类
"""
class RewardTracker:
    def __init__(self, writer, stop_reward):
        '''
        param writer: tensorboard writer保存
        param stop_reward: 停止训练的激励值\目标值
        '''

        self.writer = writer
        self.stop_reward = stop_reward

    def __enter__(self):
        self.ts = time.time()
        self.ts_frame = 0
        # total_rewards 训练期间的每一步的激励值，用来记录
        self.total_rewards = []
        self.total_steps = []
        return self

    def __exit__(self, *args):
        self.writer.close()

    def reward(self, end_infos, frame, epsilon=None):
        '''
        param reward: 样本
        param fream: 当前进行了第frame次的训练
        param epsilon：当前的epsilon值

        return True: 表示已经达到了目标激励值 False： 表示还没有达到目标的激励值
        '''
        # 激励经验存储在总缓存区
        for end_info in end_infos:
            self.total_rewards.append(end_info[0])
            self.total_steps.append(end_info[1])
            # 计算当前的平均帧率
            speed = (frame - self.ts_frame) / (time.time() - self.ts)
            # 将当前帧总数和所花费的时间存储在缓存中
            self.ts_frame = frame
            self.ts = time.time()
            # 计算平均激励值
            mean_reward = np.mean(self.total_rewards[-100:])
            mean_step = np.mean(self.total_steps[-100:])
            epsilon_str = "" if epsilon is None else ", eps %.2f" % epsilon
            print("%d: done %d games, mean reward %.3f, mean step %.3f, speed %.2f f/s%s" % (
                frame, len(self.total_rewards), mean_reward, mean_step, speed, epsilon_str
            ))
            sys.stdout.flush()
            if epsilon is not None:
                self.writer.add_scalar("epsilon", epsilon, frame)
            self.writer.add_scalar("speed", speed, frame)
            self.writer.add_scalar("reward_100", mean_reward, frame)
            self.writer.add_scalar("reward", end_info[0], frame)
            self.writer.add_scalar("step_100", mean_step, frame)
            self.writer.add_scalar("step", end_info[1], frame)
        # 如果当前获取的激励已经达到了目标的激励大小，则返回true
        if mean_reward > self.stop_reward:
            print("Solved in %d frames!" % frame)
            return True
        return False


@dataclass
class AgentState(ptan.experience.BaseAgentState):
    """Per-agent recurrent state: last observation, action, reward and hidden state."""
    obs: torch.Tensor  # latest observation (acts as next_obs)
    action_dim: int  # size of the discrete action space
    last_action: torch.Tensor = field(init=False)  # one-hot action that produced `obs`
    # BUGFIX: a class-level tensor default would be shared by every instance;
    # use a default_factory so each instance gets its own zero tensor.
    last_reward: torch.Tensor = field(
        default_factory=lambda: torch.zeros((1, 1), dtype=torch.float32))
    hidden_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None  # LSTM (h, c), presumably — confirm with the model
    q_value: torch.Tensor = field(init=False)

    def __post_init__(self):
        # Start with an all-zero (no-op) one-hot action vector.
        self.last_action = torch.zeros((1, self.action_dim), dtype=torch.float32)

    def update(self, obs, action, reward, done, next_obs):
        '''
        Refresh the tracked state after an environment transition.

        :param obs: previous observation (unused here; kept for interface parity)
        :param action: discrete action index taken to reach next_obs
        :param reward: reward received for that transition
        :param done: terminal flag (unused here)
        :param next_obs: numpy observation stored as the new `obs` (batched)
        '''
        self.obs = torch.from_numpy(next_obs).unsqueeze(0)
        # One-hot encode the executed action.
        self.last_action = torch.FloatTensor([[1 if i == action else 0 for i in range(self.action_dim)]])
        self.last_reward = torch.FloatTensor([[reward]])


def _greedy_action(net, state, header_number, device):
    """Greedy action from the network: a single head's argmax, or a majority
    vote over all ensemble heads when no head is specified."""
    with torch.no_grad():
        state_t = torch.tensor(state, dtype=torch.float).unsqueeze(0).to(device)
        if header_number is not None:
            # Use the requested ensemble head only.
            q_values = net(state_t, header_number).detach().cpu()
            return int(q_values.max(1).indices.numpy())
        # Each head proposes its argmax action; the most frequent action wins.
        head_q_values = net(state_t).detach()
        votes = [int(q.cpu().max(1).indices.numpy()) for q in head_q_values]
        return Counter(votes).most_common(1)[0][0]


def choose_action(env, net, state, header_number: Optional[int] = None, epsilon=None, device='cpu'):
    '''
    Select a discrete action for `state`, optionally epsilon-greedily.

    :param env: environment (only used for `action_space.sample()` on exploration)
    :param net: Q-network; called as net(state, head) or net(state) for all heads
    :param state: raw observation (array-like)
    :param header_number: index of the ensemble head to use; None = majority vote
    :param epsilon: exploration probability; None disables random exploration
    :param device: torch device for the forward pass
    :return: discrete action index (or env.action_space.sample() result)
    '''
    # BUGFIX: the original used the corrupted keyword `witorch` instead of
    # `with` (a syntax error); the two duplicated greedy branches are also
    # collapsed into one helper.
    if epsilon is not None and np.random.random() <= epsilon:
        # Epsilon-greedy exploration: pick a uniformly random action.
        return env.action_space.sample()
    return _greedy_action(net, state, header_number, device)



class HyArTd3Agent(ptan.agent.BaseAgent):
    """HyAR-TD3 agent producing hybrid (discrete, continuous) actions for ptan.

    Two operating modes:
      * ``compute_emb=False``: the qactor/continuous_actor pair picks the
        discrete action and its continuous parameters directly.
      * ``compute_emb=True``: actions are selected in a learned embedding space
        (HyAR) and decoded back to environment actions via ``action_rep``.
    """

    def __init__(self, params, qactor, continuous_actor, policy_model, action_rep, discrete_action_dim, continuous_action_dim, discrete_emb_dim, continuous_emb_dim, max_action, env, device='cpu'):
        super().__init__()

        self.params = params
        self.action_rep = action_rep      # action-representation model (embedding encode/decode)
        self.policy_model = policy_model  # policy operating in embedding space
        self.max_action = max_action
        self.use_ornstein_noise = params.use_ornstein_noise
        self.discrete_action_dim = discrete_action_dim
        self.continuous_action_dim = continuous_action_dim
        self.discrete_emb_dim = discrete_emb_dim
        self.continuous_emb_dim = continuous_emb_dim
        self.qactor = qactor              # Q-network over (state, continuous parameters)
        self.continuous_actor = continuous_actor
        self.device = device
        self.env = env
        self.cur_step = 0
        # Continuous parameters are treated as normalized to [-1, 1] per dimension.
        self.continuous_action_parameter_max_numpy = np.ones((self.continuous_action_dim,))
        self.continuous_action_parameter_min_numpy = -self.continuous_action_parameter_max_numpy
        self._seed(int(time.time()))
        self.compute_emb = False  # toggled externally once the embedding model is usable
        self.c_rate = None        # per-dimension embedding bounds (set externally)

    def initial_state(self):
        # No recurrent agent state is kept between steps.
        return None

    def get_epsilon(self, t):
        """Linearly anneal exploration noise from expl_noise_initial down to
        expl_noise over the first `epsilon_steps` steps, then hold constant."""
        if t < self.params.epsilon_steps:
            epsilon = self.params.expl_noise_initial - (self.params.expl_noise_initial - self.params.expl_noise) * (
                    t / self.params.epsilon_steps)
        else:
            epsilon = self.params.expl_noise

        return epsilon

    def __call__(self, states, agent_states):
        """
        Convert observations and states into actions to take
        :param states: list of environment states to process
        :param agent_states: list of states with the same length as observations
        :return: tuple of actions, states
        """
        actions = []
        agent_state_inner = []
        for state, agent_state in zip(states, agent_states):
            # all_continuous_actions corresponds to `all_action_parameters`
            # in the reference implementation.
            discrete_action, continuous_action, all_continuous_actions = self.act(state, agent_state)
            if self.compute_emb:
                discrete_emb, continuous_emb = self.policy_model.select_action(state)
                epsilon = self.get_epsilon(self.cur_step)
                # HyAR exploration: perturb both embeddings with Gaussian noise
                # and clip back into the valid embedding range.
                discrete_emb = (
                        discrete_emb + np.random.normal(0, self.max_action * epsilon, size=self.discrete_emb_dim)
                ).clip(-self.max_action, self.max_action)
                continuous_emb = (
                        continuous_emb + np.random.normal(0, self.max_action * epsilon, size=self.continuous_emb_dim)
                ).clip(-self.max_action, self.max_action)

                # Rescale the predicted continuous embedding into the true
                # embedding range (effectively continuous_emb * c_rate).
                true_parameter_emb = true_parameter_action(continuous_emb, self.c_rate)

                # Decode the discrete action from its (noisy) embedding, then
                # look up the clean embedding of that decoded action.
                discrete_action_embedding = copy.deepcopy(discrete_emb)
                discrete_action_embedding = torch.from_numpy(discrete_action_embedding).float().reshape(1, -1)
                similare_emb_discrete_action = self.action_rep.select_discrete_action(discrete_action_embedding)
                similare_discrete_emb = self.action_rep.get_embedding(similare_emb_discrete_action).cpu().view(-1).data.numpy()
                discrete_2_continuous_action = self.action_rep.select_parameter_action(state, true_parameter_emb,
                                                                        similare_discrete_emb)
                # TODO: this yields the continuous value of the chosen discrete
                # action only, while the env expects a list of continuous values
                # for every discrete action — the two formats need reconciling.
                actions.append((similare_emb_discrete_action, discrete_2_continuous_action))
                agent_state_inner.append((discrete_2_continuous_action, all_continuous_actions, discrete_emb, continuous_emb))
            else:
                # TODO: adapt here too — the actor's continuous output covers all
                # discrete actions. `discrete_2_continuous_action` corresponds to
                # `act_param_` in the reference code: the two parameters that
                # belong to the chosen discrete action.
                discrete_2_continuous_action = continuous_action[2 * discrete_action: 2 * discrete_action + 2]
                # `discrete_action` corresponds to `act` in the reference code.
                actions.append((discrete_action, discrete_2_continuous_action))
                agent_state_inner.append((discrete_2_continuous_action, all_continuous_actions, None, None))

        self.cur_step += 1

        return actions, agent_state_inner

    def _seed(self, seed=None):
        """
        Seed python/numpy/torch RNGs.

        NOTE: this will not reset the randomly initialised weights; use the
        seed parameter in the constructor instead.

        :param seed: integer seed (None leaves the torch RNGs untouched)
        """
        self.seed = seed
        random.seed(seed)
        np.random.seed(seed)
        self.np_random = np.random.RandomState(seed=seed)
        if seed is not None:
            torch.manual_seed(seed)
            if self.device == torch.device("cuda"):
                torch.cuda.manual_seed(seed)

    def act(self, state, agent_state=None):
        '''
        :param state: environment observation
        :return: (action, action_parameters, all_action_parameters) where
            `action` is the discrete action index and, for this environment,
            `action_parameters == all_action_parameters` (the env needs the
            parameters of every discrete action to execute correctly).
        '''
        # BUGFIX: the original used the corrupted keyword `witorch` instead of
        # `with`, which was a syntax error.
        with torch.no_grad():
            state = torch.from_numpy(state).to(self.device)

            all_action_parameters = self.continuous_actor(state)

            # Hausknecht and Stone [2016] use epsilon-greedy actions with
            # uniform random action-parameter exploration.
            # NOTE(review): `rnd < 1.0` is always true, so the discrete action
            # is always sampled uniformly and the Q-network branch below is
            # dead code — confirm whether this should be `rnd < epsilon`.
            rnd = self.np_random.uniform()
            if rnd < 1.0:
                action = self.np_random.choice(self.discrete_action_dim)
                if not self.use_ornstein_noise:
                    all_action_parameters = torch.from_numpy(np.random.uniform(self.continuous_action_parameter_min_numpy,
                                                              self.continuous_action_parameter_max_numpy))
            else:
                # Model-based choice: Q-values over discrete actions given the
                # state and the predicted continuous parameters.
                Q_a = self.qactor.forward(state.unsqueeze(0), all_action_parameters.unsqueeze(0))
                Q_a = Q_a.detach().cpu().data.numpy()
                action = np.argmax(Q_a)  # index of the best discrete action

            all_action_parameters = all_action_parameters.cpu().data.numpy()

            # Return the full parameter vector for both values (see docstring).
            action_parameters = all_action_parameters

        return action, action_parameters, all_action_parameters
    


class HyArTd3ReplayBuffer(ptan.experience.ExperienceReplayBuffer):
    """Replay buffer for HyAR-TD3.

    On insert (`_add`) each experience tuple is extended with the state delta
    (next_state - state); on `sample` the stored hybrid-action samples are
    unpacked into per-field tensors on `self.device`.
    """

    def __init__(self, experience_source, buffer_size, device='cpu'):
        super().__init__(experience_source, buffer_size)
        self.device = device


    def sample(self, batch_size):
        """Sample a batch and split it into tensors.

        Assumed stored sample layout (see `_add`; confirm against the
        experience source):
            sample[0]: state
            sample[1]: (discrete_action, continuous_action)
            sample[2]: reward
            sample[3]: done flag
            sample[4]: next_state
            sample[5]: agent-state tuple of
                (discrete_2_continuous_action, all_continuous_actions,
                 discrete_emb or None, continuous_emb or None)
            sample[7]: next_state - state, appended by `_add`

        :return: 10-tuple of tensors — states, discrete_actions,
            continuous_actions, all_continuous_actions, discrete_embs,
            continuous_embs, next_states, state_2_next_states, rewards,
            not_dones.
        """
        batch_data = super().sample(batch_size)
        if len(batch_data) <= 0:
            raise IndexError("Not enough entries to sample from")
        
        states = []
        discrete_actions = []
        continuous_actions = []
        all_continuous_actions = []
        discrete_embs = []
        continuous_embs = []
        next_states = []
        state_2_next_states = []
        rewards = []
        not_dones = []
        for _, sample in enumerate(batch_data):
            states.append(np.array(sample[0], copy=False))
            discrete_actions.append(sample[1][0])
            continuous_actions.append(np.array(sample[1][1], copy=False))
            all_continuous_actions.append(np.array(sample[5][1], copy=False))
            # Embeddings may be None (when the agent runs in non-embedding mode).
            if sample[5][2] is not None:
                discrete_embs.append(np.array(sample[5][2], copy=False))
            else:
                # NOTE(review): zero-vector placeholder of hard-coded dim 8 —
                # confirm this matches discrete_emb_dim.
                discrete_embs.append(np.zeros(8))
            if sample[5][3] is not None:
                continuous_embs.append(np.array(sample[5][3], copy=False))
            else:
                # NOTE(review): placeholder dim 8 — confirm continuous_emb_dim.
                continuous_embs.append(np.zeros(8))
            next_states.append(np.array(sample[4], copy=False))
            state_2_next_states.append(np.array(sample[7], copy=False))
            rewards.append(sample[2])
            not_dones.append(1.0 - sample[3])

        return (
            torch.FloatTensor(np.array(states)).to(self.device),
            torch.LongTensor(discrete_actions).to(self.device),
            torch.FloatTensor(np.array(continuous_actions)).to(self.device),
            torch.FloatTensor(np.array(all_continuous_actions)).to(self.device),
            torch.FloatTensor(np.array(discrete_embs)).to(self.device),
            torch.FloatTensor(np.array(continuous_embs)).to(self.device),
            torch.FloatTensor(np.array(next_states)).to(self.device),
            torch.FloatTensor(np.array(state_2_next_states)).to(self.device),
            torch.FloatTensor(rewards).to(self.device),
            torch.FloatTensor(not_dones).to(self.device),
        )

    

    def _add(self, sample):
        """
        Append the state delta (next_state - state) to the experience tuple
        and store it in the buffer.

        Args:
            sample: experience tuple
                (obs, action, reward, done, next_obs, agent_state, step_info),
                possibly wrapped once more as a 1-tuple by the source.
        """
        
        # The source may hand us a nested 1-tuple; unwrap it first.
        if len(sample) == 1 and isinstance(sample[0], tuple):
            inner_sample = sample[0]
            # Append next_obs - obs (the state delta used for HyAR training).
            extended_sample = inner_sample + (inner_sample
                                              [4] - inner_sample[0],)
        else:
            # Not nested: append the delta directly.
            extended_sample = sample + (sample[4] - sample[0],)
        
        # Delegate the actual storage to the parent class.
        super()._add(extended_sample)


    
    def state_dict(self):
        """Snapshot buffer contents and cursor for checkpointing.

        `buffer`/`capacity`/`pos` are presumably maintained by the ptan base
        class — verify against the installed ptan version.
        """
        state = {
            "buffer": self.buffer,
            "capacity": self.capacity,
            "pos": self.pos
        }
        return state

    def load_state_dict(self, state):
        """Restore buffer contents and cursor from a `state_dict` snapshot."""
        self.buffer = state["buffer"]
        self.capacity = state["capacity"]
        self.pos = state["pos"]



def select_device(args):
    """Pick the torch device to run on.

    Prefers CUDA, then Apple-Silicon MPS — but only when the user asked for a
    GPU (`args.gpu`); otherwise, or when neither backend is available, falls
    back to CPU.

    :param args: parsed CLI arguments exposing a boolean `gpu` attribute
    :return: torch.device
    """
    if args.gpu:
        if torch.cuda.is_available():
            return torch.device("cuda")
        # BUGFIX: previously `torch.backends.mps.is_available()` was evaluated
        # even when args.gpu was False; also guard for torch builds that lack
        # the mps backend attribute entirely.
        mps = getattr(torch.backends, "mps", None)
        if mps is not None and mps.is_available():
            return torch.device("mps")
    return torch.device("cpu")


class ProcessFrame(gym.ObservationWrapper):
    """
    Converts game frames (observations) to size x size images, optionally grayscale.
    """

    def __init__(self, env=None, gray=True, size=84):
        super(ProcessFrame, self).__init__(env)
        # New observation space: 0..255 image of (size, size) with 1 (gray) or 3 channels.
        self.observation_space = spaces.Box(low=0, high=255, shape=(size, size, 1 if gray else 3), dtype=np.uint8)
        self.gray = gray
        self.size = size

    def observation(self, obs):
        """Transform a raw frame into the processed observation."""
        return ProcessFrame.process(obs, self.gray, self.size)

    # BUGFIX: the decorator was the corrupted name `@staticmetorchod`, which
    # raised NameError at class-creation time; also dropped the stray
    # `global count_frame` (undefined and unused).
    @staticmethod
    def process(img, gray=True, size=84):
        """Optionally grayscale, then crop and resize a frame to (size, size, C) uint8."""
        x_t = img
        if gray:
            # Standard luminance weights for RGB -> grayscale conversion.
            x_t = x_t[:, :, 0] * 0.299 + x_t[:, :, 1] * 0.587 + x_t[:, :, 2] * 0.114

        # Crop rows 23..209 — presumably trims the HUD/score area of the frame;
        # TODO confirm against the actual environment's frame layout.
        x_t = x_t[23:210, :]
        x_t = cv2.resize(x_t, (size, size), interpolation=cv2.INTER_AREA)
        x_t = np.reshape(x_t, [size, size, 1 if gray else 3])
        return x_t.astype(np.uint8)

    @staticmethod
    def save_state_as_image(state, filename):
        """Save the state as a PNG image (debugging helper)."""
        # Ensure the state is a NumPy array with dtype uint8.
        if state.dtype != np.uint8:
            # If state is float, scale to [0, 255] and convert to uint8.
            state = np.uint8(255 * (state - state.min()) / (state.max() - state.min()))
        # Remove extra dimensions if necessary.
        state = state.squeeze()
        img = Image.fromarray(state)
        # Convert to grayscale mode if the array's mode is not directly savable.
        if img.mode not in ('L', 'RGB'):
            img = img.convert('L')
        img.save(filename)


class FireResetEnv(gym.Wrapper):
    def __init__(self, env=None):
        """For environments where the user needs to press FIRE for the game to start."""
        super(FireResetEnv, self).__init__(env)
        # Such games expose FIRE as action 1 and have at least 3 actions.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def step(self, action):
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        """Reset, then blindly press actions 1 and 2 to kick the game off.

        The exact FIRE/start action index is not known in general, so actions
        are tried one by one; if a probe step accidentally terminates the
        episode, the env is reset again. Assumes the start button is among the
        first 3 actions.
        """
        self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(1)
        if done:
            self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(2)
        if done:
            # BUGFIX: previously the (possibly terminal) pre-reset observation
            # was returned here; return the fresh post-reset observation instead.
            obs, info = self.env.reset(seed=seed, options=options)
        return obs, info
    


class RewardPenaltyWrapper(gym.Wrapper):
    """Replaces the reward with a fixed penalty whenever the agent loses a life."""

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty
        self.life_loss_penalty = life_loss_penalty
        self.previous_lives = 0

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # Remember the starting life count so the first loss is detected.
        self.previous_lives = info.get('lives', 0)
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        lives_now = info.get('lives', self.previous_lives)
        # A drop in the life counter means a life was just lost:
        # override the reward with the configured penalty.
        if lives_now < self.previous_lives:
            reward = self.life_loss_penalty
            self.previous_lives = lives_now

        return obs, reward, done, truncated, info
    

import collections

class FrameStack(gym.Wrapper):
    """Stacks the last k observations along the first (channel) axis."""

    def __init__(self, env, k):
        super(FrameStack, self).__init__(env)
        self.k = k
        self.frames = collections.deque(maxlen=k)
        shp = env.observation_space.shape
        self.observation_space = gym.spaces.Box(
            low=0, high=255, shape=(shp[0] * k, *shp[1:]), dtype=np.float32
        )

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # Fill the buffer with copies of the initial frame.
        self.frames.extend(obs for _ in range(self.k))
        return self._get_obs(), info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)
        self.frames.append(obs)
        return self._get_obs(), reward, done, truncated, info

    def _get_obs(self):
        # Stack eagerly with NumPy instead of using LazyFrames.
        return np.concatenate(tuple(self.frames), axis=0)
    

class ScaledStateWrapper(gym.ObservationWrapper):
    """
    Scales the observation space to [-1, 1].

    Supports a plain Box observation space, or a (Box, Discrete) compound
    space where only the Box part is rescaled.
    """

    def __init__(self, env):
        super(ScaledStateWrapper, self).__init__(env)
        obs = env.observation_space
        self.compound = False
        self.low = None
        self.high = None
        print(type(obs))
        print(obs)
        if isinstance(obs, gym.spaces.Box):
            # Plain Box: remember the original bounds and expose a [-1, 1] Box.
            self.low = env.observation_space.low
            self.high = env.observation_space.high
            self.observation_space = gym.spaces.Box(low=-np.ones(self.low.shape), high=np.ones(self.high.shape),
                                                    dtype=np.float32)
        elif isinstance(obs, gym.spaces.Tuple):
            # BUGFIX: this branch previously used typing.Tuple both in the
            # isinstance check (always wrong type here) and as a space
            # constructor (not callable that way); gym.spaces.Tuple is correct.
            # Compound (Box, Discrete): only the Box component is rescaled.
            self.low = obs.spaces[0].low
            self.high = obs.spaces[0].high
            assert len(obs.spaces) == 2 and isinstance(obs.spaces[1], gym.spaces.Discrete)
            self.observation_space = gym.spaces.Tuple(
                (gym.spaces.Box(low=-np.ones(self.low.shape), high=np.ones(self.high.shape),
                                dtype=np.float32),
                 obs.spaces[1]))
            self.compound = True  # marks the (Box, Discrete) compound layout
        else:
            raise Exception("Unsupported observation space type: %s" % self.observation_space)

    def scale_state(self, state):
        """Map a raw state from [low, high] into [-1, 1]."""
        state = 2. * (state - self.low) / (self.high - self.low) - 1.
        return state

    def _unscale_state(self, scaled_state):
        """Inverse of scale_state: map [-1, 1] back to [low, high]."""
        state = (self.high - self.low) * (scaled_state + 1.) / 2. + self.low
        return state

    def observation(self, obs):
        """Scale the Box part of the observation; pass the Discrete part through."""
        if self.compound:
            state, steps = obs
            ret = (self.scale_state(state), steps)
        else:
            ret = self.scale_state(obs)
        return ret
    

class ScaledParameterisedActionWrapper(gym.ActionWrapper):
    """
    Changes the scale of the continuous action parameters to [-1, 1].
    Parameter space must be flattened!

    Expected (flattened) action space layout:

    Tuple((
        Discrete(n),
        Box(c_1),
        Box(c_2),
        ...
        Box(c_n)
        )
    """

    def __init__(self, env):
        super(ScaledParameterisedActionWrapper, self).__init__(env)
        self.old_as = env.action_space  # index 0: Discrete actions; 1..n: per-action parameter Boxes
        self.num_actions = self.old_as[0].n  # number of discrete actions
        # Per-action parameter bounds and ranges from the original space.
        self.high = [self.old_as.spaces[i].high for i in range(1, self.num_actions + 1)]
        self.low = [self.old_as.spaces[i].low for i in range(1, self.num_actions + 1)]
        self.range = [self.old_as.spaces[i].high - self.old_as.spaces[i].low for i in range(1, self.num_actions + 1)]
        # BUGFIX: bare `Box` was an undefined name and `Tuple` resolved to
        # typing.Tuple; use the gym.spaces constructors explicitly.
        new_params = [  # rescaled [-1, 1] parameter Boxes
            gym.spaces.Box(-np.ones(self.old_as.spaces[i].low.shape), np.ones(self.old_as.spaces[i].high.shape), dtype=np.float32)
            for i in range(1, self.num_actions + 1)
        ]
        # New action space: the original Discrete part plus the rescaled
        # continuous parameter Boxes.
        self.action_space = gym.spaces.Tuple((
            self.old_as.spaces[0],  # actions
            *new_params,
        ))

    def action(self, action):
        """
        Rescale from [-1,1] to original action-parameter range.

        :param action: (discrete_index, parameters) with parameters in [-1, 1]
        :return: same structure with the chosen action's parameters unscaled
        """
        action = copy.deepcopy(action)
        p = action[0]
        # Only the parameters of the chosen discrete action are rescaled.
        action[1][p] = self.range[p] * (action[1][p] + 1) / 2. + self.low[p]
        return action


"""
Gym Hybrid 环境的 Wrapper 工具集
"""
import gym
import numpy as np
from gym import spaces
from typing import Tuple


class NormalizeActionWrapper(gym.ActionWrapper):
    """Normalizes a hybrid (Discrete, Box) action space so the continuous part
    lives in [-1, 1].

    Original space (BaseEnv):  Tuple(Discrete(3), Box([0, -1], [1, 1]))
    Normalized space:          Tuple(Discrete(3), Box([-1, -1], [1, 1]))

    Example:
        env = NormalizeActionWrapper(gym.make('Moving-v0'))
        action = (1, np.array([-0.5, 0.8]))  # all continuous values in [-1, 1]
        obs, reward, terminated, truncated, info = env.step(action)
    """

    def __init__(self, env: gym.Env):
        """Validate the wrapped env's action space and build the normalized one.

        Args:
            env: the original environment instance
        """
        super().__init__(env)

        space = env.action_space
        if not isinstance(space, spaces.Tuple):
            raise ValueError("此 Wrapper 只支持 Tuple 类型的混合动作空间")

        discrete_part = space[0]
        continuous_part = space[1]

        if not isinstance(discrete_part, spaces.Discrete):
            raise ValueError("混合动作空间的第一个元素必须是 Discrete 类型")
        if not isinstance(continuous_part, spaces.Box):
            raise ValueError("混合动作空间的第二个元素必须是 Box 类型")

        # Keep the original bounds for the forward/inverse mappings.
        self.original_low = continuous_part.low.copy()
        self.original_high = continuous_part.high.copy()

        # Build the normalized space: continuous part mapped onto [-1, 1].
        dim = continuous_part.shape[0]
        unit_box = spaces.Box(
            low=np.array([-1.0] * dim, dtype=np.float32),
            high=np.array([1.0] * dim, dtype=np.float32),
            dtype=np.float32
        )

        self.action_space = spaces.Tuple((discrete_part, unit_box))

        print(f"[NormalizeActionWrapper] 原始连续空间范围: [{self.original_low}, {self.original_high}]")
        print(f"[NormalizeActionWrapper] 归一化后范围: [[-1, -1], [1, 1]]")

    def action(self, action: Tuple[int, np.ndarray]) -> Tuple[int, np.ndarray]:
        """Map a normalized action from [-1, 1] back to the original space.

        original_value = low + (normalized_value + 1) * (high - low) / 2
        with normalized_value in [-1, 1] and original_value in [low, high].

        Args:
            action: (discrete_id, normalized_continuous_params)

        Returns:
            (discrete_id, original_continuous_params)
        """
        discrete_id, params = action

        # Coerce to float32 ndarray and clip into [-1, 1] against numeric drift.
        params = np.clip(np.array(params, dtype=np.float32), -1.0, 1.0)

        # Linear map: [-1, 1] -> [original_low, original_high].
        restored = self.original_low + \
            (params + 1.0) * (self.original_high - self.original_low) / 2.0

        return (discrete_id, restored)

    def reverse_action(self, action: Tuple[int, np.ndarray]) -> Tuple[int, np.ndarray]:
        """Map an original-space action into the normalized space.

        normalized_value = 2 * (original_value - low) / (high - low) - 1
        Useful for debugging / inverse transformations.

        Args:
            action: action in the original space

        Returns:
            the normalized action
        """
        discrete_id, params = action

        params = np.array(params, dtype=np.float32)

        # Linear map: [original_low, original_high] -> [-1, 1], then clip.
        scaled = 2.0 * (params - self.original_low) / \
            (self.original_high - self.original_low) - 1.0

        return (discrete_id, np.clip(scaled, -1.0, 1.0))


class NormalizeHardMoveActionWrapper(gym.ActionWrapper):
    """
    Normalization wrapper for HardMoveEnv's hybrid action space.

    Original action space (HardMoveEnv, num_actuators=4):
        - discrete part: Discrete(16) -> {0, 1, ..., 15}
        - continuous part: Box([-1, -1, -1, -1], [1, 1, 1, 1])

    HardMoveEnv's continuous part is already in [-1, 1], so this wrapper mainly:
    1. unifies the interface with the other envs
    2. provides extra clipping / validation
    3. keeps the wrapper chain consistent across environments
    """

    def __init__(self, env: gym.Env):
        super().__init__(env)

        # Only Tuple-shaped hybrid action spaces are supported.
        if not isinstance(env.action_space, spaces.Tuple):
            raise ValueError("此 Wrapper 只支持 Tuple 类型的混合动作空间")

        print(f"[NormalizeHardMoveActionWrapper] HardMove 环境的连续空间已经归一化到 [-1, 1]")

    def action(self, action: Tuple[int, np.ndarray]) -> Tuple[int, np.ndarray]:
        """
        For HardMoveEnv the mapping is the identity; only clip the continuous
        parameters into [-1, 1] and pass the discrete id through untouched.
        """
        discrete_id, params = action

        params = np.clip(np.array(params, dtype=np.float32), -1.0, 1.0)

        return (discrete_id, params)


class VerboseActionWrapper(gym.Wrapper):
    """
    Debug wrapper that prints the action fed to the env every `print_every` steps.
    """

    def __init__(self, env: gym.Env, print_every: int = 1):
        super().__init__(env)
        self.print_every = print_every  # print every N-th step
        self.step_count = 0             # steps taken since the last reset()

    def step(self, action):
        self.step_count += 1

        if self.step_count % self.print_every == 0:
            disc_id, con_params = action
            print(f"\n[Step {self.step_count}] 动作信息:")
            print(f"  离散动作ID: {disc_id}")
            print(f"  连续参数: {con_params}")

            # For BaseEnv-style spaces (Tuple whose first entry is Discrete(3)),
            # also print a human-readable name for the discrete action.
            space = getattr(self.env, 'action_space', None)
            if space is not None and isinstance(space[0], spaces.Discrete):
                if space[0].n == 3:
                    action_names = {0: "ACCELERATE", 1: "TURN", 2: "BREAK"}
                    print(f"  动作类型: {action_names.get(disc_id, 'UNKNOWN')}")

        return self.env.step(action)

    def reset(self, **kwargs):
        # A new episode starts counting from zero again.
        self.step_count = 0
        return self.env.reset(**kwargs)


def wrap_dqn(env_name, episodic_life = False, gray = True, clip_reward = False, stack_frames = 4):
    """
    Build an environment and apply the normalization wrapper chain.

    NOTE(review): episodic_life / gray / clip_reward / stack_frames are
    currently unused — the ptan Atari-style wrappers that consumed them are
    disabled because they are incompatible with this gym version. The
    parameters are kept so existing call sites do not break.
    """
    import gym as old_gym  # create the env with classic gym

    base_env = old_gym.make(env_name)
    # Scale observations into [-1, 1], then normalize actions into [-1, 1].
    env = NormalizeActionWrapper(ScaledStateWrapper(base_env))
    # Expose a gymnasium.Env interface so ptan can consume the env.
    env = GymToGymnasiumWrapper(env)

    # Disabled ptan wrappers that used to run here (gym-version incompatible):
    #   MaxAndSkipEnv(skip=4), EpisodicLifeEnv (single-life emulation),
    #   NoopResetEnv(noop_max=30), FireResetEnv (when 'FIRE' is an action),
    #   ProcessFrame(gray, size=84), ImageTotorch, FrameStack(stack_frames),
    #   RewardPenaltyWrapper (reward clipping).
    # TODO: compare resolutions/color once the wrappers are restored.
    return env


def setup_logger(save_patorch):
    """
    Create (or reuse) the module logger, writing to ``<save_patorch>/train.log``
    (rotating, 1 MiB x 2 backups) and echoing to the console.

    Args:
        save_patorch: directory in which train.log is created (must exist)

    Returns:
        the configured logging.Logger

    Bug fixes vs. the original:
      - ``os.patorch.join`` (a broken "th"->"torch" search-replace of
        ``os.path.join``) raised AttributeError; restored to ``os.path.join``.
      - Repeated calls kept appending handlers, duplicating every log line;
        handlers are now attached only once.
      - The console handler had no formatter; it now shares the file format.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)

    if not logger.handlers:  # attach handlers only on the first call
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

        handler = RotatingFileHandler(os.path.join(save_patorch, 'train.log'), maxBytes=1024 * 1024, backupCount=2)
        handler.setFormatter(formatter)
        logger.addHandler(handler)

        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        console.setFormatter(formatter)
        logger.addHandler(console)

    return logger

def unpack_batch(batch, device='cpu'):
    """
    Convert a batch of experience tuples into training tensors.

    Each experience tuple is read positionally:
        exp[0] state, exp[1] action, exp[2] reward, exp[3] done,
        exp[4] next state, exp[7] mask.

    Returns:
        (states, actions, rewards, next_states, dones, masks) as tensors on
        `device`; actions and dones carry a trailing singleton dim (N, 1).
    """
    def _stack(idx, dtype):
        # Gather field `idx` from every experience and move it to `device`.
        return torch.tensor(np.array([exp[idx] for exp in batch]), dtype=dtype).to(device=device)

    states_t = _stack(0, torch.float)
    actions_t = _stack(1, torch.long).unsqueeze(1)
    rewards_t = _stack(2, torch.float)
    dones_t = _stack(3, torch.long).unsqueeze(1)
    next_states_t = _stack(4, torch.float)
    masks_t = _stack(7, torch.float)

    return states_t, actions_t, rewards_t, next_states_t, dones_t, masks_t


def get_action_dim(action_space: spaces.Space) -> tuple:
    """
    Return (continuous_dim, discrete_dim) for the given action space.

    For Tuple spaces the two entries are inspected independently; a Box entry
    sets the continuous dimension, a Discrete/MultiDiscrete/MultiBinary entry
    sets the discrete dimension.
    """
    if isinstance(action_space, spaces.Box):
        return int(np.prod(action_space.shape)), 0
    if isinstance(action_space, spaces.Discrete):
        # The action is a single int.
        return 0, 1
    if isinstance(action_space, spaces.MultiDiscrete):
        # One dimension per discrete sub-action.
        return 0, int(len(action_space.nvec))
    if isinstance(action_space, spaces.MultiBinary):
        assert isinstance(
            action_space.n, int
        ), "Multi-dimensional MultiBinary action space is not supported. You can flatten it instead."
        return 0, int(action_space.n)
    if isinstance(action_space, spaces.Dict):
        # Hybrid Dict layout: a 'continuous_action' Box plus one discrete action.
        return int(np.prod(action_space['continuous_action'].shape)), 1
    if isinstance(action_space, spaces.Tuple):
        continuous_dim = 0
        discrete_dim = 0
        for sub_space in (action_space[0], action_space[1]):
            if isinstance(sub_space, spaces.Box):
                continuous_dim = int(np.prod(sub_space.shape))
            elif isinstance(sub_space, spaces.Discrete):
                discrete_dim = 1
            elif isinstance(sub_space, spaces.MultiDiscrete):
                discrete_dim = int(len(sub_space.nvec))
            elif isinstance(sub_space, spaces.MultiBinary):
                discrete_dim = int(sub_space.n)
        return continuous_dim, discrete_dim
    raise NotImplementedError(f"{action_space} action space is not supported")


# Container for one sampled minibatch of rollout data.
class HYRolloutBufferSamples(NamedTuple):
    """Tensors for one minibatch from HYRolloutBuffer (hybrid discrete + continuous actions)."""
    observations: torch.Tensor        # batch of observations
    actions_con: torch.Tensor         # continuous action components
    actions_disc: torch.Tensor        # discrete action components
    old_values: torch.Tensor          # value estimates recorded at collection time
    old_log_probs_con: torch.Tensor   # log-probs of the continuous actions at collection time
    old_log_probs_disc: torch.Tensor  # log-probs of the discrete actions at collection time
    advantages: torch.Tensor          # GAE advantage estimates
    returns: torch.Tensor             # returns (advantages + values)

class HYRolloutBuffer():
    observations: np.ndarray
    actions_con: np.ndarray
    actions_disc: np.ndarray
    rewards: np.ndarray
    advantages: np.ndarray
    returns: np.ndarray
    episode_starts: np.ndarray
    log_probs_con: np.ndarray
    log_probs_disc: np.ndarray
    values: np.ndarray

    def __init__(
        self,
        buffer_size: int, 
        observation_space: spaces.Space,
        action_space: spaces.Space,
        device: Union[torch.device, str] = "cpu",
        gae_lambda: float = 1,
        gamma: float = 0.99,
        n_envs: int = 1,
    ):
        """
        Rollout buffer for PPO-style training with a hybrid action space.

        Args:
            buffer_size: number of time steps stored per environment
            observation_space: env observation space (its shape sizes the storage)
            action_space: env action space (split into continuous/discrete dims)
            device: device tensors are placed on when sampling
            gae_lambda: GAE lambda factor
            gamma: discount factor
            n_envs: number of parallel environments
        """
        # Static configuration.
        self.buffer_size = buffer_size
        self.n_envs = n_envs
        self.device = device
        self.gamma = gamma
        self.gae_lambda = gae_lambda
        # Spaces and the dimensions derived from them.
        self.observation_space = observation_space
        self.action_space = action_space
        self.obs_shape = observation_space.shape  # storage shape per observation
        self.action_con_dim, self.action_disc_dim = get_action_dim(action_space)
        # Write cursor and state flags (also re-initialized inside reset()).
        self.pos = 0               # next row to write; wraps logically via modulo users
        self.full = False          # True once buffer_size rows have been written
        self.generator_ready = False
        self.reset()


    def reset(self) -> None:
        '''
        Re-allocate all per-step storage arrays and rewind the write cursor.
        '''
        step_shape = (self.buffer_size, self.n_envs)
        self.observations = np.zeros(step_shape + tuple(self.obs_shape), dtype=np.float32)
        self.actions_disc = np.zeros(step_shape + (self.action_disc_dim,), dtype=np.float32)
        self.actions_con = np.zeros(step_shape + (self.action_con_dim,), dtype=np.float32)
        # Per-step scalars (one value per env): rewards, computed returns,
        # episode-start flags, value estimates, action log-probs and advantages.
        for name in ("rewards", "returns", "episode_starts", "values",
                     "log_probs_disc", "log_probs_con", "advantages"):
            setattr(self, name, np.zeros(step_shape, dtype=np.float32))
        # Done flags from the previous step; consumed by add() as episode_start.
        self.last_done = np.zeros((self.n_envs,), dtype=np.float32)
        self.generator_ready = False
        self.pos = 0
        self.full = False
        
    def compute_returns_and_advantage(self, last_values: torch.Tensor, dones: np.ndarray) -> None:
        '''
        Compute GAE advantages and returns for the whole buffer (in place).

        last_values: torch.Tensor — value estimate of the observation seen right
            after the last stored step (the bootstrap value at collection end)
        dones: np.ndarray — whether the action taken at collection end ended the episode
        '''

        last_values = last_values.clone().cpu().numpy().flatten()

        last_gae_lam = 0
        # Walk the buffer backwards so each step can reuse the GAE term of the step after it.
        for step in reversed(range(self.buffer_size)):
            if step == self.buffer_size - 1:
                # Last row: bootstrap from the values/dones observed when collection stopped.
                next_non_terminal = 1.0 - dones # 0 when the episode ended, else 1
                next_values = last_values # value estimate at collection end
            else:
                # Otherwise look one row ahead inside the buffer.
                next_non_terminal = 1.0 - self.episode_starts[step + 1] # 0 when the next row starts a new episode
                next_values = self.values[step + 1] # next step's value estimate

            # TD error: Bellman-style one-step target (reward + discounted next value,
            # with the bootstrap masked out at episode ends) minus the stored value estimate.
            delta = self.rewards[step] + self.gamma * next_values * next_non_terminal - self.values[step]
            # GAE recurrence: exponentially-weighted sum of TD errors along the trajectory.
            # Positive => the realized outcome beat the value prediction, and vice versa.
            last_gae_lam = delta + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam
            self.advantages[step] = last_gae_lam
        # Add the value baseline back to get Bellman-like return targets.
        self.returns = self.advantages + self.values


    def add(self, exp):
        """
        Append one experience tuple to the buffer.

        Expected tuple layout (assumed from the unpacking below — TODO confirm
        against the collector that produces `exp`):
            exp[0] -> observation
            exp[1] -> (discrete_action, continuous_action)
            exp[2] -> reward
            exp[3] -> done flag of this step
            exp[5] -> (value, log_prob_disc, log_prob_con) computed at sampling
                      time and stored in the agent state

        Bug fix: the original body called ``self.add(...)``, recursing into
        itself infinitely; it now delegates to the private ``__add`` writer.
        """
        self.__add(
            exp[0],
            exp[1][0],
            exp[1][1],
            exp[2],
            # The previous step's done flag marks this row as an episode start.
            self.last_done,
            exp[5][0],
            exp[5][1],
            exp[5][2],
        )

        # Remember this step's done flag; it becomes the next row's episode_start.
        self.last_done = exp[3]


    def __add(
        self,
        obs: np.ndarray,
        action_disc: np.ndarray,
        action_con: np.ndarray,
        reward: np.ndarray,
        episode_start: np.ndarray,
        value: torch.Tensor,
        log_probs_disc: torch.Tensor,
        log_probs_con: torch.Tensor,
        ):
        '''
        Write one time step into row ``self.pos`` of the buffer.

        obs: observation for this step
        action_disc / action_con: discrete and continuous parts of the action
        reward: reward received
        episode_start: True-ish when this row begins a new episode (i.e. the
            previous step ended one); used to separate episodes during GAE
        value: value estimate for obs
        log_probs_disc / log_probs_con: log-probabilities of the two action parts
        '''
        row = self.pos

        # numpy inputs are copied so later in-place mutation by the caller is harmless.
        self.observations[row] = np.array(obs).copy()
        self.actions_disc[row] = np.array(action_disc).copy()
        self.actions_con[row] = np.array(action_con).copy()
        self.rewards[row] = np.array(reward).copy()
        self.episode_starts[row] = np.array(episode_start).copy()

        # torch inputs are detached to CPU numpy before storing.
        self.log_probs_disc[row] = log_probs_disc.clone().cpu().numpy()
        self.log_probs_con[row] = log_probs_con.clone().cpu().numpy()
        self.values[row] = value.clone().cpu().numpy().flatten()

        # Advance the write cursor; flag the buffer full once the last row is written.
        self.pos = row + 1
        if self.pos == self.buffer_size:
            self.full = True

    def filter(self):
        """Return (mean, std) over the observations written so far (rows < pos)."""
        filled = self.observations[:self.pos]
        return np.mean(filled), np.std(filled)

    def get(self, batch_size: Optional[int] = None) -> Generator[HYRolloutBufferSamples, None, None]:
        '''
        Yield shuffled minibatches of size ``batch_size``; when ``batch_size``
        is None the whole buffer is returned as a single batch.
        '''
        assert self.full, ""
        total = self.buffer_size * self.n_envs
        # Shuffle once over all (step, env) pairs. PPO training wants i.i.d.
        # minibatches; the chronological order was only needed earlier, when
        # returns and advantages were computed.
        indices = np.random.permutation(total)

        # One-time preparation (until the next reset): flatten each cached array
        # from (steps, envs, ...) to (steps * envs, ...) by swapping the first
        # two axes and merging them, so samples from all envs/steps mix freely.
        if not self.generator_ready:
            for name in (
                "observations",
                "actions_con",
                "actions_disc",
                "values",
                "log_probs_con",
                "log_probs_disc",
                "advantages",
                "returns",
            ):
                setattr(self, name, self.swap_and_flatten(getattr(self, name)))
            self.generator_ready = True

        # No batch size given: hand back everything in one batch.
        if batch_size is None:
            batch_size = total

        start = 0
        while start < total:
            # Slice the pre-shuffled index array for this minibatch and yield it.
            yield self._get_samples(indices[start:start + batch_size])
            start += batch_size

    def _get_samples(
        self,
        batch_inds: np.ndarray,
        # Fix: "VecNormalize" is quoted as a forward reference — the name is not
        # imported in this file, and an unquoted annotation is evaluated at
        # definition time, which would raise NameError. The parameter is unused
        # and kept only for API parity with SB3-style buffers.
        env: Optional["VecNormalize"] = None,
    ) -> HYRolloutBufferSamples:
        '''
        Gather the rows selected by ``batch_inds`` into a HYRolloutBufferSamples.

        Assumes get() has already flattened the storage to (steps * envs, ...).
        '''
        data = (
            self.observations[batch_inds],
            self.actions_con[batch_inds],
            self.actions_disc[batch_inds],
            # Per-sample scalars are flattened to 1-D vectors.
            self.values[batch_inds].flatten(),
            self.log_probs_con[batch_inds].flatten(),
            self.log_probs_disc[batch_inds].flatten(),
            self.advantages[batch_inds].flatten(),
            self.returns[batch_inds].flatten(),
        )
        # self.to_torch (defined elsewhere in this class) presumably converts each
        # array to a tensor on self.device — TODO confirm; the results are unpacked
        # positionally into the named-tuple fields.
        return HYRolloutBufferSamples(*tuple(map(self.to_torch, data)))