import os.path
from typing import Any
import numpy as np
import sys
import torch
import argparse
import random
import ptan
import gym  # 使用旧版gym
import gymnasium  # 同时导入gymnasium用于兼容ptan
from gym import spaces
import logging
from logging.handlers import RotatingFileHandler
from dataclasses import dataclass, field
from typing import Tuple, Optional
import cv2
from PIL import Image
import time
from collections import Counter
import copy


class GymToGymnasiumWrapper(gymnasium.Env):
    """
    Adapter exposing a legacy gym.Env through the gymnasium.Env API so it
    can be used with ptan.
    """

    def __init__(self, env):
        self.env = env
        self.observation_space = env.observation_space
        self.action_space = env.action_space

    def reset(self, seed=None, options=None):
        """Reset the wrapped env, normalizing the return value to (obs, info)."""
        if seed is not None:
            self.env.seed(seed)
        result = self.env.reset()
        # Old gym may return a bare observation or an (obs, info) tuple.
        if not isinstance(result, tuple):
            return result, {}
        if len(result) > 1:
            return result[0], result[1]
        return result[0], {}

    def step(self, action):
        """Step the wrapped env, normalizing to the 5-tuple gymnasium contract."""
        result = self.env.step(action)
        if len(result) == 5:
            # Already (obs, reward, done, truncated, info).
            return result
        if len(result) == 4:
            obs, reward, done, info = result
            return obs, reward, done, False, info
        raise ValueError(f"Unexpected number of return values from env.step: {len(result)}")

    def render(self):
        return self.env.render()

    def close(self):
        return self.env.close()

    def seed(self, seed=None):
        return self.env.seed(seed)



def save_best_model(score, state, save_dir, save_name, keep_best = 5):
    """
    Save a model checkpoint named by its score and keep only the keep_best
    highest-scoring files for this save_name.

    :param score: numeric score embedded in the filename (higher is better)
    :param state: object persisted via torch.save
    :param save_dir: directory for saved models (created if missing)
    :param save_name: filename prefix
    :param keep_best: number of top-scoring files to retain
    """
    os.makedirs(save_dir, exist_ok=True)

    save_path = os.path.join(save_dir, f'{save_name}_{score}.pth')
    torch.save(state, save_path)

    prefix = f'{save_name}_'

    def _score_of(fname):
        # Bug fix: the original parsed with int(x.split('_')[-1].split('.')[0]),
        # which crashes on (or truncates) float scores such as 12.5.
        return float(fname[len(prefix):-len('.pth')])

    # Bug fix: the original filtered on '"best" in x', which only matches when
    # save_name happens to contain "best"; match the actual prefix/suffix that
    # this function writes instead.
    all_model = sorted(
        (f for f in os.listdir(save_dir)
         if f.startswith(prefix) and f.endswith('.pth')),
        key=_score_of)
    if len(all_model) > keep_best:
        for old_model in all_model[:-keep_best]:
            os.remove(os.path.join(save_dir, old_model))
    
def save_checkpoints(iter, state, checkpoint_dir, save_name, keep_last=5):
    """
    Persist a training checkpoint and prune all but the newest keep_last.

    :param iter: epoch/iteration number embedded in the filename
    :param state: object persisted via torch.save
    :param checkpoint_dir: target directory (created if missing)
    :param save_name: filename prefix
    :param keep_last: how many most-recent checkpoints to retain
    """
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    torch.save(state, os.path.join(checkpoint_dir, f'{save_name}_epoch_{iter}.pth'))

    def _epoch_of(name):
        # Filenames end in "..._epoch_<n>.pth"; sort numerically by <n>.
        return int(name.split('_')[-1].split('.')[0])

    checkpoints = [f for f in os.listdir(checkpoint_dir) if "epoch" in f]
    checkpoints.sort(key=_epoch_of)
    if len(checkpoints) > keep_last:
        for stale in checkpoints[:-keep_last]:
            os.remove(os.path.join(checkpoint_dir, stale))


def count_boundary(c_rate):
    """
    Derive the linear map from a normalized value to the boundary pair
    c_rate = (upper, lower).

    :return: (half_range, midpoint) so that true = normalized * half_range + midpoint
    """
    upper, lower = c_rate[0], c_rate[1]
    half_range = (upper - lower) / 2
    midpoint = upper - half_range
    return half_range, midpoint


def true_parameter_action(parameter_action, c_rate):
    """
    Map parameters from the embedding space back to the true action-parameter
    range: true_param = normalized_param * half_range + midpoint.

    :param parameter_action: sequence of normalized parameter values
    :param c_rate: per-dimension (upper, lower) boundary pairs
    :return: a deep copy of parameter_action with each entry rescaled
    """
    # TODO: revisit how c_rate corresponds to the VAE part later.
    mapped = copy.deepcopy(parameter_action)
    for idx in range(len(parameter_action)):
        upper, lower = c_rate[idx][0], c_rate[idx][1]
        half_range = (upper - lower) / 2
        midpoint = upper - half_range
        mapped[idx] = mapped[idx] * half_range + midpoint
    return mapped


"""
该类就是用来跟踪、记录、判断激励的追踪类
"""
class RewardTracker:
    """
    Context manager that tracks finished-episode rewards/steps, logs them to a
    TensorBoard writer and reports when the target mean reward is reached.
    """

    def __init__(self, writer, stop_reward):
        '''
        param writer: tensorboard writer used for logging
        param stop_reward: target mean reward at which training stops
        '''
        self.writer = writer
        self.stop_reward = stop_reward

    def __enter__(self):
        self.ts = time.time()
        self.ts_frame = 0
        # Per-episode history; the moving averages below use the last 100 entries.
        self.total_rewards = []
        self.total_steps = []
        return self

    def __exit__(self, *args):
        self.writer.close()

    def reward(self, end_infos, frame, epsilon=None):
        '''
        param end_infos: iterable of (episode_reward, episode_steps) tuples
        param frame: current training frame/step counter
        param epsilon: current epsilon value (logged when given)

        return True: the target mean reward has been reached; False otherwise
        '''
        # Robustness fix: the original raised UnboundLocalError when called
        # with no finished episodes, because mean_reward was only bound inside
        # the loop body.
        if not end_infos:
            return False
        for end_info in end_infos:
            self.total_rewards.append(end_info[0])
            self.total_steps.append(end_info[1])
            # Frames per second since the previous report; guard against a
            # zero time delta on very fast consecutive calls.
            elapsed = time.time() - self.ts
            speed = (frame - self.ts_frame) / max(elapsed, 1e-8)
            self.ts_frame = frame
            self.ts = time.time()
            # Moving averages over the most recent 100 episodes.
            mean_reward = np.mean(self.total_rewards[-100:])
            mean_step = np.mean(self.total_steps[-100:])
            epsilon_str = "" if epsilon is None else ", eps %.2f" % epsilon
            print("%d: done %d games, mean reward %.3f, mean step %.3f, speed %.2f f/s%s" % (
                frame, len(self.total_rewards), mean_reward, mean_step, speed, epsilon_str
            ))
            sys.stdout.flush()
            if epsilon is not None:
                self.writer.add_scalar("epsilon", epsilon, frame)
            self.writer.add_scalar("speed", speed, frame)
            self.writer.add_scalar("reward_100", mean_reward, frame)
            self.writer.add_scalar("reward", end_info[0], frame)
            self.writer.add_scalar("step_100", mean_step, frame)
            self.writer.add_scalar("step", end_info[1], frame)
        # Solved once the 100-episode mean exceeds the target.
        if mean_reward > self.stop_reward:
            print("Solved in %d frames!" % frame)
            return True
        return False


@dataclass
class AgentState(ptan.experience.BaseAgentState):
    """
    Per-environment agent state carried between ptan steps.
    """
    obs: torch.Tensor  # latest observation (acts as next_obs)
    action_dim: int  # number of discrete actions
    last_action: torch.Tensor = field(init=False)  # one-hot of the action that produced obs
    # Bug fix: the original used a bare tensor default, which is a single
    # object shared by every AgentState instance; default_factory creates a
    # fresh tensor per instance.
    last_reward: torch.Tensor = field(default_factory=lambda: torch.zeros((1, 1), dtype=torch.float32))
    hidden_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None  # LSTM (h, c), if any
    q_value: torch.Tensor = field(init=False)

    def __post_init__(self):
        # Start with an all-zero "no previous action" one-hot vector.
        self.last_action = torch.zeros((1, self.action_dim), dtype=torch.float32)

    def update(self, obs, action, reward, done, next_obs):
        '''
        obs: previous observation
        action: discrete action executed to reach next_obs
        reward: reward obtained for that transition
        done: episode-termination flag (unused here)
        next_obs: newest observation, stored as self.obs
        '''
        self.obs = torch.from_numpy(next_obs).unsqueeze(0)
        # One-hot encode the executed action.
        self.last_action = torch.FloatTensor([[1 if i == action else 0 for i in range(self.action_dim)]])
        self.last_reward = torch.FloatTensor([[reward]])


def choose_action(env, net, state, header_number:int=None, epsilon=None, device='cpu'):
    '''
    Select a discrete action from an ensemble Q-network.

    env: environment (only used to sample a random action)
    net: ensemble network; net(state, head) returns one head's Q-values,
         net(state) returns the Q-values of every head
    state: raw observation (converted to a float tensor)
    header_number: int, index of the ensemble head to use; None = majority vote
    epsilon: float, epsilon for epsilon-greedy action selection; None = greedy
    device: torch device for the forward pass
    '''
    # Epsilon-greedy branch: with probability epsilon pick a random action.
    if epsilon is not None and np.random.random() <= epsilon:
        return env.action_space.sample()

    # Greedy selection. (Fix: the original duplicated this entire body in
    # both the epsilon and no-epsilon branches; it is now shared.)
    with torch.no_grad():
        state_t = torch.tensor(state, dtype=torch.float).unsqueeze(0).to(device)
        if header_number is not None:
            # Use only the requested head.
            q_values = net(state_t, header_number).detach().cpu()
            return int(q_values.max(1).indices.numpy())
        # Majority vote: each head proposes its argmax action and the most
        # frequent proposal wins.
        head_q_values = net(state_t).detach()
        votes = [int(q.cpu().max(1).indices.numpy()) for q in head_q_values]
        return Counter(votes).most_common(1)[0][0]



class HyArTd3Agent(ptan.agent.BaseAgent):
    """
    ptan agent implementing HyAR-TD3 action selection for hybrid
    (discrete + continuous) action spaces.

    Two modes, switched by self.compute_emb:
      * False: warm-up selection through continuous_actor / qactor (see act()).
      * True: select action embeddings with policy_model, perturb them with
        Gaussian exploration noise, then decode them back into concrete
        actions through the learned action representation (action_rep).
    """

    def __init__(self, params, qactor, continuous_actor, policy_model, action_rep, discrete_action_dim, continuous_action_dim, discrete_emb_dim, continuous_emb_dim, max_action, env, device='cpu'):
        """
        :param params: hyper-parameter namespace (use_ornstein_noise,
            expl_noise_initial, expl_noise, epsilon_steps, ...)
        :param qactor: network scoring discrete actions given state + parameters
        :param continuous_actor: network producing all continuous parameters
        :param policy_model: policy producing (discrete_emb, continuous_emb)
        :param action_rep: learned action-representation (embedding) model
        :param max_action: magnitude bound used for embedding noise clipping
        :param env: environment instance (kept for reference)
        """
        super().__init__()

        self.params = params
        self.action_rep = action_rep
        self.policy_model = policy_model
        self.max_action = max_action
        self.use_ornstein_noise = params.use_ornstein_noise
        self.discrete_action_dim = discrete_action_dim
        self.continuous_action_dim = continuous_action_dim
        self.discrete_emb_dim = discrete_emb_dim
        self.continuous_emb_dim = continuous_emb_dim
        self.qactor = qactor
        self.continuous_actor = continuous_actor
        self.device = device
        self.env = env
        self.cur_step = 0
        # Continuous action parameters are taken to span [-1, 1] per dimension.
        self.continuous_action_parameter_max_numpy = np.ones((self.continuous_action_dim,))
        self.continuous_action_parameter_min_numpy = -self.continuous_action_parameter_max_numpy
        self._seed(int(time.time()))
        self.compute_emb = False  # flipped on externally once embeddings are usable
        self.c_rate = None  # per-dimension embedding boundaries, set externally

    
    def initial_state(self):
        # ptan hook: no per-environment recurrent state is created here.
        return None


    def get_epsilon(self, t):
        """Linearly anneal exploration noise from expl_noise_initial to expl_noise over epsilon_steps."""
        if t < self.params.epsilon_steps:
            epsilon = self.params.expl_noise_initial - (self.params.expl_noise_initial - self.params.expl_noise) * (
                    t / self.params.epsilon_steps)
        else:
            epsilon = self.params.expl_noise

        return epsilon


    def __call__(self, states, agent_states):
        """
        Convert observations and states into actions to take
        :param states: list of environment states to process
        :param agent_states: list of states with the same length as observations
        :return: tuple of actions, states
        """
        actions = []
        agent_state_inner = []
        for state, agent_state in zip(states, agent_states):
            # all_continuous_actions corresponds to all_action_parameters in the reference code
            discrete_action, continuous_action, all_continuous_actions = self.act(state, agent_state)
            if self.compute_emb:
                discrete_emb, continuous_emb = self.policy_model.select_action(state)
                epsilon = self.get_epsilon(self.cur_step)
                # HyAR exploration: perturb both embeddings with Gaussian noise
                # scaled by the annealed epsilon, then clip to the valid range.
                discrete_emb = (
                        discrete_emb + np.random.normal(0, self.max_action * epsilon, size=self.discrete_emb_dim)
                ).clip(-self.max_action, self.max_action)
                continuous_emb = (
                        continuous_emb + np.random.normal(0, self.max_action * epsilon, size=self.continuous_emb_dim)
                ).clip(-self.max_action, self.max_action)


                # Map the predicted continuous embedding into the true embedding
                # range (roughly continuous_emb * c_rate).
                true_parameter_emb = true_parameter_action(continuous_emb, self.c_rate)

                # select discrete action
                discrete_action_embedding = copy.deepcopy(discrete_emb)
                discrete_action_embedding = torch.from_numpy(discrete_action_embedding).float().reshape(1, -1)
                similare_emb_discrete_action = self.action_rep.select_discrete_action(discrete_action_embedding) # embedding -> concrete discrete action
                similare_discrete_emb = self.action_rep.get_embedding(similare_emb_discrete_action).cpu().view(-1).data.numpy() # chosen discrete action -> its actual embedding
                discrete_2_continuous_action = self.action_rep.select_parameter_action(state, true_parameter_emb,
                                                                        similare_discrete_emb)
                # TODO: adapt — this yields the continuous value of the chosen
                # discrete action, while the env expects the full list of
                # per-discrete-action continuous values; conversion needed.
                actions.append((similare_emb_discrete_action, discrete_2_continuous_action))
                agent_state_inner.append((discrete_2_continuous_action, all_continuous_actions, discrete_emb, continuous_emb))
            else:
                # TODO: adapt as well — continuous_action holds the parameters of
                # every discrete action, so slice out the chosen action's pair.
                # discrete_2_continuous_action corresponds to act_param_ in the reference code
                discrete_2_continuous_action = continuous_action[2 * discrete_action: 2 * discrete_action + 2]
                # discrete_action corresponds to act in the reference code
                actions.append((discrete_action, discrete_2_continuous_action))
                agent_state_inner.append((discrete_2_continuous_action, all_continuous_actions, None, None))

        self.cur_step += 1


        return actions, agent_state_inner

    def _seed(self, seed=None):
        """
        NOTE: this will not reset the randomly initialised weights; use the seed parameter in the constructor instead.

        :param seed:
        :return:
        """
        self.seed = seed
        random.seed(seed)
        np.random.seed(seed)
        self.np_random = np.random.RandomState(seed=seed)
        if seed is not None:
            torch.manual_seed(seed)
            if self.device == torch.device("cuda"):
                torch.cuda.manual_seed(seed)


    def act(self, state, agent_state=None):
        '''
        state: environment observation
        return: (action, action_parameters, all_action_parameters) — action is
        the discrete action index; in the current environment action_parameters
        equals all_action_parameters (parameters for every discrete action).
        '''
        with torch.no_grad():
            state = torch.from_numpy(state).to(self.device)

            all_action_parameters = self.continuous_actor(state)

            # Hausknecht and Stone [2016] use epsilon greedy actions with uniform random action-parameter exploration
            # Decide between a random action and the actor model's prediction.
            # NOTE(review): np_random.uniform() lies in [0, 1), so `rnd < 1.0`
            # always holds — the Q-network branch below is currently unreachable.
            rnd = self.np_random.uniform()
            if rnd < 1.0:
                action = self.np_random.choice(self.discrete_action_dim)
                if not self.use_ornstein_noise:
                    all_action_parameters = torch.from_numpy(np.random.uniform(self.continuous_action_parameter_min_numpy,
                                                              self.continuous_action_parameter_max_numpy))
            else:
                # Model prediction: Q-values over discrete actions given the
                # state and the continuous parameters.
                Q_a = self.qactor.forward(state.unsqueeze(0), all_action_parameters.unsqueeze(0))
                Q_a = Q_a.detach().cpu().data.numpy()
                action = np.argmax(Q_a)  # index of the best discrete action



            # add noise only to parameters of chosen action
            all_action_parameters = all_action_parameters.cpu().data.numpy()
            # offset = np.array([self.action_parameter_sizes[i] for i in range(action)], dtype=int).sum()
            # if self.use_ornstein_noise and self.noise is not None:
            #     all_action_parameters[0:4] += self.noise.sample()[0:4]


            # The environment needs the parameters of every discrete action to
            # execute a step, so all of them are returned; the discrete part is
            # returned as a plain index.
            action_parameters = all_action_parameters

        return action, action_parameters, all_action_parameters
    


class HyArTd3ReplayBuffer(ptan.experience.ExperienceReplayBuffer):
    """
    Replay buffer for HyAR-TD3.

    Each stored sample is the ptan experience tuple extended (in _add) with
    the state delta next_obs - obs used by the representation model.
    """

    def __init__(self, experience_source, buffer_size, device='cpu'):
        """
        :param experience_source: ptan experience source to draw samples from
        :param buffer_size: maximum number of stored transitions
        :param device: torch device the sampled batches are moved to
        """
        super().__init__(experience_source, buffer_size)
        self.device = device


    def sample(self, batch_size):
        """
        Sample a batch and unpack it into device tensors.

        Expected per-sample layout (produced by the agent and _add):
          sample[0] state, sample[1] (discrete_action, continuous_action),
          sample[2] reward, sample[3] done flag, sample[4] next state,
          sample[5] (chosen_params, all_params, discrete_emb, continuous_emb),
          sample[7] state delta appended by _add.

        :return: 10-tuple of tensors (states, discrete_actions,
            continuous_actions, all_continuous_actions, discrete_embs,
            continuous_embs, next_states, state_2_next_states, rewards,
            not_dones)
        :raises IndexError: if the underlying buffer returned no entries
        """
        batch_data = super().sample(batch_size)
        if len(batch_data) <= 0:
            raise IndexError("Not enough entries to sample from")
        
        states = []
        discrete_actions = []
        continuous_actions = []
        all_continuous_actions = []
        discrete_embs = []
        continuous_embs = []
        next_states = []
        state_2_next_states = []
        rewards = []
        not_dones = []
        # NOTE(review): the enumerate index is unused; plain iteration would do.
        for _, sample in enumerate(batch_data):
            states.append(np.array(sample[0], copy=False))
            discrete_actions.append(sample[1][0])
            continuous_actions.append(np.array(sample[1][1], copy=False))
            all_continuous_actions.append(np.array(sample[5][1], copy=False))
            # Embeddings may be None (warm-up phase); substitute zero vectors.
            # NOTE(review): the hard-coded size 8 must match the embedding
            # dimension used by the agent — confirm.
            if sample[5][2] is not None:
                discrete_embs.append(np.array(sample[5][2], copy=False))
            else:
                discrete_embs.append(np.zeros(8))  # zero-vector placeholder
            if sample[5][3] is not None:
                continuous_embs.append(np.array(sample[5][3], copy=False))
            else:
                continuous_embs.append(np.zeros(8))  # zero-vector placeholder
            next_states.append(np.array(sample[4], copy=False))
            state_2_next_states.append(np.array(sample[7], copy=False))
            rewards.append(sample[2])
            not_dones.append(1.0 - sample[3])

        return (
            torch.FloatTensor(np.array(states)).to(self.device),
            torch.LongTensor(discrete_actions).to(self.device),
            torch.FloatTensor(np.array(continuous_actions)).to(self.device),
            torch.FloatTensor(np.array(all_continuous_actions)).to(self.device),
            torch.FloatTensor(np.array(discrete_embs)).to(self.device),
            torch.FloatTensor(np.array(continuous_embs)).to(self.device),
            torch.FloatTensor(np.array(next_states)).to(self.device),
            torch.FloatTensor(np.array(state_2_next_states)).to(self.device),
            torch.FloatTensor(rewards).to(self.device),
            torch.FloatTensor(not_dones).to(self.device),
        )

    

    def _add(self, sample):
        """
        Append the state delta (next_obs - obs) to the experience tuple and
        store it.  (The original docstring described this as a Bernoulli mask;
        what is actually appended is the state difference.)

        Args:
            sample: experience tuple (obs, action, reward, done, next_obs,
                agent_state, step_info), possibly nested one level deep
        """
        
        # Samples may arrive wrapped in an outer 1-tuple; unwrap before extending.
        if len(sample) == 1 and isinstance(sample[0], tuple):
            # Nested: unpack the inner tuple first.
            inner_sample = sample[0]
            # Append the state delta to the end of the experience tuple.
            # inner_sample[4] - inner_sample[0] is next_state - state.
            extended_sample = inner_sample + (inner_sample
                                              [4] - inner_sample[0],)
        else:
            # Flat sample: append the state delta directly.
            extended_sample = sample + (sample[4] - sample[0],)
        
        # Delegate the actual storage to the parent class.
        super()._add(extended_sample)


    
    def state_dict(self):
        """Serialize the buffer contents and cursor for checkpointing."""
        state = {
            "buffer": self.buffer,
            "capacity": self.capacity,
            "pos": self.pos
        }
        return state

    def load_state_dict(self, state):
        """Restore the buffer contents and cursor from a state_dict snapshot."""
        self.buffer = state["buffer"]
        self.capacity = state["capacity"]
        self.pos = state["pos"]


def select_device(args):
    """Return the preferred torch.device: cuda, then mps, else cpu (honoring args.cuda)."""
    if args.cuda:
        if torch.cuda.is_available():
            return torch.device("cuda")
        if torch.backends.mps.is_available():
            return torch.device("mps")
    return torch.device("cpu")


class ProcessFrame(gym.ObservationWrapper):
    """
    Converts game frames (observations) to size x size images, optionally
    converted to a single grayscale channel.
    """

    def __init__(self, env=None, gray=True, size=84):
        super(ProcessFrame, self).__init__(env)
        # New observation space: uint8 image in [0, 255], 1 channel when gray.
        self.observation_space = spaces.Box(low=0, high=255, shape=(size, size, 1 if gray else 3), dtype=np.uint8)
        self.gray = gray
        self.size = size

    def observation(self, obs):
        """
        Transform a raw observation frame.
        """
        return ProcessFrame.process(obs, self.gray, self.size)

    @staticmethod
    def process(img, gray=True, size=84):
        # Fix: removed a dead `global count_frame` declaration — the global was
        # never defined or referenced anywhere.
        x_t = img
        if gray:
            # ITU-R 601 luma weights for RGB -> grayscale.
            x_t = x_t[:, :, 0] * 0.299 + x_t[:, :, 1] * 0.587 + x_t[:, :, 2] * 0.114

        # Crop rows 23:210 — NOTE(review): assumes a 210-pixel-high Atari
        # frame; confirm for other environments.
        x_t = x_t[23:210, :]
        x_t = cv2.resize(x_t, (size, size), interpolation=cv2.INTER_AREA)
        x_t = np.reshape(x_t, [size, size, 1 if gray else 3])
        return x_t.astype(np.uint8)

    @staticmethod
    def save_state_as_image(state, filename):
        """Save the state as a PNG image."""
        # Ensure the state is a NumPy array with dtype uint8.
        if state.dtype != np.uint8:
            # Scale floats to [0, 255]; guard against division by zero for a
            # constant-valued state (fix: the original produced NaNs when
            # max == min).
            span = state.max() - state.min()
            if span == 0:
                state = np.zeros_like(state, dtype=np.uint8)
            else:
                state = np.uint8(255 * (state - state.min()) / span)
        # Remove extra dimensions if necessary.
        state = state.squeeze()
        # Create the image.
        img = Image.fromarray(state)
        # Convert to grayscale mode 'L' if the mode is not directly savable.
        if img.mode not in ('L', 'RGB'):
            img = img.convert('L')
        # Save image.
        img.save(filename)


class FireResetEnv(gym.Wrapper):
    def __init__(self, env=None):
        """For environments where the user need to press FIRE for the game to start."""
        super(FireResetEnv, self).__init__(env)
        # Such games expose FIRE at action index 1 and have at least 3 actions.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def step(self, action):
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        """
        Reset and press the first couple of actions so the game actually
        starts; if any of them ends the episode, reset again.
        """
        self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(1)
        if done:
            self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(2)
        if done:
            # Bug fix: the original reset here but still returned the stale
            # pre-reset observation; return the fresh reset observation instead.
            obs, info = self.env.reset(seed=seed, options=options)
        return obs, info
    


class RewardPenaltyWrapper(gym.Wrapper):
    """
    Replaces the environment reward with a fixed penalty whenever the
    'lives' counter reported in info decreases.
    """

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty  # NOTE(review): currently unused
        self.life_loss_penalty = life_loss_penalty
        self.previous_lives = 0

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # Remember the starting life count so step() can detect losses.
        self.previous_lives = info.get('lives', 0)
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        current_lives = info.get('lives', self.previous_lives)
        if current_lives < self.previous_lives:
            # A lost life overrides the reward with the configured penalty.
            reward = self.life_loss_penalty
            self.previous_lives = current_lives

        return obs, reward, done, truncated, info
    

import collections

class FrameStack(gym.Wrapper):
    """
    Keeps the last k observations and returns them concatenated along axis 0.
    """

    def __init__(self, env, k):
        super(FrameStack, self).__init__(env)
        self.k = k
        self.frames = collections.deque(maxlen=k)
        shp = env.observation_space.shape
        # NOTE(review): dtype here is float32 while wrapped frames may be
        # uint8 — confirm this mismatch is intended.
        self.observation_space = gym.spaces.Box(
            low=0, high=255, shape=(shp[0] * k, *shp[1:]), dtype=np.float32
        )

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # Prime the deque with the first observation so the stack is full.
        self.frames.extend([obs] * self.k)
        return self._get_obs(), info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)
        self.frames.append(obs)
        return self._get_obs(), reward, done, truncated, info

    def _get_obs(self):
        # Stack the buffered frames along the first axis (NumPy, not LazyFrames).
        return np.concatenate(tuple(self.frames), axis=0)
    

class ScaledStateWrapper(gym.ObservationWrapper):
    """
    Scales the observation space to [-1,1].

    Supports either a plain Box observation space or a (Box, Discrete) Tuple
    space (the Box part is scaled, the Discrete part passed through).
    """

    def __init__(self, env):
        super(ScaledStateWrapper, self).__init__(env)
        obs = env.observation_space
        self.compound = False  # True when observations are (state, steps) tuples
        self.low = None
        self.high = None
        if isinstance(obs, gym.spaces.Box):
            self.low = env.observation_space.low  # lower bounds of the space
            self.high = env.observation_space.high  # upper bounds of the space
            self.observation_space = gym.spaces.Box(low=-np.ones(self.low.shape), high=np.ones(self.high.shape),
                                                    dtype=np.float32)  # new space scaled to [-1, 1]
        elif isinstance(obs, gym.spaces.Tuple):
            # Bug fix: the original tested isinstance(obs, typing.Tuple) — which
            # can never match a gym Tuple space — and then called typing.Tuple
            # as a constructor, which fails; use gym.spaces.Tuple for both.
            self.low = obs.spaces[0].low
            self.high = obs.spaces[0].high
            assert len(obs.spaces) == 2 and isinstance(obs.spaces[1], gym.spaces.Discrete)
            self.observation_space = gym.spaces.Tuple(
                (gym.spaces.Box(low=-np.ones(self.low.shape), high=np.ones(self.high.shape),
                                dtype=np.float32),
                 obs.spaces[1]))
            self.compound = True
        else:
            raise Exception("Unsupported observation space type: %s" % self.observation_space)

    def scale_state(self, state):
        """Linearly map a state from [low, high] to [-1, 1]."""
        state = 2. * (state - self.low) / (self.high - self.low) - 1.
        return state

    def _unscale_state(self, scaled_state):
        """Inverse of scale_state: map from [-1, 1] back to [low, high]."""
        state = (self.high - self.low) * (scaled_state + 1.) / 2. + self.low
        return state

    def observation(self, obs):
        if self.compound:
            state, steps = obs
            ret = (self.scale_state(state), steps)
        else:
            ret = self.scale_state(obs)
        return ret
    

class ScaledParameterisedActionWrapper(gym.ActionWrapper):
    """
    Changes the scale of the continuous action parameters to [-1,1].
    Parameter space must be flattened!

    Expects a flattened parameterized action space of the form:

    Tuple((
        Discrete(n),
        Box(c_1),
        Box(c_2),
        ...
        Box(c_n)
        )
    """

    def __init__(self, env):
        super(ScaledParameterisedActionWrapper, self).__init__(env)
        self.old_as = env.action_space  # index 0 = discrete space, 1..n = per-action parameter Boxes
        self.num_actions = self.old_as[0].n  # number of discrete actions
        self.high = [self.old_as.spaces[i].high for i in range(1, self.num_actions + 1)]  # per-action upper bounds
        self.low = [self.old_as.spaces[i].low for i in range(1, self.num_actions + 1)]  # per-action lower bounds
        self.range = [self.old_as.spaces[i].high - self.old_as.spaces[i].low for i in range(1, self.num_actions + 1)]  # per-action spans
        # Bug fix: the original referenced bare `Box` (never imported) and
        # `Tuple` (which resolves to typing.Tuple) — both fail at runtime; use
        # the gym.spaces classes instead.
        new_params = [  # parameter Boxes rescaled to [-1, 1]
            spaces.Box(-np.ones(self.old_as.spaces[i].low.shape), np.ones(self.old_as.spaces[i].high.shape), dtype=np.float32)
            for i in range(1, self.num_actions + 1)
        ]
        # New action space: the discrete part is unchanged, the parameter
        # Boxes are the rescaled ones above.
        self.action_space = spaces.Tuple((
            self.old_as.spaces[0],  # actions
            *new_params,
        ))

    def action(self, action):
        """
        Rescale from [-1,1] to original action-parameter range.

        :param action: (discrete_index, parameter_list) pair
        :return: the same structure with the chosen action's parameters rescaled
        """
        action = copy.deepcopy(action)
        p = action[0]
        action[1][p] = self.range[p] * (action[1][p] + 1) / 2. + self.low[p]
        return action


"""
Gym Hybrid 环境的 Wrapper 工具集
"""
import gym
import numpy as np
from gym import spaces
from typing import Tuple


class NormalizeActionWrapper(gym.ActionWrapper):
    """
    Wrapper that normalizes a hybrid (Discrete, Box) action space so the
    continuous part is expressed in [-1, 1].

    Original action space (BaseEnv):
        - discrete part: Discrete(3) -> {0, 1, 2}
        - continuous part: Box([0, -1], [1, 1])

    Normalized action space:
        - discrete part: unchanged, Discrete(3)
        - continuous part: Box([-1, -1], [1, 1])

    Example:
        env = gym.make('Moving-v0')
        env = NormalizeActionWrapper(env)
        action = (1, np.array([-0.5, 0.8]))  # all continuous values in [-1, 1]
        obs, reward, terminated, truncated, info = env.step(action)
    """

    def __init__(self, env: gym.Env):
        """
        :param env: environment whose action space is Tuple(Discrete, Box)
        :raises ValueError: if the action space is not of that form
        """
        super().__init__(env)

        if not isinstance(env.action_space, spaces.Tuple):
            raise ValueError("此 Wrapper 只支持 Tuple 类型的混合动作空间")

        discrete_part = env.action_space[0]
        box_part = env.action_space[1]

        if not isinstance(discrete_part, spaces.Discrete):
            raise ValueError("混合动作空间的第一个元素必须是 Discrete 类型")

        if not isinstance(box_part, spaces.Box):
            raise ValueError("混合动作空间的第二个元素必须是 Box 类型")

        # Remember the original continuous bounds for the (de)normalization maps.
        self.original_low = box_part.low.copy()
        self.original_high = box_part.high.copy()

        # Build the normalized continuous space: every dimension in [-1, 1].
        dim = box_part.shape[0]
        unit_box = spaces.Box(
            low=np.array([-1.0] * dim, dtype=np.float32),
            high=np.array([1.0] * dim, dtype=np.float32),
            dtype=np.float32
        )

        # Replace the action space with (unchanged discrete, normalized box).
        self.action_space = spaces.Tuple((discrete_part, unit_box))

        print(f"[NormalizeActionWrapper] 原始连续空间范围: [{self.original_low}, {self.original_high}]")
        print(f"[NormalizeActionWrapper] 归一化后范围: [[-1, -1], [1, 1]]")

    def action(self, action: Tuple[int, np.ndarray]) -> Tuple[int, np.ndarray]:
        """
        Map a normalized action back to the original action space.

        Mapping:
            original_value = low + (normalized_value + 1) * (high - low) / 2
        where normalized_value is in [-1, 1] and original_value in [low, high].

        :param action: (discrete_id, normalized_continuous_params)
        :return: (discrete_id, original_continuous_params)
        """
        discrete_id, norm_params = action

        # Coerce to a float32 array and clip to guard against numeric drift.
        norm_params = np.array(norm_params, dtype=np.float32)
        norm_params = np.clip(norm_params, -1.0, 1.0)

        # Linear map [-1, 1] -> [original_low, original_high].
        span = self.original_high - self.original_low
        mapped = self.original_low + (norm_params + 1.0) * span / 2.0

        return (discrete_id, mapped)

    def reverse_action(self, action: Tuple[int, np.ndarray]) -> Tuple[int, np.ndarray]:
        """
        Map an original-space action into the normalized space (debug/inverse).

        Mapping:
            normalized_value = 2 * (original_value - low) / (high - low) - 1

        :param action: action in the original action space
        :return: the normalized action
        """
        discrete_id, raw_params = action

        raw_params = np.array(raw_params, dtype=np.float32)

        # Linear map [original_low, original_high] -> [-1, 1], then clip.
        span = self.original_high - self.original_low
        normalized = 2.0 * (raw_params - self.original_low) / span - 1.0
        normalized = np.clip(normalized, -1.0, 1.0)

        return (discrete_id, normalized)


class NormalizeHardMoveActionWrapper(gym.ActionWrapper):
    """
    Wrapper normalizing HardMoveEnv's hybrid action space to [-1, 1].

    Original action space (HardMoveEnv, num_actuators=4):
        - discrete part: Discrete(16) -> {0, 1, ..., 15}
        - continuous part: Box([-1, -1, -1, -1], [1, 1, 1, 1])

    Since HardMoveEnv's continuous space is already [-1, 1], this wrapper
    mainly serves to:
    1. unify the interface,
    2. provide extra clipping/validation,
    3. keep wrapper usage consistent with the other environments.
    """

    def __init__(self, env: gym.Env):
        super().__init__(env)

        # Only hybrid (Tuple) action spaces are supported.
        if not isinstance(env.action_space, spaces.Tuple):
            raise ValueError("此 Wrapper 只支持 Tuple 类型的混合动作空间")

        print(f"[NormalizeHardMoveActionWrapper] HardMove 环境的连续空间已经归一化到 [-1, 1]")

    def action(self, action: Tuple[int, np.ndarray]) -> Tuple[int, np.ndarray]:
        """
        For HardMoveEnv this is essentially a clipping pass-through.
        """
        disc, params = action

        clipped = np.clip(np.array(params, dtype=np.float32), -1.0, 1.0)

        return (disc, clipped)


class VerboseActionWrapper(gym.Wrapper):
    """
    Debug wrapper that prints action information every ``print_every`` steps.
    """

    def __init__(self, env: gym.Env, print_every: int = 1):
        super().__init__(env)
        self.print_every = print_every
        self.step_count = 0

    def step(self, action):
        self.step_count += 1

        if self.step_count % self.print_every == 0:
            disc, params = action
            print(f"\n[Step {self.step_count}] 动作信息:")
            print(f"  离散动作ID: {disc}")
            print(f"  连续参数: {params}")

            # Decode the discrete-action name (BaseEnv only: Discrete(3)).
            if hasattr(self.env, 'action_space'):
                first_space = self.env.action_space[0]
                if isinstance(first_space, spaces.Discrete) and first_space.n == 3:
                    names = {0: "ACCELERATE", 1: "TURN", 2: "BREAK"}
                    print(f"  动作类型: {names.get(disc, 'UNKNOWN')}")

        return self.env.step(action)

    def reset(self, **kwargs):
        # Restart the step counter on every episode.
        self.step_count = 0
        return self.env.reset(**kwargs)


def wrap_dqn(env_name, episodic_life = False, gray = True, clip_reward = False, stack_frames = 4):
    """
    Build a training environment for ``env_name``.

    The env is created through the legacy ``gym`` API, observations are scaled
    to [-1, 1], continuous actions are normalized to [-1, 1], and the result is
    wrapped as a ``gymnasium.Env`` for ptan compatibility.

    NOTE(review): ``episodic_life`` / ``gray`` / ``clip_reward`` /
    ``stack_frames`` are currently unused — the ptan Atari-style wrappers they
    used to control (MaxAndSkipEnv, EpisodicLifeEnv, NoopResetEnv, frame
    processing/stacking, reward clipping) are disabled because they are
    incompatible with this gym version.
    """
    import gym as old_gym  # create the env via the legacy gym API

    env = old_gym.make(env_name)
    env = ScaledStateWrapper(env)       # scale observations into [-1, 1]
    env = NormalizeActionWrapper(env)   # normalize actions into [-1, 1]

    # Wrap as gymnasium.Env so ptan can consume it.
    return GymToGymnasiumWrapper(env)


def setup_logger(save_path):
    """
    Configure and return the module logger writing to ``save_path``/train.log.

    Attaches a rotating file handler (1 MiB per file, 2 backups) and a console
    handler, both using the same timestamped format.

    Fixes over the previous version:
        - repeated calls no longer stack duplicate handlers (which caused every
          record to be emitted multiple times);
        - the console handler now shares the file handler's formatter instead
          of using the bare default format.

    Args:
        save_path: existing directory in which ``train.log`` is created.

    Returns:
        The configured ``logging.Logger``.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)

    # Guard: if this logger was already configured, return it untouched so
    # repeated calls do not duplicate output.
    if logger.handlers:
        return logger

    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

    handler = RotatingFileHandler(os.path.join(save_path, 'train.log'),
                                  maxBytes=1024 * 1024, backupCount=2)
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(formatter)
    logger.addHandler(console)

    return logger
    

def build_parser():
    """
    Build the command-line argument parser for training.

    Returns:
        ``argparse.ArgumentParser`` exposing all training hyper-parameters.

    Fixes over the previous version:
        - ``--actor_lr`` / ``--critic_lr`` / ``--tau_actor`` / ``--tau_critic``
          were declared with ``type=int`` despite float defaults, so any value
          supplied on the command line (e.g. ``--actor_lr 1e-4``) was rejected
          or truncated; they are now ``type=float``.
        - numeric options that had no ``type`` (``--discount``, ``--tau``,
          noise/relabel settings) were left as strings when supplied on the
          command line; they now parse as ``float``.
    All defaults are unchanged.
    """
    parser = argparse.ArgumentParser()

    # NOTE(review): default=True combined with action='store_true' means the
    # flag is always True; kept as-is for backward compatibility.
    parser.add_argument("--cuda", default=True, action='store_true', help='Enable CUDA')
    parser.add_argument("-n", "--name", default='MrDo', help="Name of the run")
    parser.add_argument("--policy", default="P-TD3")  # Policy name (TD3, DDPG or OurDDPG)
    parser.add_argument("--env", default='simple_move_4_direction')  # platform goal HFO
    parser.add_argument("--seed", default=0, type=int)  # Sets Gym, PyTorch and Numpy seeds
    parser.add_argument("--start_timesteps", default=128, type=int)  # Time steps initial random policy is used
    parser.add_argument("--eval_freq", default=2500, type=int)  # How often (time steps) we evaluate
    parser.add_argument("--max_episodes", default=10000, type=int)  # Max time steps to run environment
    parser.add_argument("--max_embedding_episodes", default=1e5, type=int)  # Max time steps to run environment
    parser.add_argument("--max_timesteps", default=1000000, type=float)  # Max time steps to run environment for

    parser.add_argument("--epsilon_steps", default=1000, type=int)  # Max time steps to epsilon environment
    parser.add_argument("--expl_noise_initial", default=1.0, type=float)  # Std of Gaussian exploration noise 1.0
    parser.add_argument("--expl_noise", default=0.1, type=float)  # Std of Gaussian exploration noise 0.1

    parser.add_argument("--relable_steps", default=1000, type=int)  # Max time steps relable
    parser.add_argument("--relable_initial", default=1.0, type=float)
    parser.add_argument("--relable_final", default=0.0, type=float)

    parser.add_argument("--batch_size", default=128, type=int)  # Batch size for both actor and critic
    parser.add_argument("--discount", default=0.99, type=float)  # Discount factor
    parser.add_argument("--tau", default=0.005, type=float)  # Target network update rate
    parser.add_argument("--policy_noise", default=0.1, type=float)  # Noise added to target policy during critic update
    parser.add_argument("--noise_clip", default=0.5, type=float)  # Range to clip target policy noise
    parser.add_argument("--policy_freq", default=2, type=int)  # Frequency of delayed policy updates
    parser.add_argument("--save_model", action="store_true")  # Save model and optimizer parameters
    parser.add_argument("--load_model", default="")  # Model load file name, "" doesn't load, "default" uses file_name
    parser.add_argument("--actor_lr", default=1e-4, type=float)
    parser.add_argument("--critic_lr", default=1e-3, type=float)
    parser.add_argument("--tau_actor", default=0.005, type=float)
    parser.add_argument("--tau_critic", default=0.005, type=float)

    return parser


def unpack_batch(batch, device='cpu'):
    """
    Convert a batch of experience tuples into stacked torch tensors.

    Each experience is indexed as:
        exp[0] -> state, exp[1] -> action, exp[2] -> reward,
        exp[3] -> done flag, exp[4] -> next state, exp[7] -> mask.

    Args:
        batch: iterable of experience tuples (layout above).
        device: torch device for the resulting tensors.

    Returns:
        (states, actions, rewards, next_states, dones, masks) tensors;
        actions and dones carry an extra trailing dimension (N, 1).
    """
    states = np.array([exp[0] for exp in batch])
    actions = np.array([exp[1] for exp in batch])
    rewards = np.array([exp[2] for exp in batch])
    dones = np.array([exp[3] for exp in batch])
    next_states = np.array([exp[4] for exp in batch])
    masks = np.array([exp[7] for exp in batch])

    states_t = torch.tensor(states, dtype=torch.float, device=device)
    actions_t = torch.tensor(actions, dtype=torch.long, device=device).unsqueeze(1)
    rewards_t = torch.tensor(rewards, dtype=torch.float, device=device)
    next_states_t = torch.tensor(next_states, dtype=torch.float, device=device)
    dones_t = torch.tensor(dones, dtype=torch.long, device=device).unsqueeze(1)
    masks_t = torch.tensor(masks, dtype=torch.float, device=device)

    return states_t, actions_t, rewards_t, next_states_t, dones_t, masks_t



def pairwise_distances(x, y):
    """
    Compute the matrix of pairwise squared Euclidean distances.

    Input:  x is an N x d matrix (e.g. embeddings of discrete actions),
            y is an M x d matrix (e.g. the VAE embedding table).
    Output: an N x M matrix ``dist`` with dist[i, j] = ||x[i] - y[j]||^2.

    Uses the expansion (a - b)^2 = a^2 + b^2 - 2ab with broadcasting, which
    needs O(M*d + N*d + M*N) memory instead of O(N*M*d).
    Adapted from:
    https://discuss.pytorch.org/t/efficient-distance-matrix-computation/9065/2

    Note: tiny negative values may appear from floating-point cancellation;
    callers using this as a (negative-distance) similarity score should be
    aware of that.
    """
    sq_x = x.pow(2).sum(dim=1, keepdim=True)    # N x 1, row norms of x
    sq_y = y.pow(2).sum(dim=1).unsqueeze(0)     # 1 x M, row norms of y
    cross = torch.mm(x, y.t())                  # N x M inner products

    # Broadcasting expands sq_x and sq_y to N x M.
    return sq_x + sq_y - 2.0 * cross