import sys
import time
import numpy as np
from collections import deque

import torch
import torch.nn as nn
import torch.nn.functional as F
import ptan

HID_SIZE = 128


def preprocess_obs(obs, bits=5):
    """Preprocess an image tensor, see https://arxiv.org/abs/1807.03039.

    Steps and rationale:
      1. Quantisation: reduce 8-bit pixel values to `bits` bits (e.g. 0-255
         down to 0-31 for bits=5), shrinking input complexity while keeping
         enough visual information.
      2. Normalisation to [0, 1]: keeps network inputs well-scaled.
      3. Uniform noise of width 1/2**bits: smooths the discrete quantised
         values (dequantisation, a mild data augmentation).
      4. Centering to roughly [-0.5, 0.5]: zero-mean inputs help training.

    :param obs: float32 tensor with pixel values in [0, 255]
    :param bits: target bit depth; quantisation is applied only when < 8
    :return: preprocessed tensor, roughly in [-0.5, 0.5]
    """
    bins = 2**bits
    assert obs.dtype == torch.float32
    if bits < 8:
        # map 0..255 down to 0..2**bits - 1
        obs = torch.floor(obs / 2**(8 - bits))
    # normalise to [0, 1]
    obs = obs / bins
    # dequantisation noise in [0, 1/bins)
    obs = obs + torch.rand_like(obs) / bins
    # centre around zero
    obs = obs - 0.5
    return obs


def soft_update_params(net, target_net, tau):
    """Polyak-average `net` into `target_net` in place:
    target <- tau * online + (1 - tau) * target."""
    for src, dst in zip(net.parameters(), target_net.parameters()):
        dst.data.copy_(tau * src.data + (1 - tau) * dst.data)


def unpack_batch_sac_curl(batch, device="cpu"):
    """Unpack a batch of experience for SAC+CURL training.

    Returns (states, actions, rewards, dones, not_dones, last_states) as
    tensors on `device`; rewards/dones/not_dones carry a trailing unit axis.
    Terminal transitions reuse the current state as last_state — its value
    is masked out by the done flags downstream anyway.
    """
    states, actions, rewards, dones, not_dones, last_states = [], [], [], [], [], []
    for exp in batch:
        terminal = exp.last_state is None
        states.append(exp.state)
        actions.append(exp.action)
        rewards.append(exp.reward)
        dones.append(terminal)
        not_dones.append(not terminal)
        last_states.append(np.array(exp.state if terminal else exp.last_state))
    states_v = ptan.agent.float32_preprocessor(states).to(device)
    # NOTE: convert via np.array first — a list of tensors would be inferred
    # as shape (batch,) instead of (batch, 1) and break later torch.cat calls.
    actions_v = torch.FloatTensor(np.array(actions)).to(device)
    rewards_v = ptan.agent.float32_preprocessor(rewards).unsqueeze(-1).to(device)
    last_states_v = ptan.agent.float32_preprocessor(last_states).to(device)
    dones_t = torch.BoolTensor(dones).unsqueeze(-1).to(device)
    not_dones_t = torch.BoolTensor(not_dones).unsqueeze(-1).to(device)
    return states_v, actions_v, rewards_v, dones_t, not_dones_t, last_states_v

class BipedalWalkerModelA2C(nn.Module):
    """A2C actor-critic network for continuous control (BipedalWalker).

    obs_size: observation shape tuple — a flat vector, hence linear layers.
    act_size: action shape tuple — the number of simultaneous action dims.
    """

    def __init__(self, obs_size, act_size):
        super(BipedalWalkerModelA2C, self).__init__()

        # Shared feature extractor; the three heads below all read from it.
        self.base = nn.Sequential(
            nn.Linear(obs_size[0], HID_SIZE),
            nn.ReLU(),
        )
        # Action-mean head: tanh (not sigmoid) because actions can be
        # negative, so outputs are bounded to [-1, 1].
        self.mu = nn.Sequential(
            nn.Linear(HID_SIZE, act_size[0]),
            nn.Tanh(),
        )
        # Variance head: Softplus (a smooth ReLU alternative) guarantees a
        # positive variance and avoids dead gradients; it controls the width
        # of the action distribution and hence exploration.
        # See https://zhuanlan.zhihu.com/p/461707201
        self.var = nn.Sequential(
            nn.Linear(HID_SIZE, act_size[0]),
            nn.Softplus(),
        )
        # Critic head: state value V(s) used as the advantage baseline.
        self.value = nn.Linear(HID_SIZE, 1)

    def forward(self, x):
        """Return (action mean, action variance, state value)."""
        features = self.base(x)
        return self.mu(features), self.var(features), self.value(features)


class RewardTracker:
    """Context manager tracking episode rewards: prints progress, logs to a
    TensorBoard-style writer and reports when the 100-episode mean reward
    exceeds `stop_reward`."""

    def __init__(self, writer, stop_reward):
        self.writer = writer
        self.stop_reward = stop_reward

    def __enter__(self):
        self.ts = time.time()
        self.ts_frame = 0
        self.total_rewards = []
        return self

    def __exit__(self, *args):
        self.writer.close()

    def reward(self, reward, frame, epsilon=None):
        """Record one finished episode; return True once solved.

        :param reward: total reward of the finished episode
        :param frame: current global frame counter
        :param epsilon: optional exploration epsilon (logged when given)
        """
        self.total_rewards.append(reward)
        # Frames/sec since the previous call; guard against a zero time delta
        # when two episodes finish within the same clock tick.
        elapsed = time.time() - self.ts
        speed = (frame - self.ts_frame) / elapsed if elapsed > 0 else 0.0
        self.ts_frame = frame
        self.ts = time.time()
        mean_reward = np.mean(self.total_rewards[-100:])
        epsilon_str = "" if epsilon is None else ", eps %.2f" % epsilon
        print("%d: done %d games, mean reward %.3f, speed %.2f f/s%s" % (
            frame, len(self.total_rewards), mean_reward, speed, epsilon_str
        ))
        sys.stdout.flush()
        if epsilon is not None:
            self.writer.add_scalar("epsilon", epsilon, frame)
        self.writer.add_scalar("speed", speed, frame)
        self.writer.add_scalar("reward_100", mean_reward, frame)
        self.writer.add_scalar("reward", reward, frame)
        if mean_reward > self.stop_reward:
            print("Solved in %d frames!" % frame)
            return True
        return False


def unpack_batch_a2c(batch, net, last_val_gamma, device="cpu"):
    """Convert a batch of experience into A2C training tensors.

    For non-terminal transitions, the critic value of the trajectory's final
    state (third output of `net`), discounted by `last_val_gamma`, is added
    to the reward to form the bootstrapped reference value. Terminal
    transitions keep the raw reward — their future value is zero.

    :param batch: collected experience entries
    :param net: network returning (mu, var, value)
    :return: (states, actions, reference values) tensors on `device`
    """
    states, actions, rewards = [], [], []
    not_done_idx, last_states = [], []
    for idx, exp in enumerate(batch):
        states.append(exp.state)
        actions.append(exp.action)
        rewards.append(exp.reward)
        if exp.last_state is not None:
            not_done_idx.append(idx)
            last_states.append(exp.last_state)

    states_v = ptan.agent.float32_preprocessor(states).to(device)
    actions_v = torch.FloatTensor(np.array(actions)).to(device)

    rewards_np = np.array(rewards, dtype=np.float32)
    if not_done_idx:
        # The value head is used directly (no max over actions) because the
        # critic outputs V(s), not per-action Q values.
        last_states_v = ptan.agent.float32_preprocessor(last_states).to(device)
        values_np = net(last_states_v)[2].data.cpu().numpy()[:, 0]
        rewards_np[not_done_idx] += last_val_gamma * values_np

    ref_vals_v = torch.FloatTensor(rewards_np).to(device)
    return states_v, actions_v, ref_vals_v

def unpack_batch_ddqn(batch, device="cpu"):
    """Unpack experience into tensors for deep deterministic / double DQN
    style training.

    Returns (states, actions, rewards, dones, last_states) on `device`.
    For terminal transitions the current state is stored as last_state
    (its value is masked by the done flag downstream); otherwise the state
    reached after the action (the final state of a multi-step rollout).
    """
    states, actions, rewards, dones, last_states = [], [], [], [], []
    for exp in batch:
        terminal = exp.last_state is None
        states.append(exp.state)
        actions.append(exp.action)
        rewards.append(exp.reward)
        dones.append(terminal)
        last_states.append(exp.state if terminal else exp.last_state)
    states_v = ptan.agent.float32_preprocessor(states).to(device)
    actions_v = ptan.agent.float32_preprocessor(actions).to(device)
    rewards_v = ptan.agent.float32_preprocessor(rewards).to(device)
    last_states_v = ptan.agent.float32_preprocessor(last_states).to(device)
    dones_t = torch.ByteTensor(dones).to(device)
    return states_v, actions_v, rewards_v, dones_t, last_states_v

def unpack_batch_acktr(batch, net, last_val_gamma, device="cpu"):
    """Convert a batch of experience into ACKTR training tensors.

    Same bootstrapping scheme as the A2C variant, except that `net` is a
    critic returning V(s) directly, so its output is used without tuple
    indexing or a max over actions.

    :param batch: collected experience entries
    :param net: critic network returning V(s)
    :return: (states, actions, reference values) tensors on `device`
    """
    states, actions, rewards = [], [], []
    not_done_idx, last_states = [], []
    for idx, exp in enumerate(batch):
        states.append(exp.state)
        actions.append(exp.action)
        rewards.append(exp.reward)
        if exp.last_state is not None:
            not_done_idx.append(idx)
            last_states.append(exp.last_state)

    states_v = ptan.agent.float32_preprocessor(states).to(device)
    actions_v = torch.FloatTensor(np.array(actions)).to(device)

    rewards_np = np.array(rewards, dtype=np.float32)
    if not_done_idx:
        # Bootstrap non-terminal rewards with the discounted critic value.
        last_states_v = ptan.agent.float32_preprocessor(last_states).to(device)
        values_np = net(last_states_v).data.cpu().numpy()[:, 0]
        rewards_np[not_done_idx] += last_val_gamma * values_np

    ref_vals_v = torch.FloatTensor(rewards_np).to(device)
    return states_v, actions_v, ref_vals_v

def count_parameters(model):
    """Return the number of trainable parameters in `model`."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total

import os

def save_best_model(score, state, save_dir, save_name, keep_best = 5):
    """Save `state` as '<save_name>_<score>.pth' and keep only the
    `keep_best` files with the highest scores, removing the rest.

    Fixes over the previous version: the prune filter matched only file
    names containing the literal "best" (so nothing was ever pruned for
    other save_names), and the int() score parse broke on float scores.

    :param score: numeric score embedded in the file name (higher is better)
    :param state: object passed to torch.save (e.g. a state_dict)
    """
    os.makedirs(save_dir, exist_ok=True)

    save_path = os.path.join(save_dir, f'{save_name}_{score}.pth')
    torch.save(state, save_path)

    prefix = f'{save_name}_'
    scored = []
    for fname in os.listdir(save_dir):
        if not (fname.startswith(prefix) and fname.endswith('.pth')):
            continue
        try:
            # score is everything between the prefix and the .pth suffix
            scored.append((float(fname[len(prefix):-len('.pth')]), fname))
        except ValueError:
            # e.g. epoch checkpoints sharing the directory — leave them alone
            continue
    scored.sort()
    if len(scored) > keep_best:
        for _, old_model in scored[:-keep_best]:
            os.remove(os.path.join(save_dir, old_model))

def save_checkpoints(iter, state, checkpoint_dir, save_name, keep_last=5):
    """Save `state` as '<save_name>_epoch_<iter>.pth' and keep only the
    `keep_last` newest (highest-epoch) checkpoints in the directory.

    Fix: the epoch number is parsed from the token after the LAST '_'
    (consistent with the other save_checkpoints definition in this module);
    the previous split('_')[2] crashed whenever save_name itself contained
    an underscore.
    """
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    checkpoint_path = os.path.join(checkpoint_dir, f'{save_name}_epoch_{iter}.pth')
    torch.save(state, checkpoint_path)

    all_checkpoints = sorted(
        filter(lambda x: "epoch" in x, os.listdir(checkpoint_dir)),
        key=lambda x: int(x.split('_')[-1].split('.')[0]),
    )
    if len(all_checkpoints) > keep_last:
        for old_checkpoint in all_checkpoints[:-keep_last]:
            os.remove(os.path.join(checkpoint_dir, old_checkpoint))


import os.path
import time
from typing import Any
import numpy as np
import sys
import torch
import torch.nn as nn
import ptan
import gymnasium as gym
from gymnasium import spaces
import logging
from logging.handlers import RotatingFileHandler
import cv2
from PIL import Image
from typing import Iterable
from torch.nn import Module


def lambda_return(imged_reward, value_pred, bootstrap, discount=0.99, lambda_=0.95):
    """Compute lambda-returns over an (imagined) trajectory.

    lambda_=1 gives the discounted Monte Carlo return; lambda_=0 gives the
    fixed one-step TD return. `bootstrap` is the value estimate used past the
    end of the trajectory (typically value_pred[-1]).
    """
    # value_pred shifted one step left, padded at the end with the bootstrap
    next_values = torch.cat([value_pred[1:], bootstrap[None]], 0)
    # per-step discount factor (pcont)
    discounts = discount * torch.ones_like(imged_reward)
    # Bellman-like base term: reward plus (1 - lambda) of the next value,
    # balancing Monte Carlo and TD contributions (as in PPO's GAE)
    base = imged_reward + discounts * next_values * (1 - lambda_)
    # Accumulate backwards along the trajectory, then restore forward order.
    acc = bootstrap
    reversed_out = []
    for t in reversed(range(len(base))):
        acc = base[t] + discounts[t] * lambda_ * acc
        reversed_out.append(acc)
    return torch.stack(reversed_out[::-1], 0)


class FreezeParameters:
    """
    Context manager that temporarily disables gradient tracking for the
    parameters of the given modules, restoring each parameter's original
    requires_grad flag on exit. Can speed up computation since gradients
    are not calculated for these modules.

    Example:
    ```
    with FreezeParameters([module]):
        output_tensor = module(input_tensor)
    ```
    :param modules: iterable of modules; .parameters() is used to freeze gradients.
    """

    def __init__(self, modules: Iterable[torch.nn.Module]):
        self.modules = modules
        # snapshot of each parameter's requires_grad, restored in __exit__
        self.param_states = [p.requires_grad for p in get_parameters(self.modules)]

    def __enter__(self):
        for param in get_parameters(self.modules):
            param.requires_grad = False

    def __exit__(self, exc_type, exc_val, exc_tb):
        for saved_flag, param in zip(self.param_states, get_parameters(self.modules)):
            param.requires_grad = saved_flag


def preprocess_observation_(observation, bit_depth):
    """In-place observation preprocessing.

    Quantises 8-bit-range pixel values down to `bit_depth` bits, rescales to
    [0, 1) and centres around zero (roughly [-0.5, 0.5)), then adds uniform
    dequantisation noise of width 1/2**bit_depth so the discrete values
    better approximate a continuous image distribution.

    :param observation: float tensor of RGB pixel values, modified in place
    :param bit_depth: target bit depth (e.g. 5 for 32 grey levels)
    """
    bins = 2**bit_depth
    # quantise to the given bit depth and centre
    observation.div_(2 ** (8 - bit_depth)).floor_().div_(bins).sub_(0.5)
    # dequantise: uniform noise in [0, 1/bins) smooths the discrete PMF
    observation.add_(torch.rand_like(observation).div_(bins))


def get_parameters(modules: Iterable[Module]):
    """
    Given a list of torch modules, return a flat list of their parameters.
    :param modules: iterable of modules
    :returns: a list of parameters
    """
    return [param for module in modules for param in module.parameters()]


def select_device(args):
    """Pick the torch device: CUDA if requested and available, else Apple MPS
    if requested and available, else CPU."""
    if not args.cuda:
        return torch.device("cpu")
    if torch.cuda.is_available():
        return torch.device("cuda")
    if torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")


def explained_variance(ypred, y):
    """Fraction of the variance of `y` explained by predictions `ypred`.

    Returns 1 - Var(y - ypred) / Var(y), or NaN when Var(y) == 0.
    Inputs must be 1-D, or share the same shape (then both are flattened).
    Raises AssertionError for multi-dimensional inputs of differing shape.

    Fixes over the previous version: the bare `except:` (which silently
    caught everything, including KeyboardInterrupt) is gone, and plain
    Python lists are accepted via np.asarray instead of raising
    AttributeError on `.ndim`.
    """
    y = np.asarray(y)
    ypred = np.asarray(ypred)
    if y.ndim != 1 or ypred.ndim != 1:
        # accept equal-shaped multi-dimensional arrays by flattening both
        assert y.shape == ypred.shape
        y = y.flatten()
        ypred = ypred.flatten()
    vary = np.var(y)
    return np.nan if vary == 0 else 1 - np.var(y - ypred) / vary


import collections

class FrameStack(gym.Wrapper):
    """Stack the last `k` observations along the first (channel) axis.

    On reset the deque is filled with k copies of the initial observation;
    each step appends the newest frame (the deque drops the oldest).
    """

    def __init__(self, env, k):
        super(FrameStack, self).__init__(env)
        self.k = k
        self.frames = collections.deque(maxlen=k)
        channels, *rest = env.observation_space.shape
        self.observation_space = gym.spaces.Box(
            low=0, high=255, shape=(channels * k, *rest), dtype=np.float32
        )

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # seed the stack with k copies of the first frame
        self.frames.extend([obs] * self.k)
        return self._get_obs(), info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)
        self.frames.append(obs)
        return self._get_obs(), reward, done, truncated, info

    def _get_obs(self):
        # stack with NumPy instead of LazyFrames
        return np.concatenate(list(self.frames), axis=0)



class ProcessFrame84(gym.ObservationWrapper):
    """
    Convert game frames (observations) to 84x84 single-channel grayscale.
    Removed: a dead `global count_frame` statement (the global was never
    defined or used) and commented-out debug calls.
    """

    def __init__(self, env=None):
        super(ProcessFrame84, self).__init__(env)
        # New observation space: single-channel 84x84 image, values 0..255.
        self.observation_space = spaces.Box(low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)

    def observation(self, obs):
        """Transform the raw observation into the 84x84 grayscale format."""
        return ProcessFrame84.process(obs)

    @staticmethod
    def process(img):
        # ITU-R BT.601 luma weights for RGB -> grayscale conversion.
        x_t = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
        x_t = cv2.resize(x_t, (84, 84), interpolation=cv2.INTER_AREA)
        x_t = np.reshape(x_t, [84, 84, 1])
        return x_t.astype(np.uint8)

    @staticmethod
    def save_state_as_image(state, filename):
        """Save the state as a PNG image (debug helper)."""
        # Ensure the state is a NumPy array with dtype uint8
        if state.dtype != np.uint8:
            # If state is float, scale to [0, 255] and convert to uint8
            state = np.uint8(255 * (state - state.min()) / (state.max() - state.min()))
        # Remove extra dimensions if necessary
        state = state.squeeze()
        img = Image.fromarray(state)
        # Convert image to mode 'L' (grayscale) if it's not compatible
        if img.mode not in ('L', 'RGB'):
            img = img.convert('L')
        img.save(filename)



def tie_weights(src, trg):
    """Tie `trg`'s weight and bias to `src`'s tensors (shared, not copied).

    Both layers must be of the same type (e.g. two nn.Linear layers).
    """
    assert type(src) == type(trg)
    for attr in ("weight", "bias"):
        setattr(trg, attr, getattr(src, attr))
    


def weight_init(m):
    """Custom weight init for Conv2D and Linear layers.

    Linear: orthogonal weight, zero bias.
    Conv2d / ConvTranspose2d: delta-orthogonal init from
    https://arxiv.org/pdf/1806.05393.pdf — zero everywhere except an
    orthogonal centre tap (ReLU gain), zero bias. Requires square kernels.
    """
    if isinstance(m, nn.Linear):
        nn.init.orthogonal_(m.weight.data)
        m.bias.data.fill_(0.0)
    elif isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        assert m.weight.size(2) == m.weight.size(3)  # square kernels only
        m.weight.data.fill_(0.0)
        m.bias.data.fill_(0.0)
        centre = m.weight.size(2) // 2
        nn.init.orthogonal_(m.weight.data[:, :, centre, centre],
                            nn.init.calculate_gain('relu'))


class FireResetEnv(gym.Wrapper):
    def __init__(self, env=None):
        """For environments where the user need to press FIRE for the game to start."""
        super(FireResetEnv, self).__init__(env)
        meanings = env.unwrapped.get_action_meanings()
        # The wrapper only makes sense for games that expose FIRE as action 1
        # and have at least three actions overall.
        assert meanings[1] == 'FIRE'
        assert len(meanings) >= 3

    def step(self, action):
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        # Probe actions 1 and 2 in turn (the "start the game" button is
        # assumed to be among the first actions); whenever a probe
        # accidentally ends the episode, reset again before continuing.
        self.env.reset(seed=seed, options=options)
        obs, info = None, None
        for probe_action in (1, 2):
            obs, _, done, _, info = self.env.step(probe_action)
            if done:
                self.env.reset(seed=seed, options=options)
        # NOTE(review): when the last probe ended the episode, the returned
        # obs predates the reset — confirm callers tolerate this.
        return obs, info
    


class RewardPenaltyWrapper(gym.Wrapper):
    """Reward-shaping wrapper: replaces the reward with `life_loss_penalty`
    when a life is lost, and with the mirrored bonus when a life is gained.

    NOTE(review): frame_penalty and the non_reward_* fields are stored but
    never applied in step() — confirm whether a per-frame penalty was
    intended.
    """

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty
        self.life_loss_penalty = life_loss_penalty
        self.previous_lives = 0
        self.non_reward_frames = 0
        self.non_reward_frames_limit = 100
        self.non_reward_loss = 0.01

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # remember the starting life count for later comparisons
        self.previous_lives = info.get('lives', 0)
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        lives = info.get('lives', self.previous_lives)
        if lives != self.previous_lives:
            # penalise a lost life; reward a regained one symmetrically
            sign = 1 if lives < self.previous_lives else -1
            reward = sign * self.life_loss_penalty
            self.previous_lives = lives

        return obs, reward, done, truncated, info
    


def wrap_dqn(env, stack_frames=4):
    """Apply the standard preprocessing wrapper stack for DQN-style training.

    MaxAndSkipEnv(skip=2) repeats each action for 2 frames and max-pools them;
    Ram2RGBWrapper is a project helper defined elsewhere — presumably it
    produces `stack_frames` stacked 84x84 grayscale observations (confirm at
    its definition); ImageToPyTorch moves the channel axis first for conv nets.
    """
    env = ptan.common.wrappers.MaxAndSkipEnv(env, skip=2)
    env = Ram2RGBWrapper(env, n_frames=stack_frames, obs_size=84, gray=True)
    # env = TransposeObservation(env)
    env = ptan.common.wrappers.ImageToPyTorch(env)
    return env


def wrap_dqn_sync(env_id, render_mode="rgb_array", stack_frames=4):
    """Return a thunk that creates and wraps the environment — the deferred
    construction is the shape expected by vectorized/sync env factories.

    Fixes: the thunk previously returned None (missing `return env`), and the
    `render_mode` argument was ignored in favour of a hard-coded value.
    """

    def thunk():
        env = gym.make(env_id, render_mode=render_mode)
        env = ptan.common.wrappers.MaxAndSkipEnv(env, skip=2)
        env = Ram2RGBWrapper(env, n_frames=stack_frames, obs_size=84, gray=True)
        # env = TransposeObservation(env)
        env = ptan.common.wrappers.ImageToPyTorch(env)
        return env

    return thunk


def setup_logger(save_path):
    """Configure and return the module logger writing to save_path/train.log.

    The log file rotates at 1 MiB with 2 backups. Because getLogger returns
    a shared singleton, a guard prevents repeated calls from stacking
    duplicate handlers (which would duplicate every log line).
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    log_file = os.path.abspath(os.path.join(save_path, 'train.log'))
    already_attached = any(
        isinstance(h, RotatingFileHandler) and getattr(h, "baseFilename", None) == log_file
        for h in logger.handlers
    )
    if not already_attached:
        handler = RotatingFileHandler(log_file, maxBytes=1024 * 1024, backupCount=2)
        handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
        logger.addHandler(handler)

    return logger


class DecayActionSelector(ptan.actions.ActionSelector):
    """
    Probability-based action selector with linearly decaying exploration.

    With probability cur_epsilon a uniformly random action is chosen per
    batch row; otherwise `start_action_selector` samples from the given
    probabilities. cur_epsilon decays by `decay_epsilon` per call, clamped
    below at `final_epsilon`.

    NOTE(review): since cur_epsilon is clamped with max(final_epsilon, ...),
    the `cur_epsilon < final_epsilon` branch in __call__ can never be taken,
    so `final_action_selector` appears to be effectively unused — confirm
    whether `<=` was intended.
    NOTE(review): the default selector instances are created once at function
    definition time and shared across all instances; both defaults look
    stateless, so this is presumably harmless — verify.
    """
    def __init__(self, start_epsilon=0.9, final_epsilon=0.0005, decay_epsilon=1e-8, start_action_selector=ptan.actions.ProbabilityActionSelector(), final_action_selector=ptan.actions.ArgmaxActionSelector()):
        self.start_epsilon = start_epsilon
        self.final_epsilon = final_epsilon
        self.cur_epsilon = self.start_epsilon
        self.decay_epsilon = decay_epsilon
        self.start_selector = start_action_selector
        self.final_selector = final_action_selector
        # number of __call__ invocations so far; drives the linear decay
        self.steps = 0

    def __call__(self, probs):
        assert isinstance(probs, np.ndarray)
        # linear decay, clamped at final_epsilon
        self.cur_epsilon = max(self.final_epsilon, self.start_epsilon - self.decay_epsilon * self.steps)
        self.steps += 1

        if self.cur_epsilon < self.final_epsilon:
            return self.final_selector(probs)
        elif np.random.random() > self.cur_epsilon:
            return self.start_selector(probs)
        else:
            actions = []
            for prob in probs:
                actions.append(np.random.randint(0, len(prob)))
            # exploration: uniformly random action per row instead of
            # sampling by the supplied probabilities
            return np.array(actions)

    def state_dict(self):
        """Serialize decay progress so training can resume seamlessly."""
        return {
            "cur_epsilon": self.cur_epsilon,
            "steps": self.steps
        }

    def load_state_dict(self, state_dict):
        """Restore decay progress saved by state_dict()."""
        self.cur_epsilon = state_dict["cur_epsilon"]
        self.steps = state_dict["steps"]


def save_model(model_name, loss, best_loss, model):
    """Persist `model` under saves/, tracking the best (lowest) loss.

    Always writes saves/model_<model_name>.dat; additionally writes
    saves/best_model_<model_name>_<loss>.dat when `loss` improves on
    `best_loss`. Returns the updated best loss.

    Fix: the best-model filename previously embedded the stale old
    `best_loss` value instead of the new improved loss.
    """
    os.makedirs("saves", exist_ok=True)

    if loss < best_loss:
        best_loss = loss
        torch.save(model, f'saves/best_model_{model_name}_{best_loss}.dat')

    torch.save(model, f'saves/model_{model_name}.dat')

    return best_loss

def unpack_batch(batch):
    """Unpack replay-buffer experience into NumPy arrays.

    Returns (states, actions, rewards float32, dones uint8, last_states).
    Terminal entries store the current state as last_state — the value there
    is masked by the done flag downstream anyway.
    """
    states, actions, rewards, dones, last_states = [], [], [], [], []
    for exp in batch:
        state = np.asarray(exp.state)
        terminal = exp.last_state is None
        states.append(state)
        actions.append(exp.action)
        rewards.append(exp.reward)
        dones.append(terminal)
        last_states.append(state if terminal else np.asarray(exp.last_state))
    return (np.asarray(states),
            np.array(actions),
            np.array(rewards, dtype=np.float32),
            np.array(dones, dtype=np.uint8),
            np.asarray(last_states))



def calc_loss_dqn(batch, net, tgt_net, gamma, device="cpu"):
    """Compute the DQN MSE loss for a batch of transitions.

    `net` scores the current states; `tgt_net` provides the bootstrapped
    next-state values (detached, so only `net` receives gradients).
    Values of terminal next states are forced to zero via the done mask.
    """
    states, actions, rewards, dones, next_states = unpack_batch(batch)

    states_v = torch.tensor(states).to(device)
    next_states_v = torch.tensor(next_states).to(device)
    actions_v = torch.tensor(actions).to(device)
    rewards_v = torch.tensor(rewards).to(device)
    done_mask = torch.ByteTensor(dones).to(device)

    # Q(s, a) for the actions that were actually taken
    # (network details: see chapter 06, 02_dqn_pong.py)
    qsa = net(states_v).gather(1, actions_v.unsqueeze(-1)).squeeze(-1)
    # max_a' Q_target(s', a'); terminal transitions contribute nothing
    next_q = tgt_net(next_states_v).max(1)[0]
    next_q[done_mask.bool()] = 0.0

    # Bellman target: r + gamma * max_a' Q_target(s', a')
    target = next_q.detach() * gamma + rewards_v
    return nn.MSELoss()(qsa, target)


class EpsilonTracker:
    """Linearly anneals the epsilon of an epsilon-greedy action selector.

    epsilon starts at `epsilon_start`, decreases by frame/epsilon_frames and
    is clamped below at `epsilon_final`; epsilon controls how often the
    policy acts randomly instead of following the network.
    """

    def __init__(self, epsilon_greedy_selector, epsilon_start, epsilon_final, epsilon_frames):
        """
        :param epsilon_greedy_selector: selector whose .epsilon is updated
        :param epsilon_start: initial epsilon value
        :param epsilon_final: lower bound for epsilon
        :param epsilon_frames: number of frames over which to decay
        """
        self.epsilon_greedy_selector = epsilon_greedy_selector
        self.epsilon_start = epsilon_start
        self.epsilon_final = epsilon_final
        self.epsilon_frames = epsilon_frames
        # initialise the selector's epsilon to the starting value
        self.frame(0)

    def frame(self, frame):
        """Update the selector's epsilon for the given frame index."""
        decayed = self.epsilon_start - frame / self.epsilon_frames
        self.epsilon_greedy_selector.epsilon = max(self.epsilon_final, decayed)
        

"""
Tracks, records and evaluates episode rewards during training.
"""
class RewardTracker:
    """Context manager tracking episode rewards: prints progress, logs to a
    TensorBoard-style writer and reports when the 100-episode mean reward
    exceeds the target.

    NOTE(review): this definition shadows an earlier RewardTracker in this
    module — consider removing one of the duplicates.
    """

    def __init__(self, writer, stop_reward):
        '''
        param writer: tensorboard summary writer
        param stop_reward: target mean reward at which training stops
        '''

        self.writer = writer
        self.stop_reward = stop_reward

    def __enter__(self):
        self.ts = time.time()
        self.ts_frame = 0
        # rewards of every finished episode, for the running 100-episode mean
        self.total_rewards = []
        return self

    def __exit__(self, *args):
        self.writer.close()

    def reward(self, reward, frame, epsilon=None):
        '''
        param reward: total reward of the episode that just finished
        param frame: current global frame counter
        param epsilon: current exploration epsilon (optional, logged if given)

        return True when the target mean reward has been reached, else False
        '''
        self.total_rewards.append(reward)
        # Average frames/sec since the previous call; guard against a zero
        # time delta when two episodes finish within the same clock tick.
        elapsed = time.time() - self.ts
        speed = (frame - self.ts_frame) / elapsed if elapsed > 0 else 0.0
        self.ts_frame = frame
        self.ts = time.time()
        # mean reward over the last 100 episodes
        mean_reward = np.mean(self.total_rewards[-100:])
        epsilon_str = "" if epsilon is None else ", eps %.2f" % epsilon
        print("%d: done %d games, mean reward %.3f, speed %.2f f/s%s" % (
            frame, len(self.total_rewards), mean_reward, speed, epsilon_str
        ))
        sys.stdout.flush()
        if epsilon is not None:
            self.writer.add_scalar("epsilon", epsilon, frame)
        self.writer.add_scalar("speed", speed, frame)
        self.writer.add_scalar("reward_100", mean_reward, frame)
        self.writer.add_scalar("reward", reward, frame)
        if mean_reward > self.stop_reward:
            print("Solved in %d frames!" % frame)
            return True
        return False


def save_best_model(score, state, save_dir, save_name, keep_best = 5):
    """Save `state` as '<save_name>_<score>.pth' and keep only the
    `keep_best` files with the highest scores, removing the rest.

    Fixes over the previous version: the prune filter matched only file
    names containing the literal "best" (so nothing was ever pruned for
    other save_names), and the int() score parse broke on float scores.

    :param score: numeric score embedded in the file name (higher is better)
    :param state: object passed to torch.save (e.g. a state_dict)
    """
    os.makedirs(save_dir, exist_ok=True)

    save_path = os.path.join(save_dir, f'{save_name}_{score}.pth')
    torch.save(state, save_path)

    prefix = f'{save_name}_'
    scored = []
    for fname in os.listdir(save_dir):
        if not (fname.startswith(prefix) and fname.endswith('.pth')):
            continue
        try:
            # score is everything between the prefix and the .pth suffix
            scored.append((float(fname[len(prefix):-len('.pth')]), fname))
        except ValueError:
            # e.g. epoch checkpoints sharing the directory — leave them alone
            continue
    scored.sort()
    if len(scored) > keep_best:
        for _, old_model in scored[:-keep_best]:
            os.remove(os.path.join(save_dir, old_model))
    
def save_checkpoints(iter, state, checkpoint_dir, save_name, keep_last=5):
    """Save `state` as '<save_name>_epoch_<iter>.pth' and keep only the
    `keep_last` newest (highest-epoch) checkpoints in the directory."""
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    path = os.path.join(checkpoint_dir, f'{save_name}_epoch_{iter}.pth')
    torch.save(state, path)

    # epoch number = the '_'-separated token just before the file extension
    def _epoch(fname):
        return int(fname.split('_')[-1].split('.')[0])

    checkpoints = sorted((f for f in os.listdir(checkpoint_dir) if "epoch" in f), key=_epoch)
    if len(checkpoints) > keep_last:
        for stale in checkpoints[:-keep_last]:
            os.remove(os.path.join(checkpoint_dir, stale))


class EnvSampleAgent(ptan.agent.BaseAgent):
    """Agent that ignores observations and samples a random action from the
    environment's action space (useful for warm-up / baseline rollouts)."""

    def __init__(self, env, device="cpu"):
        """
        :param env: environment whose action_space is sampled
        :param device: kept for interface parity with other agents
        """
        self.env = env
        self.device = device

    @torch.no_grad()
    def __call__(self, observation, agent_states=None):
        # one random action regardless of the observation batch
        return [self.env.action_space.sample()], agent_states
    


class ContinuousRandomAgent(ptan.agent.BaseAgent):
    """Agent that feeds (optionally preprocessed) observations through `net`
    and returns the raw network output as the action.

    :param net: action network
    :param preprocessor: converts raw observations (e.g. to float32 tensors)
    :param device: device the observation tensor is moved to
    """

    def __init__(self, net, preprocessor=ptan.agent.default_states_preprocessor, device="cpu"):
        super().__init__()
        self.net = net
        self.preprocessor = preprocessor
        self.device = device

    def initial_state(self):
        return None

    @torch.no_grad()
    def __call__(self, observation, agent_states=None):
        if agent_states is None:
            agent_states = [None] * len(observation)
        # apply the preprocessor when one is configured
        if self.preprocessor is not None:
            observation = self.preprocessor(observation)
            if torch.is_tensor(observation):
                observation = observation.to(self.device)
        # the network output is used directly as the action
        net_out = self.net(observation)
        return np.array(net_out.cpu().numpy()), agent_states



def default_states_preprocessor(states):
    """Convert a list of states into a model-ready float tensor.

    A single state becomes a [1, ...] batch; several states are stacked along
    a new leading batch axis. The tensor is then preprocessed in place by
    preprocess_observation_ (bit_depth=8: rescale, centre and add
    dequantisation noise).

    :param states: list of numpy arrays with states
    :return: float torch tensor
    """
    if len(states) == 1:
        batch_np = np.expand_dims(states[0], 0)
    else:
        batch_np = np.asarray([np.asarray(s) for s in states])

    batch_t = torch.tensor(batch_np, dtype=torch.float)
    preprocess_observation_(batch_t, bit_depth=8)
    return batch_t


class UpdateBeliefActAgent(ptan.agent.BaseAgent):
    """
    PlaNet/Dreamer-style agent: carries (belief, posterior latent state,
    last action) as its recurrent agent state, updates them through the
    transition model on every new observation, then queries the actor model
    for the next action.
    """
    # TODO: unify code with DQNAgent, as only action selector is differs.
    def __init__(self, actor_model, transition_model, encoder, belief_size, state_size, action_size, explore, action_noise, preprocessor=default_states_preprocessor, device="cpu"):
        '''
            actor_model: policy network mapping (belief, state) to an action
            transition_model: recurrent state-space model updating belief/state
            encoder: embeds raw observations for the transition model
            explore: when True, Gaussian noise (std=action_noise) is added to actions
            preprocessor: converts raw observations to float tensors (e.g. float32)
        '''
        self.actor_model = actor_model
        self.transition_model = transition_model
        self.device = device
        self.belief_size = belief_size
        self.state_size = state_size
        self.action_size = action_size
        self.encoder = encoder
        self.explore = explore
        self.action_noise = action_noise
        self.preprocessor = preprocessor


    @torch.no_grad()
    def __call__(self, observation, agent_states=None):

        # Start from zero belief/state/action at the beginning of an episode.
        if agent_states is None or agent_states[0] is None:
            belief, posterior_state, action = (
            torch.zeros(1, self.belief_size, device=self.device),
            torch.zeros(1, self.state_size, device=self.device),
            torch.zeros(1, self.action_size, device=self.device),
        )
        else:
            belief, posterior_state, action = agent_states[0]

        if self.preprocessor is not None:
            observation = self.preprocessor(observation)
            if torch.is_tensor(observation):
                observation = observation.to(self.device)
        belief, _, _, _, posterior_state, _, _ = self.transition_model(
        posterior_state, action.unsqueeze(dim=0), belief, self.encoder(observation).unsqueeze(dim=0)
        )  # Action and observation need extra time dimension
        # Drop the time dimension again before acting on the new belief/state.
        belief, posterior_state = belief.squeeze(dim=0), posterior_state.squeeze(
            dim=0
        )  # Remove time dimension from belief/state

        action = self.actor_model.get_action(belief, posterior_state, det=not (self.explore))

        if self.explore:
            # Exploration: sample Gaussian noise around the action, clamp to [-1, 1].
            action = torch.clamp(
                torch.distributions.Normal(action, self.action_noise).rsample(), -1, 1
            ) 

        return action.cpu().numpy(), [(belief, posterior_state, action)]
    

def imagine_ahead(prev_state, prev_belief, policy, transition_model, planning_horizon=12):
    """Roll out an imagined latent trajectory through the dynamics model.

    Starting from the given posterior state and belief (deterministic hidden
    state), the policy proposes an action at every step and the transition
    model predicts the next belief plus a Gaussian prior over the next latent
    state.  Unlike training on replayed experience, the actions here come
    from the actor network rather than from the environment.

    Args:
        prev_state: posterior latent states; the two leading dims (e.g.
            chunk x batch) are flattened before the rollout.
        prev_belief: deterministic beliefs, same leading layout.
        policy: actor exposing ``get_action(belief, state)``.
        transition_model: learned dynamics model (embedding, RNN cell and
            prior heads are accessed directly).
        planning_horizon: number of imagined time steps T.

    Returns:
        ``[beliefs, prior_states, prior_means, prior_std_devs]``, each a
        tensor stacked over the T-1 imagined steps.
    """
    # Merge the leading dims so each row is an independent rollout start.
    def _flatten(x):
        return x.view([-1] + list(x.size()[2:]))

    # Grow Python lists instead of writing into a preallocated tensor:
    # autograd cannot track in-place writes.
    beliefs = [_flatten(prev_belief)]
    states = [_flatten(prev_state)]
    means, std_devs = [], []

    for _ in range(planning_horizon - 1):
        # Detach so no gradient flows back through the rollout prefix.
        act = policy.get_action(beliefs[-1].detach(), states[-1].detach())
        # Deterministic path: embed (state, action) and advance the RNN belief.
        embed = F.elu(transition_model.fc_embed_state_action(
            torch.cat([states[-1], act], dim=1)))
        beliefs.append(transition_model.rnn(embed, beliefs[-1]))
        # Stochastic path: predict the prior over the next latent state.
        hid = F.elu(transition_model.fc_embed_belief_prior(beliefs[-1]))
        mean, raw_std = torch.chunk(transition_model.fc_state_prior(hid), 2, dim=1)
        std = F.softplus(raw_std) + transition_model.min_std_dev
        means.append(mean)
        std_devs.append(std)
        # Reparameterized sample of the next latent state.
        states.append(mean + std * torch.randn_like(mean))

    # Drop the seed entries; return only the T-1 imagined steps.
    return [
        torch.stack(beliefs[1:], dim=0),
        torch.stack(states[1:], dim=0),
        torch.stack(means, dim=0),
        torch.stack(std_devs, dim=0),
    ]



class TransposeObservation(gym.ObservationWrapper):
    """Transpose image observations from HWC to CHW (PyTorch layout).

    Also rewrites ``observation_space`` to the transposed shape so that code
    inspecting the space (e.g. network constructors) sees the layout that
    :meth:`observation` actually returns.
    """

    def __init__(self, env=None):
        super(TransposeObservation, self).__init__(env)
        # Bug fix: the wrapper transposed observations but left the declared
        # observation_space in (H, W, C) order — keep the two consistent.
        space = getattr(self, "observation_space", None)
        if isinstance(space, gym.spaces.Box) and len(space.shape) == 3:
            self.observation_space = gym.spaces.Box(
                low=space.low.transpose(2, 0, 1),
                high=space.high.transpose(2, 0, 1),
                dtype=space.dtype,
            )

    def observation(self, observation):
        # (H, W, C) -> (C, H, W)
        return observation.transpose(2, 0, 1)
    


class Ram2RGBWrapper(gym.Wrapper):
    """Replace an environment's observations with stacked rendered frames.

    The wrapped env's own observations are discarded; every ``reset``/``step``
    renders the screen, optionally converts it to grayscale, resizes it to
    ``obs_size`` x ``obs_size`` and stacks the last ``n_frames`` images along
    the channel axis.
    """

    def __init__(self, env, n_frames=4, obs_size=96, gray=True):
        super().__init__(env)
        self.env = env
        self.n_frames = n_frames
        self.obs_size = obs_size
        self.gray = gray
        # Rolling buffer holding the most recent rendered frames.
        self.frames = deque([], maxlen=n_frames)

        # Advertise the stacked-image observation space instead of the
        # wrapped env's original one.
        channels = (1 if gray else 3) * n_frames
        self.observation_space = gym.spaces.Box(
            low=0, high=255,
            shape=(obs_size, obs_size, channels),
            dtype=np.uint8,
        )

        self.obs = []

    def _get_rgb_observation(self):
        # Render the screen, resize it, and (optionally) convert the frame
        # to a single-channel grayscale image.
        frame = self.env.render()
        frame = cv2.resize(frame, (self.obs_size, self.obs_size))
        if self.gray:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
            frame = np.expand_dims(frame, axis=-1)
        return frame

    def _stacked(self):
        # Concatenate the buffered frames along the channel axis.
        return np.concatenate(list(self.frames), axis=-1)

    def reset(self, **kwargs):
        _, info = self.env.reset(**kwargs)
        # Seed the whole buffer with the first rendered frame.
        first = self._get_rgb_observation()
        for _ in range(self.n_frames):
            self.frames.append(first)
        return self._stacked(), info

    def step(self, action):
        _, reward, terminated, truncated, info = self.env.step(action)
        self.frames.append(self._get_rgb_observation())
        return self._stacked(), reward, terminated, truncated, info