import os.path
import copy
from typing import Any
import numpy as np
import sys
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import random
import ptan
import gymnasium as gym
from gymnasium import spaces
import logging
from logging.handlers import RotatingFileHandler
from dataclasses import dataclass, field
from typing import Tuple, Optional
import cv2
from PIL import Image
import math
import time
import wandb

def create_vector_env(num_envs, env_fn):
    """Build a gymnasium vector env: synchronous for one env, async otherwise."""
    if num_envs == 1:
        return gym.vector.SyncVectorEnv([env_fn])
    return gym.vector.AsyncVectorEnv([env_fn] * num_envs)



@torch.no_grad()
def make_grid(tensor, nrow, padding, pad_value=0):
    """Tile a batch of images (N, C, H, W) into one image grid.

    Variant of torchvision.utils.make_grid that accepts separate y/x paddings:
    ``padding`` is a (pad_y, pad_x) pair.
    """
    total = tensor.size(0)
    cols = min(nrow, total)
    rows = int(math.ceil(float(total) / cols))
    cell_h = int(tensor.size(2) + padding[0])
    cell_w = int(tensor.size(3) + padding[1])
    channels = tensor.size(1)
    canvas = tensor.new_full(
        (channels, cell_h * rows + padding[0], cell_w * cols + padding[1]), pad_value)
    for index in range(total):
        row, col = divmod(index, cols)
        canvas.narrow(1, row * cell_h + padding[0], cell_h - padding[0]) \
              .narrow(2, col * cell_w + padding[1], cell_w - padding[1]) \
              .copy_(tensor[index])
    return canvas


def to_image(tensor):
    """Convert a float image tensor (C, H, W), values in [0, 1], to an RGB PIL image."""
    from PIL import Image
    # scale to [0, 255], round via +0.5, clamp, then reorder to HWC uint8
    scaled = tensor.mul(255).add_(0.5).clamp_(0, 255)
    hwc = scaled.permute(1, 2, 0).to('cpu', torch.uint8)
    if hwc.shape[-1] == 1:
        hwc = hwc.squeeze(-1)
    return Image.fromarray(hwc.numpy()).convert('RGB')


def create_reward_transform(transform_type):
    """Return a reward-shaping function handling both scalars and tensors.

    'tanh'        -> squash into (-1, 1)
    'clip'        -> hard-clip to [-1, 1]
    'none' / None -> identity
    Raises ValueError for anything else.
    """
    if transform_type == 'tanh':
        return lambda r: torch.tanh(r) if torch.is_tensor(r) else math.tanh(r)
    if transform_type == 'clip':
        return lambda r: torch.clip(r, -1, 1) if torch.is_tensor(r) else np.clip(r, -1, 1)
    if transform_type == 'none' or transform_type is None:
        return lambda r: r
    raise ValueError(transform_type)



def softmax(x, axis=0):
    """Numerically stable NumPy softmax along `axis`.

    Args:
        x: numpy array
        axis: dimension to normalize over

    Returns:
        Probabilities with the same shape as `x`.
    """
    # subtract the max first so exp() cannot overflow
    shifted = x - np.max(x, axis=axis, keepdims=True)
    weights = np.exp(shifted)
    return weights / weights.sum(axis=axis, keepdims=True)



def random_choice(n, num_samples, replacement=False):
    """Sample `num_samples` indices from range(n) with NumPy.

    With replacement: independent uniform draws (duplicates possible).
    Without replacement: a uniform random subset of distinct indices.

    NOTE: a torch-based `random_choice` defined later in this module shadows
    this definition for importers of the module.

    Returns:
        numpy.ndarray of sampled indices.
    """
    if not replacement:
        return np.random.choice(n, size=num_samples, replace=False)
    return np.random.randint(0, n, size=num_samples)


def preprocess_atari_obs(obs):
    """Normalize raw Atari pixel observations from [0, 255] into [0, 1]."""
    normalized = obs / 255.0
    return normalized


class ExperienceRawReplayBuffer:
    """Flat replay buffer over raw transitions with visit-count-aware sampling.

    Observations, actions, rewards, termination flags and per-episode timestep
    counters live in pre-allocated numpy arrays. `sample_visits` (torch, CPU)
    counts how often each start index has been sampled, so rarely-visited data
    can be preferred by `sample_indices`.
    """

    def __init__(self, experience_source, buffer_size, env, params):
        """
        :param experience_source: ptan ExperienceSource, or None to attach one
            later via set_experience_source()
        :param buffer_size: number of transitions the buffer can hold
        :param env: environment; only its observation_space.shape is used
        :param params: dict with at least 'env_reward_transform' and
            'buffer_temperature'
        """
        assert isinstance(experience_source, (ptan.experience.ExperienceSource, type(None)))
        assert isinstance(buffer_size, int)

        obs_shape = env.observation_space.shape
        # iterator over the experience source (None until one is attached)
        self.experience_source_iter = None if experience_source is None else iter(experience_source)
        # BUGFIX: (buffer_size + 1) must be a 1-tuple before concatenating with
        # obs_shape — int + tuple raised TypeError. The +1 slot stores the
        # next state of the most recent transition.
        self.obs = np.zeros((buffer_size + 1,) + obs_shape, dtype=np.float32)
        self.actions = np.zeros(buffer_size, dtype=np.int32)
        self.rewards = np.zeros(buffer_size, dtype=np.float32)
        self.terminated = np.zeros(buffer_size, dtype=bool)
        # per-step position inside the current episode; +1 mirrors the obs layout
        self.timesteps = np.zeros(buffer_size + 1, dtype=np.int32)
        self.timesteps[0] = 0
        # how often each index has been drawn by sample_indices (kept on CPU)
        self.sample_visits = torch.zeros(buffer_size, dtype=torch.long, device='cpu')

        self.size = 0  # number of stored transitions
        self.total_reward = 0
        self.score = 0  # running score of the episode currently being filled
        self.episode_lengths = []
        self.scores = []
        # reward shaping, e.g. tanh squashing into (-1, 1)
        self.reward_transform = create_reward_transform(params['env_reward_transform'])
        self.metrics_num_episodes = 0  # episodes already reported via metrics()
        self.capacity = buffer_size
        self.pos = 0  # write cursor (currently unused; the buffer fills once)
        self.params = params

    def set_experience_source(self, experience_source):
        """Attach (or replace) the experience source feeding populate()."""
        assert isinstance(experience_source, ptan.experience.ExperienceSource)
        self.experience_source_iter = iter(experience_source)

    def __len__(self):
        return self.size

    def __iter__(self):
        # NOTE(review): `sample` is not defined on this class; iteration only
        # works if a subclass provides it — confirm before relying on iter()
        return self.sample(1)

    def get_data(self, idx, prefix=None, return_next_obs=False):
        """Fetch (obs, actions, rewards, terminated, truncated, timesteps) at idx.

        :param idx: (batch, sequence) index array; see _get for accepted forms
        :param prefix: optional number of context steps to prepend
        :param return_next_obs: also append the next observation per row
        """
        obs = self._get(self.obs, idx, prefix, return_next_obs)
        actions = self._get(self.actions, idx, prefix)
        rewards = self._get(self.rewards, idx, prefix)
        terminated = self._get(self.terminated, idx, prefix)
        # no truncation flags are stored; terminated doubles as truncated
        # (matching get_truncated below)
        truncated = self._get(self.terminated, idx, prefix)
        timesteps = self._get(self.timesteps, idx, prefix)
        return obs, actions, rewards, terminated, truncated, timesteps

    def get_obs(self, idx, prefix=0, return_next=False):
        """Observations at idx, scaled into [0, 1]."""
        obs = self._get(self.obs, idx, prefix, return_next=return_next, allow_last=True)
        return preprocess_atari_obs(obs)

    def get_actions(self, idx, prefix=0):
        # out-of-range context positions are filled with 0 (noop)
        return self._get(self.actions, idx, prefix, repeat_fill_value=0)

    def get_rewards(self, idx, prefix=0):
        return self._get(self.rewards, idx, prefix, repeat_fill_value=0.)

    def get_terminated(self, idx, prefix=0):
        return self._get(self.terminated, idx, prefix)

    def get_truncated(self, idx, prefix=0):
        # no dedicated truncated array; terminated is reused as a stand-in
        return self._get(self.terminated, idx, prefix)

    def get_timesteps(self, idx, prefix=None):
        return self._get(self.timesteps, idx, prefix, allow_last=True)

    def _get(self, array, idx, prefix=0, return_next=False, repeat_fill_value=None, allow_last=False):
        """Gather `array[idx]` with optional left context and one-step suffix.

        :param array: source numpy array, indexed along its first axis
        :param idx: int, range, tuple/list or ndarray; 1-D input is treated as
            a single batch row and squeezed back on return
        :param prefix: context steps prepended before each row's first index;
            negative positions are clamped to 0 (first element repeated)
        :param return_next: also append each row's `last index + 1`; if that
            runs off the end, the last valid index is repeated
        :param repeat_fill_value: value written into out-of-range prefix/suffix
            positions instead of the repeated element (e.g. 0 for actions)
        :param allow_last: allow the suffix index to equal self.size (valid for
            arrays with a trailing next-state slot such as obs/timesteps)
        :return: gathered values, squeezed back to the input's dimensionality
        """
        if prefix is None:
            # BUGFIX: get_data/get_timesteps default to prefix=None, which
            # previously crashed the `prefix >= 0` assertion
            prefix = 0
        assert prefix >= 0
        squeeze_seq = False
        squeeze_batch = False
        if isinstance(idx, int):
            idx = np.array([idx])
            squeeze_seq = True
        if isinstance(idx, range):
            idx = tuple(idx)
        if isinstance(idx, (tuple, list, np.ndarray)):
            idx = np.array(idx)

        if idx.ndim == 1:
            idx = np.expand_dims(idx, axis=0)  # -> (1, seq_len)
            squeeze_batch = True

        if prefix > 0 or return_next:
            idx_list = [idx]
            if prefix > 0:
                # context indices before each row's first index
                # BUGFIX: idx is a numpy array, so broadcast with slicing
                # instead of the torch-only .unsqueeze()
                prefix_idx = idx[:, :1] + np.arange(-prefix, 0)  # (batch, prefix)
                prefix_mask = prefix_idx < 0
                # repeat the first buffer element when context precedes index 0
                prefix_idx = np.where(prefix_mask, 0, prefix_idx)
                idx_list.insert(0, prefix_idx)

            if return_next:
                last_idx = idx[:, -1]
                suffix_idx = last_idx + 1
                # repeat the last valid index if the suffix runs off the end
                suffix_mask = (suffix_idx > self.size) if allow_last else (suffix_idx >= self.size)
                suffix_idx = np.where(suffix_mask, last_idx, suffix_idx)
                # BUGFIX: numpy slicing instead of torch .unsqueeze(1)
                idx_list.append(suffix_idx[:, None])

            # (batch, prefix + seq_len [+ 1])
            idx = np.concatenate(idx_list, axis=1)
            x = array[idx]

            if repeat_fill_value is not None:
                if prefix > 0:
                    # overwrite out-of-range context with the fill value
                    tmp = x[:, :prefix]
                    tmp[prefix_mask] = repeat_fill_value
                    x[:, :prefix] = tmp
                if return_next:
                    # overwrite out-of-range suffix with the fill value
                    x[suffix_mask, -1] = repeat_fill_value
        else:
            x = array[idx]

        # undo the temporary batch/sequence dimensions added above
        if squeeze_seq:
            x = np.squeeze(x, axis=1)
        if squeeze_batch:
            x = np.squeeze(x, axis=0)
        return x

    def _add(self, sample):
        """Append one (state, action, reward, done, next_state, agent_state) step."""
        index = self.size
        state, action, reward, done, next_state, agent_state = sample[0]
        if self.size == 0:
            self.obs[0] = state  # seed the initial observation slot
        self.obs[index + 1] = next_state
        # store the shaped reward
        self.rewards[index] = self.reward_transform(reward)
        self.actions[index] = action
        self.terminated[index] = done
        # timestep counter restarts at episode boundaries, which lets us
        # recover episode lengths and tell episodes apart later
        self.timesteps[index + 1] = 0 if done else (self.timesteps[index] + 1)

        self.size = index + 1
        # bookkeeping: total reward and the current episode's score
        self.total_reward += reward
        self.score += reward
        if done:
            self.episode_lengths.append(self.timesteps[index] + 1)
            self.scores.append(self.score)
            self.score = 0

    def populate(self, samples):
        """Pull `samples` transitions from the experience source into the buffer.

        :param samples: how many transitions to fetch and store
        """
        for _ in range(samples):
            entry = next(self.experience_source_iter)
            self._add(entry)

    def metrics(self):
        """Collect buffer statistics: size, total reward, episode count, visit
        entropy, plus mean length/score of episodes finished since last call."""
        num_episodes = len(self.episode_lengths)
        metrics = {'size': self.size, 'total_reward': self.total_reward, 'num_episodes': num_episodes,
                   'visit_ent': self.compute_visit_entropy()}

        if num_episodes > self.metrics_num_episodes:
            new_episodes = num_episodes - self.metrics_num_episodes
            self.metrics_num_episodes = num_episodes
            metrics.update({'episode_len': compute_mean(self.episode_lengths[-new_episodes:]),
                            'episode_score': np.mean(self.scores[-new_episodes:])})
        return metrics

    def visit_histogram(self):
        """wandb histogram of per-index sample counts."""
        visits = self.sample_visits[:self.size]
        return self._get_histogram(visits, step=500)

    def _get_histogram(self, values, step):
        """Bucket `values` into bins of `step` indices and sum each bucket."""
        import wandb
        num_bins = int(math.ceil(self.size / step)) + 1
        bins = np.arange(num_bins) * step
        values = [v.sum().item() for v in torch.split(values, step)]
        return wandb.Histogram(np_histogram=[values, bins])

    def sample_probs_histogram(self):
        """wandb histogram of the current sampling probabilities."""
        n = self.size
        visit_probs = self._compute_visit_probs(n)
        return self._get_histogram(visit_probs, step=500)

    def _compute_visit_probs(self, n):
        """Sampling probabilities for the first `n` transitions.

        Less-visited transitions get higher probability (prioritized replay by
        visit count). temperature == 'inf' uses inverse-visit weighting;
        otherwise a softmax over -visits/temperature.

        :return: 1-D float torch tensor of probabilities summing to 1
        """
        temperature = self.params['buffer_temperature']
        visits = self.sample_visits[:n].float()
        if temperature == 'inf':
            visit_sum = visits.sum()
            if visit_sum == 0:
                probs = torch.full_like(visits, 1.0 / n)
            else:
                # BUGFIX: the raw (1 - v/V) weights sum to n-1, not 1, which
                # broke np.random.choice(p=...); normalize them
                weights = 1 - visits / visit_sum
                probs = weights / weights.sum()
        else:
            # BUGFIX: use torch.softmax on the tensor logits instead of the
            # module-level numpy softmax helper
            probs = torch.softmax(visits / -temperature, dim=0)
        return probs

    def sample_indices(self, max_batch_size, sequence_length):
        """Sample window start indices and expand each into a full window.

        :return: numpy index array of shape (batch_size, sequence_length)
        :raises ValueError: if the buffer holds too little data
        """
        n = self.size - sequence_length + 1  # number of valid window starts
        batch_size = max_batch_size
        if batch_size * sequence_length > n:
            raise ValueError('Not enough data in buffer')

        probs = self._compute_visit_probs(n)
        # BUGFIX: numpy's kwarg is `replace`, not `replacement`
        start_idx = np.random.choice(len(probs), size=batch_size, p=probs.numpy(), replace=False)

        # update visit counts for the sampled start indices (deduplicated,
        # so repeated indices — only possible with replacement — count fully)
        flat_idx, counts = np.unique(start_idx, return_counts=True)
        self.sample_visits[flat_idx] += torch.from_numpy(counts)

        # expand each start into a contiguous window of indices
        idx = start_idx[:, None] + np.arange(sequence_length)
        return idx



def save_best_model(score, state, save_dir, save_name, keep_best = 5):
    os.makedirs(save_dir, exist_ok=True)

    save_path = os.path.join(save_dir, f'{save_name}_{score}.pth')
    torch.save(state, save_path)

    all_model = sorted(filter(lambda x: "best" in x and "_" in x, os.listdir(save_dir)), key=lambda x: int(x.split('_')[-1].split('.')[0]))
    if len(all_model) > keep_best:
        for old_model in all_model[:-keep_best]:
            os.remove(os.path.join(save_dir, old_model))
    
def save_checkpoints(iter, state, checkpoint_dir, save_name, keep_last=5):
    """Save `state` as '<save_name>_epoch_<iter>.pth' and prune old epochs.

    Keeps only the `keep_last` checkpoints with the highest epoch numbers.

    :param iter: epoch/iteration number (name kept for caller compatibility
        even though it shadows the builtin)
    """
    # race-free compared to the old exists()+makedirs() pair
    os.makedirs(checkpoint_dir, exist_ok=True)

    torch.save(state, os.path.join(checkpoint_dir, f'{save_name}_epoch_{iter}.pth'))

    def epoch_of(fname):
        # '<...>_epoch_<n>.pth' -> n
        return int(fname.split('_')[-1].split('.')[0])

    checkpoints = sorted((f for f in os.listdir(checkpoint_dir) if 'epoch' in f), key=epoch_of)
    if len(checkpoints) > keep_last:
        for stale in checkpoints[:-keep_last]:
            os.remove(os.path.join(checkpoint_dir, stale))



def generate_uniform_indices(batch_size, sequence_length, replay_buffer_size, extra=0, device='cpu'):
    """Yield batches of window indices covering the buffer roughly uniformly.

    A random offset < sequence_length is drawn; non-overlapping window starts
    are then taken every `sequence_length` steps, shuffled, and yielded in
    chunks of at most `batch_size`. Each yielded tensor has shape
    (chunk_size, sequence_length + extra); `extra` adds trailing indices
    (e.g. 2 for context + next step).
    """
    offset = random.randint(0, sequence_length - 1)
    starts = torch.arange(offset, replay_buffer_size - sequence_length, sequence_length,
                          dtype=torch.long, device=device)
    # shuffle the start positions so batches are drawn in random order
    starts = starts[torch.randperm(starts.shape[0], device=device)]
    window = torch.arange(sequence_length + extra, device=device)
    for begin in range(0, len(starts), batch_size):
        chunk = starts[begin:begin + batch_size]
        yield chunk.unsqueeze(-1) + window


"""
该类就是用来跟踪、记录、判断激励的追踪类
"""
class RewardTracker:
    """Tracks episode rewards, prints progress, logs metrics to wandb and
    signals when the 100-episode mean reward reaches the stop threshold."""

    def __init__(self, stop_reward):
        """
        :param stop_reward: mean-reward target at which training should stop
        """
        self.stop_reward = stop_reward

    def __enter__(self):
        self.ts = time.time()
        self.ts_frame = 0
        # per-episode rewards recorded during training
        self.total_rewards = []
        return self

    def __exit__(self, *args):
        pass

    def reward(self, reward, frame, epsilon=None):
        """Record one episode reward and log progress.

        :param reward: episode reward
        :param frame: global frame counter at the time of the episode end
        :param epsilon: current exploration epsilon, if any
        :return: True once the 100-episode mean reward exceeds stop_reward
        """
        self.total_rewards.append(reward)
        elapsed = time.time() - self.ts
        # BUGFIX: guard against a zero time delta (two calls within clock
        # resolution), which raised ZeroDivisionError
        speed = (frame - self.ts_frame) / elapsed if elapsed > 0 else 0.0
        self.ts_frame = frame
        self.ts = time.time()
        # mean over the most recent (up to) 100 episodes
        mean_reward = np.mean(self.total_rewards[-100:])
        epsilon_str = "" if epsilon is None else ", eps %.2f" % epsilon
        print("%d: done %d games, mean reward %.3f, speed %.2f f/s%s" % (
            frame, len(self.total_rewards), mean_reward, speed, epsilon_str
        ))
        sys.stdout.flush()
        metrics = {
            "speed": speed,
            "reward_100": mean_reward,
            "reward": reward,
            "frame": frame,
        }
        if epsilon is not None:
            metrics["epsilon"] = epsilon
        wandb.log(metrics)
        if mean_reward > self.stop_reward:
            print("Solved in %d frames!" % frame)
            return True
        return False


@dataclass
class AgentState(ptan.experience.BaseAgentState):
    """Per-agent recurrent state carried between environment steps.

    Attributes:
        obs: latest observation (i.e. the next_obs of the last step), (1, ...)
        action_dim: size of the discrete action space
        last_action: one-hot of the action that produced `obs`, (1, action_dim)
        last_reward: reward received on reaching `obs`, shape (1, 1)
        hidden_state: optional LSTM (h, c) pair
        q_value: last Q-value estimate (assigned externally, not in __init__)
    """
    obs: torch.Tensor
    action_dim: int
    last_action: torch.Tensor = field(init=False)
    # BUGFIX: use default_factory so each instance gets its own tensor
    # instead of all instances sharing one mutable class-level default
    last_reward: torch.Tensor = field(default_factory=lambda: torch.zeros((1, 1), dtype=torch.float32))
    hidden_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None
    q_value: torch.Tensor = field(init=False)

    def __post_init__(self):
        # start with an all-zero "one-hot" meaning no previous action
        self.last_action = torch.zeros((1, self.action_dim), dtype=torch.float32)

    def update(self, obs, action, reward, done, next_obs):
        """Advance the state after an environment step.

        :param obs: previous observation (unused; kept for caller signature)
        :param action: int index of the executed action
        :param reward: reward received for the step
        :param done: episode-end flag (unused here)
        :param next_obs: new observation as a numpy array
        """
        self.obs = torch.from_numpy(next_obs).unsqueeze(0)
        # one-hot encode the executed action
        self.last_action = F.one_hot(torch.tensor([action]), num_classes=self.action_dim).float()
        self.last_reward = torch.FloatTensor([[reward]])
    


def select_device(args):
    """Pick a torch device: cuda > mps > cpu, gated on args.cuda."""
    if not args.cuda:
        return torch.device("cpu")
    if torch.cuda.is_available():
        return torch.device("cuda")
    if torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")


import collections



class ProcessFrame(gym.ObservationWrapper):
    """Crops and resizes game frames to (size, size), optionally grayscale."""

    def __init__(self, env=None, gray=True, size=84):
        super(ProcessFrame, self).__init__(env)
        # new observation space: uint8 image, 1 channel if grayscale else 3
        self.observation_space = spaces.Box(low=0, high=255, shape=(size, size, 1 if gray else 3), dtype=np.uint8)
        self.gray = gray
        self.size = size

    def observation(self, obs):
        """Transform one raw frame into the processed observation."""
        return ProcessFrame.process(obs, self.gray, self.size)

    @staticmethod
    def process(img, gray=True, size=84):
        """Grayscale (luma weights), crop rows 19:169, resize to (size, size).

        :return: uint8 array of shape (size, size, 1) or (size, size, 3)
        """
        # BUGFIX: removed dead `global count_frame` — the name was never
        # defined or used anywhere
        x_t = img
        if gray:
            # ITU-R BT.601 luma conversion
            x_t = x_t[:, :, 0] * 0.299 + x_t[:, :, 1] * 0.587 + x_t[:, :, 2] * 0.114

        # crop away border/score rows — TODO confirm this window fits all games
        x_t = x_t[19:169, :]
        x_t = cv2.resize(x_t, (size, size), interpolation=cv2.INTER_AREA)
        x_t = np.reshape(x_t, [size, size, 1 if gray else 3])
        return x_t.astype(np.uint8)

    @staticmethod
    def save_state_as_image(state, filename):
        """Save a state array as a PNG image (debug helper)."""
        if state.dtype != np.uint8:
            # scale floats into [0, 255] before converting to uint8
            state = np.uint8(255 * (state - state.min()) / (state.max() - state.min()))
        # drop singleton dimensions so PIL accepts the array
        state = state.squeeze()
        img = Image.fromarray(state)
        # fall back to grayscale for modes PIL cannot save directly
        if img.mode not in ('L', 'RGB'):
            img = img.convert('L')
        img.save(filename)


class FireResetEnv(gym.Wrapper):
    """For environments where the user must press FIRE to start the game.

    After every reset, actions 1 (FIRE) and 2 are pressed in turn; if either
    accidentally ends the episode, the env is reset again.
    """

    def __init__(self, env=None):
        super(FireResetEnv, self).__init__(env)
        # games of this kind expose FIRE as action 1 and have >= 3 actions
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def step(self, action):
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        self.env.reset(seed=seed, options=options)
        obs, info = None, None
        # press the start-up actions in order, recovering from accidental ends
        for start_action in (1, 2):
            obs, _, done, _, info = self.env.step(start_action)
            if done:
                self.env.reset(seed=seed, options=options)
        return obs, info
    


class RewardPenaltyWrapper(gym.Wrapper):
    """Replaces the step reward with `life_loss_penalty` whenever a life is lost.

    NOTE(review): `frame_penalty` is stored but never applied — confirm whether
    a per-frame penalty was intended.
    """

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty
        self.life_loss_penalty = life_loss_penalty
        self.previous_lives = 0

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # remember the starting life count so losses can be detected
        self.previous_lives = info.get('lives', 0)
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)
        lives = info.get('lives', self.previous_lives)
        if lives < self.previous_lives:
            # a life was lost: override the reward with the penalty
            reward = self.life_loss_penalty
            self.previous_lives = lives
        return obs, reward, done, truncated, info


class FrameStack(gym.Wrapper):
    """Stacks the last k observations along the first (channel) axis."""

    def __init__(self, env, k):
        super(FrameStack, self).__init__(env)
        self.k = k
        self.frames = collections.deque(maxlen=k)
        shape = env.observation_space.shape
        # stacked space: k copies concatenated along axis 0
        self.observation_space = gym.spaces.Box(
            low=0, high=255, shape=(shape[0] * k, *shape[1:]), dtype=np.float32
        )

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # fill the whole stack with the initial observation
        self.frames.extend([obs] * self.k)
        return self._get_obs(), info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)
        self.frames.append(obs)
        return self._get_obs(), reward, done, truncated, info

    def _get_obs(self):
        # concatenate eagerly with NumPy instead of using LazyFrames
        return np.concatenate(tuple(self.frames), axis=0)
    

class NoAutoReset(gym.Wrapper):
    """Prevents the env from discarding the terminal state on reset.

    After an episode ends, reset() keeps returning the cached final
    observation/info until a reset with options={'force': True} is issued.
    """

    def __init__(self, env):
        super().__init__(env)
        self.final_observation = None
        self.final_info = None

    def reset(self, seed=None, options=None):
        forced = options is not None and options.get('force', False)
        if forced or self.final_observation is None:
            return self.env.reset(seed=seed, options=options)
        return self.final_observation, self.final_info

    def step(self, action):
        obs, reward, terminated, truncated, info = self.env.step(action)
        if terminated or truncated:
            # cache the terminal transition so reset() can replay it
            self.final_observation = obs
            self.final_info = info
        return obs, reward, terminated, truncated, info


def wrap_dqn(env, episodic_life=True, gray=True, eval=False):
    """Build the standard Atari preprocessing stack for DQN training.

    :param env: gymnasium env id string
    :param episodic_life: treat each life as its own episode
    :param gray: convert frames to grayscale
    :param eval: additionally wrap with NoAutoReset for evaluation rollouts
    """
    wrapped = gym.make(env, frameskip=4, repeat_action_probability=0)
    if episodic_life:
        # turn multi-life games into single-life episodes
        wrapped = ptan.common.wrappers.EpisodicLifeEnv(wrapped)
    # randomize the initial state with up to 30 no-ops
    wrapped = ptan.common.wrappers.NoopResetEnv(wrapped, noop_max=30)
    if 'FIRE' in wrapped.unwrapped.get_action_meanings():
        wrapped = FireResetEnv(wrapped)
    # TODO compare different resolutions / color settings
    wrapped = ProcessFrame(wrapped, gray=gray, size=84)
    wrapped = FrameStack(env=wrapped, k=4)
    wrapped = ptan.common.wrappers.ImageToPyTorch(wrapped)
    wrapped = RewardPenaltyWrapper(wrapped)
    if eval:
        wrapped = NoAutoReset(wrapped)
    return wrapped


def setup_logger(save_path):
    """Create (or return) the module logger writing to <save_path>/train.log.

    Uses a rotating file handler (1 MB, 2 backups).

    BUGFIX: repeated calls previously attached a new handler each time,
    duplicating every log line; a handler is now added only once.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    if not logger.handlers:  # avoid stacking duplicate handlers
        handler = RotatingFileHandler(os.path.join(save_path, 'train.log'),
                                      maxBytes=1024 * 1024, backupCount=2)
        handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
        logger.addHandler(handler)
    return logger


def save_model(model_name, loss, best_loss, model):
    """Save `model`, and snapshot it as the best model when `loss` improves.

    BUGFIX: the best-model filename previously embedded the *old* best_loss
    instead of the new improved loss.

    :param model_name: name used in the output filenames
    :param loss: current loss
    :param best_loss: best loss seen so far
    :param model: object handed to torch.save
    :return: the (possibly updated) best loss
    """
    os.makedirs("saves", exist_ok=True)

    if loss < best_loss:
        best_loss = loss
        torch.save(model, f'saves/best_model_{model_name}_{best_loss}.dat')

    torch.save(model, f'saves/model_{model_name}.dat')
    return best_loss



def update_metrics(metrics, new_metrics, prefix=None):
    """Merge `new_metrics` into `metrics` (in place), optionally prefixing keys.

    Tensor values must be detached scalars (0-d or shape (1,)) and are cloned;
    plain ints/floats pass through unchanged.
    """
    def sanitize(key, value):
        if isinstance(value, (int, float)):
            return value
        assert torch.is_tensor(value), key
        assert not value.requires_grad, key
        assert value.ndim == 0 or value.shape == (1,), key
        return value.clone()

    for key, value in new_metrics.items():
        out_key = key if prefix is None else f'{prefix}{key}'
        metrics[out_key] = sanitize(key, value)
    return metrics


def combine_metrics(metrics, prefix=None):
    """Merge a list of metric dicts into one, with optional per-dict prefixes."""
    combined = {}
    if prefix is None:
        for entry in metrics:
            update_metrics(combined, entry)
    else:
        for entry, tag in zip(metrics, prefix):
            update_metrics(combined, entry, tag)
    return combined


'''
让我分析一下 `mean_metrics` 函数的功能：

### 函数分析

是的，`mean_metrics` 函数的主要功能是计算多个时间点收集的指标的平均值，但有一些特殊情况：

1. **基本功能**
- 接收一个包含多个时间点指标字典的列表 `metrics_history`
- 计算每个指标across时间的平均值

2. **特殊处理**
- 如果 `metrics_history` 为空，返回空字典 `{}`
- 如果只有一个时间点的指标，直接返回该时间点的指标
- 对于在 `except_keys` 中的指标或 `WBValue` 类型的指标，不计算平均值，而是使用最后一个值

### 代码示例

```python
# 假设有以下指标历史
metrics_history = [
    {'loss': 0.5, 'accuracy': 0.8, 'episode': 1},
    {'loss': 0.3, 'accuracy': 0.9, 'episode': 2},
    {'loss': 0.2, 'accuracy': 0.95, 'episode': 3}
]

# 计算平均值，但排除 'episode' 指标
result = mean_metrics(metrics_history, except_keys=['episode'])

# result 将类似于:
# {
#     'loss': 0.33,        # (0.5 + 0.3 + 0.2) / 3
#     'accuracy': 0.88,    # (0.8 + 0.9 + 0.95) / 3
#     'episode': 3         # 使用最后一个值
# }
```

### 实现细节

- 使用 `collections.defaultdict` 来收集每个指标的历史值
- 使用 `compute_mean` 函数来计算平均值，该函数支持：
  - PyTorch tensor 的平均值计算
  - 列表或元组类型的数据转换为 tensor 后计算平均值
'''
def mean_metrics(metrics_history, except_keys=None):
    """Average a list of metric dicts over time.

    Keys listed in `except_keys` keep their *last* value instead of being
    averaged, as do values that are not averageable (anything that is not an
    int/float/tensor — e.g. wandb media objects).

    BUGFIX: the old code tested `isinstance(value, WBValue)` where WBValue was
    never imported, raising NameError on any multi-entry history.

    :param metrics_history: list of {key: value} dicts collected over time
    :param except_keys: keys to pass through with their last value
    :return: {} for empty input; the single dict for length-1 input; otherwise
        a dict of means / last values
    """
    if len(metrics_history) == 0:
        return {}
    if len(metrics_history) == 1:
        return metrics_history[0]
    except_keys = set() if except_keys is None else set(except_keys)
    result = {}
    value_history = collections.defaultdict(list)
    for metrics in metrics_history:
        for key, value in metrics.items():
            averageable = isinstance(value, (int, float)) or torch.is_tensor(value)
            if key in except_keys or not averageable:
                result[key] = value  # keep the most recent value
            else:
                value_history[key].append(value)
    result.update({key: compute_mean(values) for key, values in value_history.items()})
    return result


class MetricsSummarizer:
    """Accumulates metric dicts and emits their mean on summarize()."""

    def __init__(self, except_keys=None):
        # keys passed through with their last value instead of being averaged
        self.except_keys = set() if except_keys is None else set(except_keys)
        self.metrics_history = []

    def append(self, metrics):
        """Queue one metrics dict for the next summary."""
        self.metrics_history.append(metrics)

    def summarize(self):
        """Return the mean of all queued metrics and clear the queue."""
        summary = mean_metrics(self.metrics_history, except_keys=self.except_keys)
        self.metrics_history = []
        return summary


def compute_mean(values):
    """Mean of a tensor, or of a list/tuple of numbers / scalar tensors.

    :return: 0-d float torch tensor
    :raises ValueError: for unsupported input types or an empty sequence
        (previously an empty list surfaced as a confusing torch RuntimeError
        and the type error carried no message)
    """
    if torch.is_tensor(values):
        return values.float().mean()
    if isinstance(values, (tuple, list)):
        if not values:
            raise ValueError('cannot compute the mean of an empty sequence')
        return torch.stack([torch.as_tensor(v).detach() for v in values]).float().mean()
    raise ValueError(f'unsupported type for compute_mean: {type(values)!r}')


def random_choice(n, num_samples, replacement=False, device=None):
    """Torch version of sampling `num_samples` indices from range(n).

    NOTE: this redefines the numpy `random_choice` declared earlier in this
    module; being later, it is the definition callers actually get.
    """
    if replacement:
        return torch.randint(0, n, (num_samples,), device=device)
    uniform = torch.ones(n, device=device)
    return torch.multinomial(uniform, num_samples, replacement=False)


def windows(x, window_size, window_stride=1):
    """Slide fixed-size windows over dim 1 of `x`.

    For x of shape (B, T, ...) the result has shape
    (B, num_windows, window_size, ...) with
    num_windows = (T - window_size) // window_stride + 1.

    Example: x of shape (32, 100, 64), window_size=10, window_stride=5
    yields shape (32, 19, 10, 64).
    """
    unfolded = x.unfold(1, window_size, window_stride)  # window dim appended last
    # move the trailing window dimension to position 2
    order = list(range(unfolded.ndim - 1))
    order.insert(2, unfolded.ndim - 1)
    return unfolded.permute(order)


def same_batch_shape(tensors, ndim=2):
    """True if every tensor shares the first `ndim` (batch) dimensions."""
    assert all(t.ndim >= ndim for t in tensors)
    reference = tensors[0].shape[:ndim]
    return all(t.shape[:ndim] == reference for t in tensors[1:])


def same_batch_shape_time_offset(a, b, offset):
    """True if `a` is shaped (B, T + offset, ...) for `b` of shape (B, T, ...)."""
    assert a.ndim >= 2 and b.ndim >= 2
    same_batch = a.shape[0] == b.shape[0]
    shifted_time = a.shape[1] == b.shape[1] + offset
    return same_batch and shifted_time


def check_no_grad(*tensors):
    """True if every given tensor is None or does not require gradients."""
    for t in tensors:
        if t is not None and t.requires_grad:
            return False
    return True


class AdamOptim:
    """Adam optimizer wrapper adding optional gradient clipping and a StepLR
    scheduler (scheduler stepping is currently disabled in `step`)."""

    def __init__(self, parameters, lr, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, grad_clip=0):
        # materialize the parameter iterator so it can be reused for clipping
        self.parameters = list(parameters)
        self.grad_clip = grad_clip  # max gradient norm; 0 disables clipping
        self.optimizer = optim.Adam(self.parameters, lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=10000, gamma=0.96)

    def step(self, loss):
        """Backpropagate `loss` and take one optimizer step (clipping first)."""
        self.optimizer.zero_grad()
        loss.backward()
        if self.grad_clip > 0:
            nn.utils.clip_grad_norm_(self.parameters, self.grad_clip)
        self.optimizer.step()
        # scheduler stepping intentionally left disabled
        # self.scheduler.step()

    def state_dict(self):
        """Return combined optimizer + scheduler state for checkpointing."""
        return {
            "optimizer": self.optimizer.state_dict(),
            "scheduler": self.scheduler.state_dict(),
        }
    
    def load_state_dict(self, state_dict):
        """Restore optimizer and scheduler state from a checkpoint dict."""
        self.optimizer.load_state_dict(state_dict["optimizer"])
        self.scheduler.load_state_dict(state_dict["scheduler"])
        # print the restored learning rate of each parameter group
        for param_group in self.optimizer.param_groups:
            print("当前学习率： {}".format(param_group['lr']))

