import sys
import time
import numpy as np
from collections import deque

import torch
import torch.nn as nn
import torch.nn.functional as F
import ptan
import os.path
import time
from typing import Any
import numpy as np
import sys
import torch
import torch.nn as nn
import ptan
import gymnasium as gym
from gymnasium import spaces
import logging
from logging.handlers import RotatingFileHandler
import cv2
from PIL import Image
import math

HID_SIZE = 128


def select_device(args):
    """Choose the torch device for training.

    Prefers CUDA when requested and available, then MPS (Apple Silicon)
    when requested and available, and falls back to CPU otherwise.

    :param args: namespace with a boolean ``cuda`` acceleration flag
    :return: torch.device
    """
    if args.cuda:
        if torch.cuda.is_available():
            return torch.device("cuda")
        if torch.backends.mps.is_available():
            return torch.device("mps")
    return torch.device("cpu")


class SlacObservation:
    """
    Sliding-window observation buffer for SLAC.

    Keeps the most recent ``num_sequences`` raw states and the most recent
    ``num_sequences - 1`` actions, exposing both as batched numpy arrays.
    """

    def __init__(self, state_shape, action_shape, num_sequences):
        '''
        :param state_shape: shape of a single environment observation
        :param action_shape: shape of a single action
        :param num_sequences: window length (8 by default in SLAC)
        '''
        self.state_shape = state_shape
        self.action_shape = action_shape
        self.num_sequences = num_sequences

    def reset_episode(self, state):
        '''
        Begin a new episode from the reset observation.

        The window is pre-filled with zeros so that ``state`` immediately
        holds ``num_sequences`` frames and ``action`` holds
        ``num_sequences - 1`` actions; the reset observation itself has no
        preceding action.
        '''
        self._state = deque(maxlen=self.num_sequences)       # uint8 frames
        self._action = deque(maxlen=self.num_sequences - 1)  # float actions
        # Pad with fresh zero entries (one per slot, not shared objects).
        for _ in range(self.num_sequences - 1):
            self._state.append(np.zeros(self.state_shape, dtype=np.uint8))
            self._action.append(np.zeros(self.action_shape, dtype=np.float32))
        self._state.append(state)

    def append(self, state, action):
        '''
        :param state: next observation produced by the environment
        :param action: action that led to ``state``
        '''
        self._state.append(state)
        self._action.append(action)

    @property
    def state(self):
        # Shape: (1, num_sequences, *state_shape)
        return np.array(self._state)[None, ...]

    @property
    def action(self):
        # Shape: (1, (num_sequences - 1) * prod(action_shape))
        return np.array(self._action).reshape(1, -1)



def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in ``model``."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total

import os


import collections

class FrameStack(gym.Wrapper):
    """Stack the last ``k`` observations along the first (channel) axis."""

    def __init__(self, env, k):
        super(FrameStack, self).__init__(env)
        self.k = k
        self.frames = collections.deque(maxlen=k)
        shp = env.observation_space.shape
        # The stacked space multiplies the leading dimension by k.
        self.observation_space = gym.spaces.Box(
            low=0, high=255, shape=(shp[0] * k, *shp[1:]), dtype=np.float32
        )

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # Seed the buffer by repeating the first frame k times.
        for _ in range(self.k):
            self.frames.append(obs)
        return self._get_obs(), info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)
        self.frames.append(obs)
        return self._get_obs(), reward, done, truncated, info

    def _get_obs(self):
        # Eagerly stack with NumPy instead of using LazyFrames.
        return np.concatenate(list(self.frames), axis=0)



class ProcessFrame84(gym.ObservationWrapper):
    """
    Convert the game frame (observation) into an 84x84 grayscale image.
    """

    def __init__(self, env=None):
        super(ProcessFrame84, self).__init__(env)
        # New observation space: single-channel 84x84 image, values 0..255.
        self.observation_space = spaces.Box(low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)

    def observation(self, obs):
        """
        Transform one RGB observation into the 84x84x1 grayscale format.
        """
        return ProcessFrame84.process(obs)

    @staticmethod
    def process(img):
        """Convert an RGB frame to an 84x84 single-channel uint8 image.

        Uses the ITU-R BT.601 luma weights for the RGB->gray conversion,
        then resizes with area interpolation.

        NOTE: a dead ``global count_frame`` declaration was removed here;
        that name was never defined or used anywhere.
        """
        gray = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
        resized = cv2.resize(gray, (84, 84), interpolation=cv2.INTER_AREA)
        return np.reshape(resized, [84, 84, 1]).astype(np.uint8)

    @staticmethod
    def save_state_as_image(state, filename):
        """Save the state as a PNG image (debug helper)."""
        # Ensure the state is a NumPy array with dtype uint8.
        if state.dtype != np.uint8:
            # If state is float, scale to [0, 255] and convert to uint8.
            state = np.uint8(255 * (state - state.min()) / (state.max() - state.min()))
        # Remove extra dimensions if necessary.
        state = state.squeeze()
        # Create the image.
        img = Image.fromarray(state)
        # Convert to grayscale if the mode is not directly supported.
        if img.mode not in ('L', 'RGB'):
            img = img.convert('L')
        img.save(filename)



def tie_weights(src, trg):
    """Make ``trg`` share ``src``'s weight and bias tensors (hard parameter tying)."""
    assert type(src) == type(trg)
    for attr in ("weight", "bias"):
        setattr(trg, attr, getattr(src, attr))
    


def weight_init(m):
    """Custom init: orthogonal for Linear, delta-orthogonal for conv layers.

    Conv kernels are zeroed except the center tap, which receives an
    orthogonal matrix (delta-orthogonal init from
    https://arxiv.org/pdf/1806.05393.pdf). Biases are zeroed in all cases.
    """
    if isinstance(m, nn.Linear):
        nn.init.orthogonal_(m.weight.data)
        m.bias.data.fill_(0.0)
    elif isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        # The delta-orthogonal scheme only supports square kernels.
        assert m.weight.size(2) == m.weight.size(3)
        m.weight.data.fill_(0.0)
        m.bias.data.fill_(0.0)
        center = m.weight.size(2) // 2
        nn.init.orthogonal_(
            m.weight.data[:, :, center, center],
            nn.init.calculate_gain('relu'),
        )


class FireResetEnv(gym.Wrapper):
    def __init__(self, env=None):
        """For environments where the user needs to press FIRE for the game to start."""
        super(FireResetEnv, self).__init__(env)
        # Games with a FIRE start button expose it as action index 1 and
        # have at least three actions in total.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def step(self, action):
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        """Reset and press the start actions so the game actually begins.

        Actions 1 and 2 are tried in turn; if either accidentally ends the
        episode, the environment is reset again.
        """
        self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(1)
        if done:
            self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(2)
        if done:
            # BUGFIX: previously the stale observation from step(2) was
            # returned even though that step had just ended the episode;
            # return the fresh observation from the reset instead.
            obs, info = self.env.reset(seed=seed, options=options)
        return obs, info
    


class RewardPenaltyWrapper(gym.Wrapper):
    """Reward-shaping wrapper: penalize losing a life, reward gaining one."""

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty      # NOTE(review): not used by step()
        self.life_loss_penalty = life_loss_penalty
        self.previous_lives = 0
        # NOTE(review): the three fields below are never read in this class.
        self.non_reward_frames = 0
        self.non_reward_frames_limit = 100
        self.non_reward_loss = 0.01

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # Remember the starting life count so step() can detect changes.
        self.previous_lives = info.get('lives', 0)
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        # On any life-count change, replace the reward with a penalty on
        # loss or the matching bonus on gain.
        current_lives = info.get('lives', self.previous_lives)
        if current_lives != self.previous_lives:
            lost_life = current_lives < self.previous_lives
            reward = self.life_loss_penalty if lost_life else -self.life_loss_penalty
            self.previous_lives = current_lives

        return obs, reward, done, truncated, info
    


def wrap_dqn(env, stack_frames=4, action_repeat=4):
    """Apply the DQN preprocessing chain to an environment.

    :param env: base environment
    :param stack_frames: number of rendered frames stacked per observation
    :param action_repeat: frames skipped per action (max-pooled by ptan)
    :return: fully wrapped environment producing PyTorch-ordered images
    """
    skipped = ptan.common.wrappers.MaxAndSkipEnv(env, skip=action_repeat)
    rendered = Ram2RGBWrapper(skipped, n_frames=stack_frames, obs_size=64, gray=False)
    return ptan.common.wrappers.ImageToPyTorch(rendered)


def setup_logger(save_path):
    """Create (or return) the module logger writing to ``<save_path>/train.log``.

    The rotating file handler is attached only once, so calling this
    function repeatedly no longer duplicates every log line (the original
    version added a new handler on each call).

    :param save_path: directory in which the rotating log file is created
    :return: configured ``logging.Logger`` instance
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    if not logger.handlers:
        handler = RotatingFileHandler(os.path.join(save_path, 'train.log'), maxBytes=1024 * 1024, backupCount=2)
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    return logger


def save_model(model_name, loss, best_loss, model):
    """Persist the model, tracking the best (lowest) loss seen so far.

    Always writes ``saves/model_<name>.dat``; additionally writes a
    best-model snapshot when ``loss`` improves on ``best_loss``.

    :param model_name: identifier embedded in the file names
    :param loss: current loss value
    :param best_loss: best loss observed before this call
    :param model: torch model to serialize
    :return: updated best loss (``min(loss, best_loss)``)
    """
    os.makedirs("saves", exist_ok=True)

    if loss < best_loss:
        # BUGFIX: name the snapshot after the NEW loss; previously the
        # stale `best_loss` value ended up in the file name.
        torch.save(model, f'saves/best_model_{model_name}_{loss}.dat')
        best_loss = loss

    torch.save(model, f'saves/model_{model_name}.dat')

    return best_loss


class EpsilonTracker:
    """Linearly anneal the epsilon of an epsilon-greedy action selector.

    Epsilon starts at ``epsilon_start`` and decays by
    ``1 / epsilon_frames`` per frame until it reaches ``epsilon_final``.
    """

    def __init__(self, epsilon_greedy_selector, epsilon_start, epsilon_final, epsilon_frames):
        '''
        :param epsilon_greedy_selector: selector whose ``.epsilon`` is updated
        :param epsilon_start: initial exploration probability
        :param epsilon_final: floor value for epsilon
        :param epsilon_frames: number of frames over which to anneal
        '''
        self.epsilon_greedy_selector = epsilon_greedy_selector
        self.epsilon_start = epsilon_start
        self.epsilon_final = epsilon_final
        self.epsilon_frames = epsilon_frames
        # Initialize the selector's epsilon immediately.
        self.frame(0)

    def frame(self, frame):
        '''
        Update the selector's epsilon for the given global frame count.
        Epsilon decides how often a random action is taken instead of the
        network's prediction.
        '''
        annealed = self.epsilon_start - frame / self.epsilon_frames
        self.epsilon_greedy_selector.epsilon = max(self.epsilon_final, annealed)
        

"""
该类就是用来跟踪、记录、判断激励的追踪类
"""
class RewardTracker:
    """Context manager that records episode rewards, logs training speed,
    and signals when the running mean reward passes the stop threshold."""

    def __init__(self, writer, stop_reward):
        '''
        :param writer: tensorboard writer used for metric logging
        :param stop_reward: mean reward at which training is considered solved
        '''
        self.writer = writer
        self.stop_reward = stop_reward

    def __enter__(self):
        # Timestamp / frame counter of the previous report, for speed estimation.
        self.ts = time.time()
        self.ts_frame = 0
        # History of per-episode rewards collected during training.
        self.total_rewards = []
        return self

    def __exit__(self, *args):
        self.writer.close()

    def reward(self, reward, frame, epsilon=None):
        '''
        Record one finished episode and log progress.

        :param reward: total reward of the finished episode
        :param frame: global frame counter at the time the episode ended
        :param epsilon: current exploration epsilon, if any
        :return: True once the mean of the last 100 rewards exceeds stop_reward
        '''
        self.total_rewards.append(reward)
        # Frames processed since the last report divided by elapsed time.
        speed = (frame - self.ts_frame) / (time.time() - self.ts)
        self.ts_frame = frame
        self.ts = time.time()
        # Running mean over the last (up to) 100 episodes.
        mean_reward = np.mean(self.total_rewards[-100:])
        epsilon_str = "" if epsilon is None else ", eps %.2f" % epsilon
        print("%d: done %d games, mean reward %.3f, speed %.2f f/s%s" % (
            frame, len(self.total_rewards), mean_reward, speed, epsilon_str
        ))
        sys.stdout.flush()
        if epsilon is not None:
            self.writer.add_scalar("epsilon", epsilon, frame)
        self.writer.add_scalar("speed", speed, frame)
        self.writer.add_scalar("reward_100", mean_reward, frame)
        self.writer.add_scalar("reward", reward, frame)
        # Solved once the running mean clears the target.
        if mean_reward > self.stop_reward:
            print("Solved in %d frames!" % frame)
            return True
        return False


def save_best_model(score, state, save_dir, save_name, keep_best=5):
    """Save a best-model snapshot and keep only the top ``keep_best`` files.

    Writes ``<save_name>_<score>.pth`` and removes the lowest-scoring
    snapshots of the same series once more than ``keep_best`` exist.

    Fixes two issues in the original version: the cleanup filter only
    matched files containing the literal substring "best" (so nothing was
    pruned unless ``save_name`` happened to contain it), and the sort key
    used ``int()``, which breaks on float scores.

    :param score: numeric score embedded in the file name; higher is better
    :param state: object to serialize with ``torch.save``
    :param save_dir: directory for the snapshots
    :param save_name: series prefix of the snapshot files
    :param keep_best: number of highest-score snapshots to retain
    """
    os.makedirs(save_dir, exist_ok=True)

    torch.save(state, os.path.join(save_dir, f'{save_name}_{score}.pth'))

    def _score_of(fname):
        # `<save_name>_<score>.pth` -> numeric score (supports floats).
        stem = fname[len(save_name) + 1:]
        try:
            return float(stem.rsplit('.pth', 1)[0])
        except ValueError:
            return float('-inf')  # unparsable names sort first (pruned first)

    snapshots = sorted(
        (f for f in os.listdir(save_dir)
         if f.startswith(save_name + '_') and f.endswith('.pth')),
        key=_score_of,
    )
    if len(snapshots) > keep_best:
        for old_model in snapshots[:-keep_best]:
            os.remove(os.path.join(save_dir, old_model))
    
def save_checkpoints(iter, state, checkpoint_dir, save_name, keep_last=5):
    """Save a training checkpoint and prune old ones.

    Writes ``<save_name>_epoch_<iter>.pth`` into ``checkpoint_dir`` and
    keeps only the ``keep_last`` checkpoints with the highest epoch numbers.

    :param iter: integer epoch/iteration embedded in the file name
    :param state: object to serialize with ``torch.save``
    :param checkpoint_dir: directory for the checkpoints
    :param save_name: prefix of the checkpoint files
    :param keep_last: number of most-recent checkpoints to retain
    """
    os.makedirs(checkpoint_dir, exist_ok=True)

    torch.save(state, os.path.join(checkpoint_dir, f'{save_name}_epoch_{iter}.pth'))

    def _epoch_of(fname):
        # Trailing number of `<save_name>_epoch_<iter>.pth`.
        return int(fname.split('_')[-1].split('.')[0])

    existing = sorted(
        (f for f in os.listdir(checkpoint_dir) if "epoch" in f),
        key=_epoch_of,
    )
    if len(existing) > keep_last:
        for stale in existing[:-keep_last]:
            os.remove(os.path.join(checkpoint_dir, stale))


class EnvSampleAgent(ptan.agent.BaseAgent):
    """Agent that ignores the observation and samples a uniformly random
    action from the wrapped environment's action space."""

    def __init__(self, env, device="cpu"):
        '''
        :param env: environment providing the action space to sample from
        :param device: kept for interface compatibility; unused here
        '''
        self.env = env
        self.device = device

    @torch.no_grad()
    def __call__(self, observation, agent_states=None):
        """Return one randomly sampled action, passing agent states through."""
        return [self.env.action_space.sample()], agent_states
    


class SlacRandomAgent(ptan.agent.BaseAgent):
    """Agent that maintains a sliding SlacObservation window and feeds it
    to ``net`` to pick actions."""

    def __init__(self, net, obs_shape, action_shape, num_sequences, device="cpu"):
        '''
        :param net: policy callable mapping a SlacObservation to an action
        :param obs_shape: shape of a single observation
        :param action_shape: shape of a single action
        :param num_sequences: window length of the SLAC observation buffer
        :param device: kept for interface compatibility
        '''
        super().__init__()
        self.net = net
        self.device = device
        # Window buffer used both when collecting data and when evaluating.
        self.ob = SlacObservation(obs_shape, action_shape, num_sequences)
        self.reset = True
        self.pre_action = None

    def initial_state(self):
        """Mark the buffer for re-initialization on the next call."""
        self.reset = True
        return None

    @torch.no_grad()
    def __call__(self, observation, agent_states=None):
        # ptan may hand observations wrapped in a single-element list.
        if isinstance(observation, list):
            observation = observation[0]
        if self.reset:
            self.ob.reset_episode(observation)
            self.reset = False
        else:
            # Record the transition produced by the previous action.
            self.ob.append(observation, self.pre_action)

        action = self.net(self.ob)
        self.pre_action = action
        return [action], agent_states



def create_feature_actions(feature_, action_):
    '''
    Build flattened (feature, action) pairs for consecutive time steps.

    :param feature_: tensor of shape (N, num_sequences + 1, feature_dim)
    :param action_: tensor of shape (N, num_sequences, action_dim)
    :return: (fa, n_fa) where ``fa`` concatenates the first num_sequences
        features with the first num_sequences - 1 actions, and ``n_fa`` is
        the same pairing shifted one step forward in time.
    '''
    batch = feature_.size(0)
    # Current-step view: drop the last feature / last action, then flatten.
    cur_feat = feature_[:, :-1].view(batch, -1)
    cur_act = action_[:, :-1].view(batch, -1)
    # Next-step view: drop the first feature / first action, then flatten.
    nxt_feat = feature_[:, 1:].view(batch, -1)
    nxt_act = action_[:, 1:].view(batch, -1)
    return (
        torch.cat([cur_feat, cur_act], dim=-1),
        torch.cat([nxt_feat, nxt_act], dim=-1),
    )


def soft_update(target, source, tau):
    """Polyak-average source parameters into target, in place.

    target <- (1 - tau) * target + tau * source
    """
    for tgt_p, src_p in zip(target.parameters(), source.parameters()):
        tgt_p.data.mul_(1.0 - tau).add_(tau * src_p.data)


def grad_false(network):
    """Freeze every parameter of ``network`` so no gradients are tracked."""
    for p in network.parameters():
        p.requires_grad = False


def build_mlp(
    input_dim,
    output_dim,
    hidden_units=None,
    hidden_activation=None,
    output_activation=None,
):
    '''
    Build a fully-connected network.

    :param input_dim: input feature dimension
    :param output_dim: output dimension
    :param hidden_units: sizes of the hidden layers (default: [64, 64]).
        Uses a None sentinel to avoid the original mutable default argument.
    :param hidden_activation: activation module placed after each hidden
        layer (default: ``nn.Tanh()``). A fresh instance is created per
        call instead of one module instance shared across every call site.
    :param output_activation: optional activation appended after the
        output layer
    :return: ``nn.Sequential`` implementing the MLP
    '''
    if hidden_units is None:
        hidden_units = [64, 64]
    if hidden_activation is None:
        hidden_activation = nn.Tanh()
    layers = []
    units = input_dim
    for next_units in hidden_units:
        layers.append(nn.Linear(units, next_units))
        layers.append(hidden_activation)
        units = next_units
    layers.append(nn.Linear(units, output_dim))
    if output_activation is not None:
        layers.append(output_activation)
    return nn.Sequential(*layers)


def calculate_gaussian_log_prob(log_std, noise):
    '''
    Log-density of ``noise`` under a diagonal Gaussian.

    Per sample, computes sum_i [ -0.5 * noise_i^2 - log_std_i ] minus the
    normalization constant 0.5 * D * log(2*pi), where D is the size of the
    last dimension. This is the joint log-pdf of a D-dimensional Gaussian
    evaluated at standardized noise values.

    :param log_std: log standard deviations, shape (..., D)
    :param noise: standardized noise, i.e. (x - mean) / std, shape (..., D)
    :return: log-probabilities, shape (..., 1)
    '''
    dims = log_std.size(-1)
    per_dim = -0.5 * noise.pow(2) - log_std
    normalizer = 0.5 * math.log(2 * math.pi) * dims
    return per_dim.sum(dim=-1, keepdim=True) - normalizer


def calculate_log_pi(log_std, noise, action):
    '''
    Log-probability of a tanh-squashed Gaussian action.

    Starts from the Gaussian log-density of ``noise`` and applies the
    change-of-variables correction for the tanh squashing:
    log pi(a|s) = log N(noise) - sum_i log(1 - a_i^2 + 1e-6).

    :param log_std: log standard deviations of the pre-squash Gaussian
    :param noise: standardized sampling noise
    :param action: squashed action, i.e. tanh(mean + noise * exp(log_std))
    :return: log-probabilities, shape (..., 1)
    '''
    base_log_prob = calculate_gaussian_log_prob(log_std, noise)
    # Jacobian correction for tanh; the 1e-6 guards log(0) as |a| -> 1.
    squash_correction = torch.log(1 - action.pow(2) + 1e-6).sum(dim=-1, keepdim=True)
    return base_log_prob - squash_correction


def reparameterize(mean, log_std):
    '''
    Sample a tanh-squashed action via the reparameterization trick.

    :param mean: predicted action mean
    :param log_std: predicted log standard deviation
    :return: (action, log_pi); tanh rescales the sample into the action
        range, so log_pi includes the matching change-of-variables
        correction computed by ``calculate_log_pi``.
    '''
    eps = torch.randn_like(mean)
    squashed = torch.tanh(mean + eps * log_std.exp())
    return squashed, calculate_log_pi(log_std, eps, squashed)


def calculate_kl_divergence(p_mean, p_std, q_mean, q_std):
    '''
    Element-wise KL divergence KL(p || q) between diagonal Gaussians.

    Uses the closed form
    0.5 * (sigma_p^2 / sigma_q^2 + (mu_p - mu_q)^2 / sigma_q^2
           - 1 - log(sigma_p^2 / sigma_q^2)).
    '''
    var_ratio = (p_std / q_std) ** 2
    scaled_mean_diff = ((p_mean - q_mean) / q_std) ** 2
    return 0.5 * (var_ratio + scaled_mean_diff - 1 - var_ratio.log())



def reparameterize(mean, log_std):
    '''
    Sample a tanh-squashed action via the reparameterization trick.

    NOTE(review): this is an exact duplicate of `reparameterize` defined
    earlier in this file; because both definitions are identical the
    later one harmlessly rebinds the name, but one of them should be
    removed.

    :param mean: predicted action mean
    :param log_std: predicted log standard deviation
    :return: (action, log_pi) with the tanh change-of-variables correction
    '''
    noise = torch.randn_like(mean)
    # tanh rescales the sampled value into the action range, so the log
    # probability must include the matching correction (calculate_log_pi).
    action = torch.tanh(mean + noise * log_std.exp())
    return action, calculate_log_pi(log_std, noise, action)
    

def initialize_weight(m):
    '''
    Xavier-uniform init for weights and zero init for biases.

    Applies to Linear, Conv2d and ConvTranspose2d modules; any other
    module type is left untouched.
    '''
    if isinstance(m, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
        nn.init.xavier_uniform_(m.weight, gain=1.0)
        if m.bias is not None:
            nn.init.constant_(m.bias, 0)



class TransposeObservation(gym.ObservationWrapper):
    """Reorder image observations from (H, W, C) to (C, H, W)."""

    def __init__(self, env=None):
        super(TransposeObservation, self).__init__(env)

    def observation(self, observation):
        # channels-last -> channels-first, as expected by PyTorch conv layers
        return observation.transpose(2, 0, 1)
    


class Ram2RGBWrapper(gym.Wrapper):
    """Replace raw observations with stacked rendered frames.

    Renders the environment to an RGB image every step, resizes it to
    ``obs_size`` x ``obs_size``, optionally converts it to grayscale, and
    stacks the last ``n_frames`` along the channel axis.
    """

    def __init__(self, env, n_frames=4, obs_size=96, gray=True):
        super().__init__(env)
        self.env = env
        self.n_frames = n_frames
        self.frames = deque([], maxlen=n_frames)
        self.obs_size = obs_size
        self.gray = gray

        # Advertise the rendered-image space instead of the original one.
        channels = (1 if self.gray else 3) * n_frames
        self.observation_space = gym.spaces.Box(
            low=0, high=255,
            shape=(self.obs_size, self.obs_size, channels),
            dtype=np.uint8,
        )

        self.obs = []

    def _get_rgb_observation(self):
        """Render, resize and (optionally) grayscale a single frame."""
        frame = self.env.render()
        frame = cv2.resize(frame, (self.obs_size, self.obs_size))
        if self.gray:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
            frame = np.expand_dims(frame, axis=-1)
        return frame

    def reset(self, **kwargs):
        _, info = self.env.reset(**kwargs)
        first = self._get_rgb_observation()
        # Fill the stack with copies of the first rendered frame.
        for _ in range(self.n_frames):
            self.frames.append(first)
        return np.concatenate(list(self.frames), axis=-1), info

    def step(self, action):
        _, reward, terminated, truncated, info = self.env.step(action)
        self.frames.append(self._get_rgb_observation())
        return np.concatenate(list(self.frames), axis=-1), reward, terminated, truncated, info
    

def unpack_latent_batch(batch, batch_size_latent, num_sequences, obs_shape, action_shape, device):
    '''
    Unpack a batch of sampled step sequences into tensors for latent-model training.

    NOTE(review): each element of `batch` appears to be a sequence of
    num_sequences + 1 entries where entry[t][0] is an
    (observation, action, reward, done) tuple — TODO confirm against the
    replay buffer that produces `batch`.

    :param batch: sampled sequences, indexed as batch[b][t][0]
    :param batch_size_latent: number of sequences in the batch
    :param num_sequences: transitions per sequence (observations get one
        extra leading entry)
    :param obs_shape: shape of a single observation
    :param action_shape: shape of a single action (only action_shape[0] is used)
    :param device: torch device for the returned tensors
    :return: (state_, action_, reward_, done_) tensors; states are uint8
        frames scaled to floats in [0, 1]
    '''
    observations = np.empty((batch_size_latent, num_sequences + 1) + obs_shape, dtype=np.uint8)
    actions = np.empty((batch_size_latent, num_sequences, action_shape[0]), dtype=np.float32)
    rewards = np.empty((batch_size_latent, num_sequences, 1), dtype=np.float32)
    done = np.empty((batch_size_latent, num_sequences, 1), dtype=np.float32)
    for batch_idx in range(0, batch_size_latent):
        cur_batch = batch[batch_idx]
        # The first entry carries only the initial observation.
        observations[batch_idx][0] = cur_batch[0][0][0]
        for step_idx in range(1, num_sequences + 1):
            # Subsequent entries provide obs/action/reward/done per step.
            observations[batch_idx][step_idx] = cur_batch[step_idx][0][0]
            actions[batch_idx][step_idx - 1] = cur_batch[step_idx][0][1]
            rewards[batch_idx][step_idx - 1] = cur_batch[step_idx][0][2]
            done[batch_idx][step_idx - 1] = cur_batch[step_idx][0][3]


    # Normalize pixel values to [0, 1] and move everything to the target device.
    state_ = torch.tensor(observations, dtype=torch.float).div_(255.0).to(device=device)
    action_ = torch.tensor(actions).to(device=device)
    reward_ = torch.tensor(rewards).to(device=device)
    done_ = torch.tensor(done).to(device=device)

    return state_, action_, reward_, done_


def unpack_sac_batch(batch, batch_size, num_sequences, obs_shape, action_shape, device):
    '''
    Unpack a batch of sampled step sequences into tensors for SAC training.

    Unlike unpack_latent_batch, only the reward and done flag of the LAST
    step in each sequence are kept (one reward/done per sequence).

    NOTE(review): each element of `batch` appears to be a sequence of
    num_sequences + 1 entries where entry[t][0] is an
    (observation, action, reward, done) tuple — TODO confirm against the
    replay buffer that produces `batch`.

    :param batch: sampled sequences, indexed as batch[b][t][0]
    :param batch_size: number of sequences in the batch
    :param num_sequences: transitions per sequence (observations get one
        extra leading entry)
    :param obs_shape: shape of a single observation
    :param action_shape: shape of a single action (only action_shape[0] is used)
    :param device: torch device for the returned tensors
    :return: (state_, action_, reward_, done_) tensors; states are uint8
        frames scaled to floats in [0, 1]
    '''
    observations = np.empty((batch_size, num_sequences + 1) + obs_shape, dtype=np.uint8)
    actions = np.empty((batch_size, num_sequences, action_shape[0]), dtype=np.float32)
    rewards = np.empty((batch_size, 1), dtype=np.float32)
    done = np.empty((batch_size, 1, 1), dtype=np.float32)
    for batch_idx in range(0, batch_size):
        cur_batch = batch[batch_idx]
        # The first entry carries only the initial observation.
        observations[batch_idx][0] = cur_batch[0][0][0]
        # Reward/done come from the final transition of the sequence.
        rewards[batch_idx][0] = cur_batch[-1][0][2]
        done[batch_idx][0] = cur_batch[-1][0][3]
        for step_idx in range(1, num_sequences + 1):
            observations[batch_idx][step_idx] = cur_batch[step_idx][0][0]
            actions[batch_idx][step_idx - 1] = cur_batch[step_idx][0][1]


    # Normalize pixel values to [0, 1] and move everything to the target device.
    state_ = torch.tensor(observations, dtype=torch.float).div_(255.0).to(device=device)
    action_ = torch.tensor(actions).to(device=device)
    reward_ = torch.tensor(rewards).to(device=device)
    done_ = torch.tensor(done).to(device=device)

    return state_, action_, reward_, done_