import os.path
from collections import deque
from typing import Any
import numpy as np
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import ptan
import gymnasium as gym
from gymnasium import spaces
from dataclasses import dataclass, field
from typing import Tuple, Optional
import cv2
from PIL import Image
from torch import distributions as pyd
import math
import time



def save_best_model(score, state, save_dir, save_name, keep_best = 5):
    """Save `state` as '<save_name>_<score>.pth' and prune low-score files.

    param score: score used to rank this model (embedded in the filename)
    param state: object to serialize with torch.save
    param save_dir: directory to write into (created if missing)
    param save_name: filename prefix shared by all saved models
    param keep_best: number of highest-score files to retain
    """
    os.makedirs(save_dir, exist_ok=True)

    save_path = os.path.join(save_dir, f'{save_name}_{score}.pth')
    torch.save(state, save_path)

    def _score_of(fname):
        # filename layout: '<save_name>_<score>.pth'; parse with float() so
        # fractional scores (e.g. '12.5') rank correctly instead of truncating
        return float(fname[len(save_name) + 1:-len('.pth')])

    # Bug fix: match files for THIS save_name instead of the hard-coded
    # substring "best", which silently skipped cleanup for other prefixes.
    all_model = sorted(
        (f for f in os.listdir(save_dir)
         if f.startswith(f'{save_name}_') and f.endswith('.pth')),
        key=_score_of)
    if len(all_model) > keep_best:
        for old_model in all_model[:-keep_best]:
            os.remove(os.path.join(save_dir, old_model))
    
def save_checkpoints(iter, state, checkpoint_dir, save_name, keep_last=5):
    """Save `state` as '<save_name>_epoch_<iter>.pth', keeping recent ones.

    param iter: epoch/iteration number (embedded in the filename)
    param state: object to serialize with torch.save
    param checkpoint_dir: directory to write into (created if missing)
    param save_name: filename prefix
    param keep_last: number of most recent checkpoints (by epoch) to retain
    """
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs
    # and matches the sibling save_best_model helper.
    os.makedirs(checkpoint_dir, exist_ok=True)

    checkpoint_path = os.path.join(checkpoint_dir, f'{save_name}_epoch_{iter}.pth')
    torch.save(state, checkpoint_path)

    # rank checkpoint files by the integer epoch parsed from the filename
    all_checkpoints = sorted(
        (f for f in os.listdir(checkpoint_dir) if "epoch" in f),
        key=lambda f: int(f.split('_')[-1].split('.')[0]))
    if len(all_checkpoints) > keep_last:
        for old_checkpoint in all_checkpoints[:-keep_last]:
            os.remove(os.path.join(checkpoint_dir, old_checkpoint))


"""
该类就是用来跟踪、记录、判断激励的追踪类
"""
class RewardTracker:
    """Tracks episode rewards during training, logs statistics to the
    writer, and signals when the moving-average reward hits the target."""

    def __init__(self, writer, stop_reward):
        '''
        param writer: tensorboard-style writer used for logging
        param stop_reward: target mean reward at which training stops
        '''
        self.writer = writer
        self.stop_reward = stop_reward

    def __enter__(self):
        self.ts = time.time()
        self.ts_frame = 0
        # reward of every completed episode seen so far
        self.total_rewards = []
        return self

    def __exit__(self, *args):
        pass

    def reward(self, reward, frame, epsilon=None):
        '''
        param reward: total reward of the episode that just finished
        param frame: number of frames processed so far
        param epsilon: current exploration epsilon (optional)

        return: True once the mean reward over the last 100 episodes
                exceeds stop_reward, otherwise False
        '''
        self.total_rewards.append(reward)
        # frames per second since the previous call
        speed = (frame - self.ts_frame) / (time.time() - self.ts)
        self.ts_frame = frame
        self.ts = time.time()
        # moving average over the most recent 100 episodes
        mean_reward = np.mean(self.total_rewards[-100:])
        if epsilon is None:
            epsilon_str = ""
        else:
            epsilon_str = ", eps %.2f" % epsilon
        print("%d: done %d games, mean reward %.3f, speed %.2f f/s%s" % (
            frame, len(self.total_rewards), mean_reward, speed, epsilon_str
        ))
        sys.stdout.flush()
        if epsilon is not None:
            self.writer.log("train/epsilon", epsilon, frame)
        self.writer.log("train/speed", speed, frame)
        self.writer.log("train/reward_100", mean_reward, frame)
        self.writer.log("train/reward", reward, frame)
        # solved once the moving average clears the target
        if mean_reward > self.stop_reward:
            print("Solved in %d frames!" % frame)
            return True
        return False


@dataclass
class AgentState(ptan.experience.BaseAgentState):
    # Per-environment recurrent agent state carried between steps.
    obs: torch.Tensor  # latest observation (acts as next_obs)
    action_dim: int  # size of the discrete action space
    last_action: torch.Tensor = field(init=False)  # one-hot action that led to obs
    # Bug fix: use default_factory so every instance gets its own tensor.
    # A plain class-level tensor default is a single shared object that any
    # in-place mutation would leak across all AgentState instances.
    last_reward: torch.Tensor = field(default_factory=lambda: torch.zeros((1, 1), dtype=torch.float32))
    hidden_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None  # LSTM (h, c)
    q_value: torch.Tensor = field(init=False)

    def __post_init__(self):
        # start with an all-zero one-hot vector: no action taken yet
        self.last_action = torch.zeros((1, self.action_dim), dtype=torch.float32)

    def update(self, obs, action, reward, done, next_obs):
        '''
        Refresh the state after one environment step.
        obs: previous observation (unused; kept for interface symmetry)
        action: discrete action index that was executed
        reward: reward received for that action
        done: episode-termination flag (unused here)
        next_obs: resulting observation (numpy array, stored batched)
        '''
        self.obs = torch.from_numpy(next_obs).unsqueeze(0)
        # encode the executed action as a (1, action_dim) one-hot row
        self.last_action = torch.FloatTensor([[1 if i == action else 0 for i in range(self.action_dim)]])
        self.last_reward = torch.FloatTensor([[reward]])


class eval_mode(object):
    """Context manager that switches the given modules to eval mode and
    restores each module's previous training flag on exit."""

    def __init__(self, *models):
        self.models = models

    def __enter__(self):
        # snapshot every module's training flag before forcing eval mode
        self.prev_states = [m.training for m in self.models]
        for m in self.models:
            m.train(False)

    def __exit__(self, *args):
        # restore the recorded flags; never suppress exceptions
        for m, was_training in zip(self.models, self.prev_states):
            m.train(was_training)
        return False


class DrqAgent(ptan.agent.BaseAgent):
    """ptan agent wrapping a DrQ policy network.

    For the first `params.num_seed_steps` calls it samples uniformly random
    actions (replay-buffer warm-up); afterwards it queries the network in
    eval mode.
    """

    def __init__(self, params, net, env, device='cpu', preprocessor=ptan.agent.default_states_preprocessor):
        super().__init__()
        self.params = params
        self.net = net
        self.device = device
        self.step_count = 0  # number of __call__ invocations so far
        self.env = env
        self.preprocessor = preprocessor

    def initial_state(self):
        # this agent keeps no per-environment recurrent state
        return None

    @torch.no_grad()
    def __call__(self, states, agent_states):
        """
        Convert observations and states into actions to take
        :param states: list of environment states to process
        :param agent_states: list of states with the same length as observations
        :return: tuple of actions, states
        """
        if self.step_count < self.params.num_seed_steps:
            # warm-up phase: uniformly random actions
            actions = [self.env.action_space.sample() for _ in states]
        else:
            if self.preprocessor is not None:
                states = self.preprocessor(states)
                if torch.is_tensor(states):
                    states = states.to(self.device)
            # query the policy with training-mode layers disabled
            with eval_mode(self.net):
                actions = self.net.act(states)

        self.step_count += 1
        return actions, agent_states


class TanhTransform(pyd.transforms.Transform):
    """Bijective tanh squashing transform: maps R onto (-1, 1)."""

    domain = pyd.constraints.real
    codomain = pyd.constraints.interval(-1.0, 1.0)
    bijective = True
    sign = +1

    def __init__(self, cache_size=1):
        super().__init__(cache_size=cache_size)

    @staticmethod
    def atanh(x):
        # numerically stable inverse hyperbolic tangent
        return 0.5 * (torch.log1p(x) - torch.log1p(-x))

    def __eq__(self, other):
        # every TanhTransform instance performs the identical mapping
        return isinstance(other, TanhTransform)

    def _call(self, x):
        return torch.tanh(x)

    def _inverse(self, value):
        # Boundary values are deliberately NOT clamped here, since clamping
        # can degrade some algorithms; rely on `cache_size=1` instead.
        return self.atanh(value)

    def log_abs_det_jacobian(self, x, y):
        # Numerically stable form of log|d tanh(x)/dx|; see
        # https://github.com/tensorflow/probability/commit/ef6bb176e0ebd1cf6e25c6b5cecdd2428c22963f#diff-e120f70e92e6741bca649f04fcd907b7
        return 2. * (math.log(2.) - x - F.softplus(-2. * x))



class ScaledTanhTransform(pyd.transforms.Transform):
    """Tanh transform generalized to an arbitrary interval [low, high]."""

    bijective = True
    sign = +1

    def __init__(self, low, high, cache_size=1):
        super().__init__(cache_size=cache_size)
        self.low = low
        self.high = high
        # affine parameters mapping (-1, 1) onto (low, high)
        self.scale = (high - low) / 2.0
        self.loc = (high + low) / 2.0

        # domain/codomain depend on the interval, so set them per instance
        self.domain = pyd.constraints.real
        self.codomain = pyd.constraints.interval(low, high)

    @staticmethod
    def atanh(x):
        """Numerically stable atanh (same formulation as TanhTransform)."""
        return 0.5 * (x.log1p() - (-x).log1p())

    def __eq__(self, other):
        """Two transforms are equal iff they squash onto the same interval."""
        if not isinstance(other, ScaledTanhTransform):
            return False
        return self.low == other.low and self.high == other.high

    def _call(self, x):
        """Forward map R -> [low, high]: x -> scale * tanh(x) + loc."""
        return torch.tanh(x) * self.scale + self.loc

    def _inverse(self, y):
        """Inverse map [low, high] -> R: undo the affine part, then atanh."""
        return self.atanh((y - self.loc) / self.scale)

    def log_abs_det_jacobian(self, x, y):
        """
        log|dy/dx| for y = scale * tanh(x) + loc:

        dy/dx = scale * (1 - tanh^2(x)) = scale * sech^2(x)
        log|dy/dx| = log|scale| + 2*log(sech(x))
        """
        # numerically stable tanh jacobian, plus the constant scale term
        stable_tanh_jac = 2. * (math.log(2.) - x - F.softplus(-2. * x))
        return stable_tanh_jac + math.log(abs(self.scale))

class SquashedNormal(pyd.transformed_distribution.TransformedDistribution):
    """Normal distribution squashed through a tanh onto [low, high]."""

    def __init__(self, loc, scale, low=-1.0, high=1.0):
        '''
        loc: mean of the base Normal
        scale: standard deviation of the base Normal
        low: lower bound of the squashed support
        high: upper bound of the squashed support
        '''
        self.loc = loc
        self.scale = scale
        self.low = low
        self.high = high

        self.base_dist = pyd.Normal(loc, scale)

        # canonical [-1, 1] range uses the plain tanh transform,
        # any other interval uses the scaled variant
        if low == -1.0 and high == 1.0:
            squash = [TanhTransform()]
        else:
            squash = [ScaledTanhTransform(low, high)]

        super().__init__(self.base_dist, squash)

    @property
    def mean(self):
        """Mean of the squashed distribution: loc pushed through the transforms."""
        mu = self.loc
        for transform in self.transforms:
            mu = transform(mu)
        return mu
    


def select_device(gpu):
    """Pick the best available torch device.

    param gpu: when True, prefer CUDA, then Apple MPS; otherwise CPU.
    return: torch.device instance
    """
    if gpu:
        if torch.cuda.is_available():
            return torch.device("cuda")
        if torch.backends.mps.is_available():
            return torch.device("mps")
    return torch.device("cpu")


def select_device_str(gpu):
    """Pick the best available torch device name.

    param gpu: when True, prefer CUDA, then Apple MPS; otherwise CPU.
    return: device name as a string ("cuda", "mps" or "cpu")
    """
    if gpu:
        if torch.cuda.is_available():
            return "cuda"
        if torch.backends.mps.is_available():
            return "mps"
    return "cpu"



class ProcessFrame(gym.ObservationWrapper):
    """Converts game frames (the observation space) to size x size images,
    optionally grayscale.

    The observation space becomes a uint8 Box of shape (size, size, 1) for
    grayscale or (size, size, 3) for color, values in 0..255.
    """

    def __init__(self, env=None, gray=True, size=84):
        super(ProcessFrame, self).__init__(env)
        # new observation space: 0..255 image of the requested size
        self.observation_space = spaces.Box(low=0, high=255, shape=(size, size, 1 if gray else 3), dtype=np.uint8)
        self.gray = gray
        self.size = size

    def observation(self, obs):
        """Transform one raw frame into the processed observation."""
        return ProcessFrame.process(obs, self.gray, self.size)

    @staticmethod
    def process(img, gray=True, size=84):
        """Crop, resize and (optionally) grayscale a raw RGB frame.

        NOTE(review): the row crop [19:169] assumes ~210-row Atari frames
        (drops the score/HUD area) — confirm for other environments.
        """
        # Bug fix: removed a stray `global count_frame` declaration that
        # referenced a global never defined anywhere.
        x_t = img
        if gray:
            # ITU-R BT.601 luma weights for RGB -> grayscale
            x_t = x_t[:, :, 0] * 0.299 + x_t[:, :, 1] * 0.587 + x_t[:, :, 2] * 0.114

        x_t = x_t[19:169, :]
        x_t = cv2.resize(x_t, (size, size), interpolation=cv2.INTER_AREA)
        x_t = np.reshape(x_t, [size, size, 1 if gray else 3])
        return x_t.astype(np.uint8)

    @staticmethod
    def save_state_as_image(state, filename):
        """Save the state as a PNG image (debug helper)."""
        # Ensure the state is a NumPy array with dtype uint8
        if state.dtype != np.uint8:
            # min-max scale float states to [0, 255] before conversion
            state = np.uint8(255 * (state - state.min()) / (state.max() - state.min()))
        # drop singleton channel dimension if present
        state = state.squeeze()
        img = Image.fromarray(state)
        # convert to grayscale if the array produced an unsupported mode
        if img.mode not in ('L', 'RGB'):
            img = img.convert('L')
        img.save(filename)


class FireResetEnv(gym.Wrapper):
    def __init__(self, env=None):
        """For environments where the user need to press FIRE for the game to start."""
        super(FireResetEnv, self).__init__(env)
        # such games expose FIRE as action 1 and have at least 3 actions total
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def step(self, action):
        # pass-through; only reset() is modified
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        # After a reset we don't know which low-index action actually starts
        # the game, so we press actions 1 and 2 in turn; if a press happens to
        # terminate the episode, reset again.
        self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(1)
        if done:
            self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(2)
        if done:
            # Bug fix: return the fresh observation/info from the new reset
            # instead of the stale pair from the terminated step above.
            obs, info = self.env.reset(seed=seed, options=options)
        return obs, info
    


class RewardPenaltyWrapper(gym.Wrapper):
    """Replaces the reward with a fixed penalty whenever the agent loses a life.

    NOTE(review): `frame_penalty` is stored but never applied in step() —
    confirm whether a per-frame penalty was intended.
    """

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty
        self.life_loss_penalty = life_loss_penalty
        self.previous_lives = 0

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # record the starting life count so the first loss can be detected
        self.previous_lives = info.get('lives', 0)
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        # on a life loss, override the reward with the penalty value
        current_lives = info.get('lives', self.previous_lives)
        if current_lives < self.previous_lives:
            reward = self.life_loss_penalty
            self.previous_lives = current_lives

        return obs, reward, done, truncated, info
    

class Ram2RGBWrapper(gym.Wrapper):
    """Replaces the wrapped env's observations with rendered, frame-stacked images.

    The env's native observation is discarded; instead the screen is rendered,
    resized to obs_size x obs_size (optionally grayscale), and the most recent
    n_frames frames are stacked along the channel axis.
    """

    def __init__(self, env, n_frames=4, obs_size=96, gray=True):
        super().__init__(env)
        self.env = env
        self.n_frames = n_frames
        self.frames = deque([], maxlen=n_frames)  # rolling window of rendered frames
        self.obs_size = obs_size
        self.gray = gray

        # advertise the stacked-image observation space
        channels = (1 if self.gray else 3) * n_frames
        self.observation_space = gym.spaces.Box(
            low=0, high=255,
            shape=(self.obs_size, self.obs_size, channels),
            dtype=np.uint8)

        self.obs = []

    def _get_rgb_observation(self):
        # render the screen, resize, and optionally reduce to one gray channel
        frame = self.env.render()
        frame = cv2.resize(frame, (self.obs_size, self.obs_size))
        if self.gray:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
            frame = np.expand_dims(frame, axis=-1)
        return frame

    def reset(self, **kwargs):
        _, info = self.env.reset(**kwargs)
        # fill the whole stack with the first rendered frame
        first = self._get_rgb_observation()
        for _ in range(self.n_frames):
            self.frames.append(first)
        return np.concatenate(list(self.frames), axis=-1), info

    def step(self, action):
        _, reward, terminated, truncated, info = self.env.step(action)
        self.frames.append(self._get_rgb_observation())
        return np.concatenate(list(self.frames), axis=-1), reward, terminated, truncated, info


def wrap_dqn(env, stack_frames=4, action_repeat=4, obs_size=84):
    """Standard DQN preprocessing chain: frame skip/max, rendered frame
    stacking, then HWC -> CHW conversion for PyTorch."""
    env = ptan.common.wrappers.MaxAndSkipEnv(env, skip=action_repeat)
    env = Ram2RGBWrapper(env, n_frames=stack_frames, obs_size=obs_size, gray=False)
    return ptan.common.wrappers.ImageToPyTorch(env)



def save_model(model_name, loss, best_loss, model):
    """Persist `model` into 'saves/'; keep an extra copy when `loss` improves.

    param model_name: identifier embedded in the output file names
    param loss: loss achieved by this model
    param best_loss: best loss seen so far
    param model: torch object to serialize
    return: updated best loss (min of loss and best_loss)
    """
    os.makedirs("saves", exist_ok=True)

    if loss < best_loss:
        # Bug fix: name the snapshot after the NEW best loss; previously the
        # filename embedded the stale `best_loss` from before this improvement.
        torch.save(model, f'saves/best_model_{model_name}_{loss}.dat')
        best_loss = loss

    # always refresh the "latest" copy
    torch.save(model, f'saves/model_{model_name}.dat')

    return best_loss


def tie_weights(src, trg):
    """Hard-tie parameters: point `trg`'s weight and bias at `src`'s tensors."""
    # only tie modules of the exact same class
    assert type(src) == type(trg)
    for attr in ("weight", "bias"):
        setattr(trg, attr, getattr(src, attr))



def weight_init(m):
    """Custom weight init: orthogonal weights, zero biases.

    Linear layers use the default gain of 1; Conv2d/ConvTranspose2d layers
    use the ReLU gain. Other module types are left untouched.
    """
    if isinstance(m, nn.Linear):
        nn.init.orthogonal_(m.weight.data)
        if hasattr(m.bias, 'data'):
            m.bias.data.fill_(0.0)
    elif isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        # scale the orthogonal init for ReLU nonlinearities
        nn.init.orthogonal_(m.weight.data, nn.init.calculate_gain('relu'))
        if hasattr(m.bias, 'data'):
            m.bias.data.fill_(0.0)


def mlp(input_dim, hidden_dim, output_dim, hidden_depth, output_mod=None):
    """Build a ReLU MLP as an nn.Sequential.

    hidden_depth == 0 yields a single Linear(input_dim, output_dim);
    otherwise `hidden_depth` hidden layers of `hidden_dim` units with
    inplace ReLUs, followed by the output Linear. `output_mod`, when
    given, is appended as the final module.
    """
    if hidden_depth == 0:
        layers = [nn.Linear(input_dim, output_dim)]
    else:
        layers = [nn.Linear(input_dim, hidden_dim), nn.ReLU(inplace=True)]
        for _ in range(hidden_depth - 1):
            layers.extend([nn.Linear(hidden_dim, hidden_dim), nn.ReLU(inplace=True)])
        layers.append(nn.Linear(hidden_dim, output_dim))
    if output_mod is not None:
        layers.append(output_mod)
    return nn.Sequential(*layers)

def to_np(t):
    """Convert a tensor to a numpy array.

    None passes through; an empty tensor becomes an empty float array;
    otherwise the tensor is detached, moved to CPU and converted.
    """
    if t is None:
        return None
    if t.nelement() == 0:
        return np.array([])
    return t.detach().cpu().numpy()