import os.path
from typing import Any
import numpy as np
import sys
import torch
import argparse
import random
import ptan
import gymnasium as gym
from gymnasium import spaces
import logging
from logging.handlers import RotatingFileHandler
from dataclasses import dataclass, field
from typing import Tuple, Optional
import cv2
from PIL import Image
import time
from collections import Counter



def save_best_model(score, state, save_dir, save_name, keep_best=5):
    """Persist ``state`` as a best-model file and prune lower-scoring ones.

    The file is written to ``<save_dir>/<save_name>_<score>.pth``; afterwards
    only the ``keep_best`` files with the highest score are kept.

    :param score: numeric score embedded in the filename and used for ranking
    :param state: object handed to ``torch.save`` (e.g. a state_dict)
    :param save_dir: directory the model files are stored in
    :param save_name: filename prefix shared by this run's best models
    :param keep_best: number of highest-scoring files to retain
    """
    os.makedirs(save_dir, exist_ok=True)

    save_path = os.path.join(save_dir, f'{save_name}_{score}.pth')
    torch.save(state, save_path)

    # Previously candidates were matched with `"best" in name`, so pruning was
    # silently skipped whenever save_name did not contain the word "best", and
    # int() parsing broke on float scores.  Match the actual prefix and parse
    # the score as float instead.
    prefix = f'{save_name}_'
    scored = []
    for name in os.listdir(save_dir):
        if not (name.startswith(prefix) and name.endswith('.pth')):
            continue
        try:
            scored.append((float(name[len(prefix):-len('.pth')]), name))
        except ValueError:
            continue  # unrelated file that happens to share the prefix
    scored.sort()
    for _, old_model in scored[:-keep_best]:
        os.remove(os.path.join(save_dir, old_model))
    
def save_checkpoints(iter, state, checkpoint_dir, save_name, keep_last=5):
    """Write a training checkpoint and prune all but the newest ones.

    The checkpoint is stored as ``<save_name>_epoch_<iter>.pth``; afterwards
    only the ``keep_last`` files with the highest epoch number survive.

    :param iter: epoch/iteration number encoded in the filename
    :param state: object handed to ``torch.save``
    :param checkpoint_dir: directory the checkpoints are stored in
    :param save_name: filename prefix for this run's checkpoints
    :param keep_last: number of most recent checkpoints to retain
    """
    os.makedirs(checkpoint_dir, exist_ok=True)

    torch.save(state, os.path.join(checkpoint_dir, f'{save_name}_epoch_{iter}.pth'))

    # Rank every checkpoint file by the epoch number encoded in its name.
    epoch_files = [name for name in os.listdir(checkpoint_dir) if "epoch" in name]
    epoch_files.sort(key=lambda name: int(name.split('_')[-1].split('.')[0]))
    if len(epoch_files) > keep_last:
        for stale in epoch_files[:-keep_last]:
            os.remove(os.path.join(checkpoint_dir, stale))


"""
该类就是用来跟踪、记录、判断激励的追踪类
"""
class RewardTracker:
    def __init__(self, writer, stop_reward):
        '''
        param writer: tensorboard writer保存
        param stop_reward: 停止训练的激励值\目标值
        '''

        self.writer = writer
        self.stop_reward = stop_reward

    def __enter__(self):
        self.ts = time.time()
        self.ts_frame = 0
        # total_rewards 训练期间的每一步的激励值，用来记录
        self.total_rewards = []
        self.total_steps = []
        return self

    def __exit__(self, *args):
        self.writer.close()

    def reward(self, end_infos, frame, epsilon=None):
        '''
        param reward: 样本
        param fream: 当前进行了第frame次的训练
        param epsilon：当前的epsilon值

        return True: 表示已经达到了目标激励值 False： 表示还没有达到目标的激励值
        '''
        # 激励经验存储在总缓存区
        for end_info in end_infos:
            self.total_rewards.append(end_info[0])
            self.total_steps.append(end_info[1])
            # 计算当前的平均帧率
            speed = (frame - self.ts_frame) / (time.time() - self.ts)
            # 将当前帧总数和所花费的时间存储在缓存中
            self.ts_frame = frame
            self.ts = time.time()
            # 计算平均激励值
            mean_reward = np.mean(self.total_rewards[-100:])
            mean_step = np.mean(self.total_steps[-100:])
            epsilon_str = "" if epsilon is None else ", eps %.2f" % epsilon
            print("%d: done %d games, mean reward %.3f, mean step %.3f, speed %.2f f/s%s" % (
                frame, len(self.total_rewards), mean_reward, mean_step, speed, epsilon_str
            ))
            sys.stdout.flush()
            if epsilon is not None:
                self.writer.add_scalar("epsilon", epsilon, frame)
            self.writer.add_scalar("speed", speed, frame)
            self.writer.add_scalar("reward_100", mean_reward, frame)
            self.writer.add_scalar("reward", end_info[0], frame)
            self.writer.add_scalar("step_100", mean_step, frame)
            self.writer.add_scalar("step", end_info[1], frame)
        # 如果当前获取的激励已经达到了目标的激励大小，则返回true
        if mean_reward > self.stop_reward:
            print("Solved in %d frames!" % frame)
            return True
        return False


@dataclass
class AgentState(ptan.experience.BaseAgentState):
    """Per-environment agent state carried between steps (recurrent agents)."""

    obs: torch.Tensor   # latest observation (acts as next_obs)
    action_dim: int     # size of the discrete action space
    # One-hot encoding of the action that led to `obs`; filled in __post_init__.
    last_action: torch.Tensor = field(init=False)
    # Reward received on the transition into `obs`.  BUG FIX: a plain tensor
    # default is evaluated once at class definition and shared (mutably) by
    # every instance; default_factory gives each instance its own tensor.
    last_reward: torch.Tensor = field(default_factory=lambda: torch.zeros((1, 1), dtype=torch.float32))
    # Optional (h, c) LSTM hidden state.
    hidden_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None
    # Populated externally; never initialized here.
    q_value: torch.Tensor = field(init=False)

    def __post_init__(self):
        # Start with an all-zero one-hot vector ("no action taken yet").
        self.last_action = torch.zeros((1, self.action_dim), dtype=torch.float32)

    def update(self, obs, action, reward, done, next_obs):
        """Advance the state with the outcome of one environment step.

        :param obs: observation the action was taken from (unused here)
        :param action: integer index of the executed action
        :param reward: scalar reward obtained for the step
        :param done: episode-termination flag (unused here)
        :param next_obs: numpy observation produced by the step
        """
        self.obs = torch.from_numpy(next_obs).unsqueeze(0)
        # One-hot encode the executed action.
        self.last_action = torch.FloatTensor([[1 if i == action else 0 for i in range(self.action_dim)]])
        self.last_reward = torch.FloatTensor([[reward]])


def choose_action(env, net, state, header_number: int = None, epsilon=None, device='cpu'):
    """Select a discrete action, optionally with epsilon-greedy exploration.

    :param env: environment providing ``action_space.sample()`` for the random
        branch (only used when epsilon triggers)
    :param net: Q-network; called as ``net(state, head)`` when a head is given,
        otherwise ``net(state)`` returning one Q-tensor per ensemble head
    :param state: raw observation, converted to a float tensor with a batch dim
    :param header_number: ensemble head to use; None means majority vote over
        all heads
    :param epsilon: epsilon for epsilon-greedy selection; None disables
        exploration entirely
    :param device: torch device the state tensor is moved to
    :return: integer action index
    """
    # The two branches of the original code were byte-for-byte duplicates of
    # the greedy path; only the random short-circuit differs.
    if epsilon is not None and np.random.random() <= epsilon:
        # Exploration: sample a uniformly random action.
        return env.action_space.sample()

    with torch.no_grad():
        state_t = torch.tensor(state, dtype=torch.float).unsqueeze(0).to(device)
        if header_number is not None:
            # A specific ensemble head was requested; act greedily on it.
            q_values = net(state_t, header_number).cpu()
            return int(q_values.max(1).indices.numpy())
        # Majority vote: each head proposes its greedy action and the most
        # frequently proposed action wins.
        votes = [int(q.cpu().max(1).indices.numpy()) for q in net(state_t)]
        return Counter(votes).most_common(1)[0][0]



class BootStrappedAgent(ptan.agent.BaseAgent):
    """Epsilon-greedy agent that acts through one randomly chosen ensemble head.

    A new head is drawn at every episode start (``initial_state``); epsilon is
    annealed linearly with the global step counter.
    """

    def __init__(self, params, net, env, device='cpu'):
        super().__init__()

        self.params = params
        self.net = net
        self.device = device
        self.eps_end = self.params.eps_end
        self.eps_start = self.params.ep
        self.eps_start_step = self.params.learn_start
        self.eps_end_step = self.params.eps_endt
        self.env = env
        self.cur_step = 0
        self.heads = list(range(self.params.n_ensemble))
        self.active_head = self.heads[0]

    def initial_state(self):
        """Draw a fresh random ensemble head; the per-env agent state is None."""
        np.random.shuffle(self.heads)
        self.active_head = self.heads[0]
        return None

    def get_epsilon(self, t):
        """Linearly anneal epsilon from eps_start down to eps_end.

        Decay starts after ``eps_start_step`` steps and spans ``eps_end_step``
        further steps; outside that window epsilon is clamped to the bounds.

        :param t: current global time step
        """
        remaining = self.eps_end_step - max(0, t - self.eps_start_step)
        decay = (self.eps_start - self.eps_end) * remaining / self.eps_end_step
        return self.eps_end + max(0, decay)

    def __call__(self, states, agent_states):
        """Map a batch of observations to actions.

        :param states: list of environment observations to process
        :param agent_states: per-environment agent states, same length
        :return: tuple of (actions, agent states)
        """
        picked = [self.__inner_call__(obs, ag) for obs, ag in zip(states, agent_states)]
        self.cur_step += 1
        actions = [p[0] for p in picked]
        new_agent_states = [p[1] for p in picked]
        return actions, new_agent_states

    def __inner_call__(self, state, agent_state: AgentState):
        """Choose one action for a single observation via the active head.

        :param state: environment observation
        :param agent_state: per-environment agent state (passed through)
        :return: (action, agent_state) tuple
        """
        action = choose_action(
            self.env,
            self.net,
            state=state,
            header_number=self.active_head,
            epsilon=self.get_epsilon(self.cur_step),
            device=self.device,
        )
        return action, agent_state
    


class BootStrappedReplayBuffer(ptan.experience.ExperienceReplayBuffer):
    """Replay buffer that tags every stored sample with a Bernoulli head mask."""

    def __init__(self, experience_source, buffer_size, n_ensemble=1, bernoulli_prob=0.9):
        super().__init__(experience_source, buffer_size)

        self.n_ensemble = n_ensemble          # number of bootstrap heads
        self.bernoulli_prob = bernoulli_prob  # per-head inclusion probability

    def _add(self, sample):
        """Append ``sample`` to the buffer, extended with a per-head mask.

        Args:
            sample: experience tuple
                (obs, action, reward, done, next_obs, agent_state, step_info)
        """
        # Draw one 0/1 mask entry per ensemble head.
        mask = np.random.binomial(1, self.bernoulli_prob, self.n_ensemble)

        # Some sources deliver the experience wrapped inside a 1-tuple; unwrap
        # it first so the stored layout stays flat with the mask appended last.
        if len(sample) == 1 and isinstance(sample[0], tuple):
            sample = sample[0]

        super()._add(sample + (mask,))

    def state_dict(self):
        """Snapshot the buffer contents for checkpointing."""
        return {
            "buffer": self.buffer,
            "capacity": self.capacity,
            "pos": self.pos,
        }

    def load_state_dict(self, state):
        """Restore a snapshot produced by ``state_dict``."""
        self.buffer = state["buffer"]
        self.capacity = state["capacity"]
        self.pos = state["pos"]



def select_device(args):
    """Pick the torch device for this run.

    Prefers CUDA, then Apple MPS, and falls back to CPU.  Both accelerator
    paths are gated on ``args.cuda``.
    """
    if not args.cuda:
        return torch.device("cpu")
    if torch.cuda.is_available():
        return torch.device("cuda")
    if torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")


class ProcessFrame(gym.ObservationWrapper):
    """Convert raw game frames to (optionally grayscale) size x size images."""

    def __init__(self, env=None, gray=True, size=84):
        """
        :param env: environment to wrap
        :param gray: convert frames to a single grayscale channel
        :param size: output width/height in pixels
        """
        super(ProcessFrame, self).__init__(env)
        # New observation space: 0-255 pixel values, 1 channel when gray.
        self.observation_space = spaces.Box(low=0, high=255, shape=(size, size, 1 if gray else 3), dtype=np.uint8)
        self.gray = gray
        self.size = size

    def observation(self, obs):
        """Transform one raw observation."""
        return ProcessFrame.process(obs, self.gray, self.size)

    @staticmethod
    def process(img, gray=True, size=84):
        """Grayscale (optionally), crop and resize one raw frame.

        NOTE: removed an unused ``global count_frame`` declaration — that
        global was never defined or read anywhere in the module.

        :param img: HxWx3 frame; the crop below assumes at least 210 rows
            (Atari layout) — TODO confirm for other environments
        :return: (size, size, 1 or 3) uint8 array
        """
        x_t = img
        if gray:
            # ITU-R BT.601 luma weights.
            x_t = x_t[:, :, 0] * 0.299 + x_t[:, :, 1] * 0.587 + x_t[:, :, 2] * 0.114

        # Crop rows 23..209 — presumably trims the score banner; verify per game.
        x_t = x_t[23:210, :]
        x_t = cv2.resize(x_t, (size, size), interpolation=cv2.INTER_AREA)
        x_t = np.reshape(x_t, [size, size, 1 if gray else 3])
        return x_t.astype(np.uint8)

    @staticmethod
    def save_state_as_image(state, filename):
        """Save the state as a PNG image (debug helper)."""
        # Float states are min-max scaled to [0, 255] before conversion.
        if state.dtype != np.uint8:
            state = np.uint8(255 * (state - state.min()) / (state.max() - state.min()))
        # Drop singleton channel dimensions so PIL accepts the array.
        state = state.squeeze()
        img = Image.fromarray(state)
        # PIL needs a compatible mode; fall back to grayscale.
        if img.mode not in ('L', 'RGB'):
            img = img.convert('L')
        img.save(filename)


class FireResetEnv(gym.Wrapper):
    def __init__(self, env=None):
        """For environments where the user need to press FIRE for the game to start."""
        super(FireResetEnv, self).__init__(env)
        # The wrapped game must expose FIRE as action index 1 and have at
        # least three actions in total.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def step(self, action):
        # Plain pass-through; only reset() is customized.
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        # After resetting, press actions 1 and 2 to get past the "press FIRE
        # to start" screen; if a press accidentally ends the episode, reset
        # again before continuing.
        # NOTE(review): if the second step() ends the episode, the environment
        # is reset but the stale pre-reset (obs, info) is still returned —
        # confirm this matches the upstream FireResetEnv this was ported from.
        self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(1)
        if done:
            self.env.reset(seed=seed, options=options)
        obs, _, done, _, info = self.env.step(2)
        if done:
            self.env.reset(seed=seed, options=options)
        return obs, info
    


class RewardPenaltyWrapper(gym.Wrapper):
    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        """Replace the step reward with a fixed penalty whenever a life is lost.

        :param env: environment to wrap
        :param frame_penalty: per-frame penalty — NOTE(review): accepted and
            stored but never applied anywhere in this wrapper; confirm whether
            it should be added to the reward each step.
        :param life_loss_penalty: reward substituted when `lives` decreases
        """
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty
        self.life_loss_penalty = life_loss_penalty
        # Life count observed on the previous step.
        self.previous_lives = 0

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        self.previous_lives = info.get('lives', 0)  # initial life count
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        # if reward > 0:
        #     reward /= 100.0 * 8
        # When the life counter drops, override the reward with the penalty.
        current_lives = info.get('lives', self.previous_lives)
        if current_lives < self.previous_lives:
            reward = self.life_loss_penalty
            self.previous_lives = current_lives
        
        return obs, reward, done, truncated, info
    

import collections

class FrameStack(gym.Wrapper):
    """Stack the last k observations along the channel (first) axis."""

    def __init__(self, env, k):
        """
        :param env: wrapped environment whose observations are stacked
        :param k: number of consecutive frames kept in the stack
        """
        super(FrameStack, self).__init__(env)
        self.k = k
        self.frames = collections.deque(maxlen=k)
        shp = env.observation_space.shape
        # BUG FIX: the declared space was hard-coded to float32 although
        # _get_obs returns whatever dtype the wrapped env emits (uint8 frames
        # from ProcessFrame here); advertise the wrapped env's dtype instead.
        self.observation_space = gym.spaces.Box(
            low=0, high=255, shape=(shp[0] * k, *shp[1:]),
            dtype=env.observation_space.dtype,
        )

    def reset(self, **kwargs):
        """Reset and pre-fill the stack with k copies of the first frame."""
        obs, info = self.env.reset(**kwargs)
        for _ in range(self.k):
            self.frames.append(obs)
        return self._get_obs(), info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)
        self.frames.append(obs)
        return self._get_obs(), reward, done, truncated, info

    def _get_obs(self):
        # Stack with NumPy along the channel axis (frames are channel-first
        # after ImageToPyTorch) instead of using LazyFrames.
        return np.concatenate(list(self.frames), axis=0)


def wrap_dqn(env, episodic_life=True, gray = True, clip_reward = False, stack_frames = 4):
    """Build an Atari environment with the standard DQN preprocessing stack.

    :param env: gymnasium environment id (string)
    :param episodic_life: treat each life as a separate episode
    :param gray: convert frames to grayscale
    :param clip_reward: wrap with RewardPenaltyWrapper (life-loss penalty)
    :param stack_frames: number of consecutive frames stacked per observation
    :return: the fully wrapped environment
    """
    env = gym.make(env, frameskip=4, repeat_action_probability=0)
    if episodic_life:
        # Simulate a multi-life game as a sequence of single-life episodes.
        env = ptan.common.wrappers.EpisodicLifeEnv(env)
    # Randomize the start state with up to 30 no-op actions.
    env = ptan.common.wrappers.NoopResetEnv(env, noop_max=30)

    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    # env = ptan.common.wrappers.MaxAndSkipEnv(env, skip=4)
    # TODO compare different resolutions/colors — in theory all should work
    env = ProcessFrame(env, gray=gray, size=84)
    env = ptan.common.wrappers.ImageToPyTorch(env)
    env = FrameStack(env, stack_frames)
    if clip_reward:
        env = RewardPenaltyWrapper(env)
    return env


def setup_logger(save_path):
    """Configure and return the module logger (file + console output).

    The rotating file handler writes to ``<save_path>/train.log``.

    :param save_path: existing directory the log file is written into
    :return: the configured ``logging.Logger``
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)

    # BUG FIX: handlers were appended unconditionally, so calling this twice
    # duplicated every log line; only attach them on first configuration.
    if not logger.handlers:
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

        handler = RotatingFileHandler(os.path.join(save_path, 'train.log'), maxBytes=1024 * 1024, backupCount=2)
        handler.setFormatter(formatter)
        logger.addHandler(handler)

        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        # The console handler previously had no formatter; use the same one.
        console.setFormatter(formatter)
        logger.addHandler(console)

    return logger
    

def build_parser():
    """Build the command-line argument parser for training runs.

    :return: configured ``argparse.ArgumentParser``
    """
    parser = argparse.ArgumentParser()

    # BUG FIX: `store_true` with default=True made the flag a no-op (args.cuda
    # was always True).  BooleanOptionalAction keeps `--cuda` working and adds
    # `--no-cuda` to actually disable it; the default stays True.
    parser.add_argument("--cuda", default=True, action=argparse.BooleanOptionalAction, help='Enable CUDA')
    parser.add_argument("-n", "--name", default='MrDo', help="Name of the run")

    parser.add_argument("--refer_img", dest="refer_img", metavar="refer_img", default=None)

    # Replay memory and training cadence.
    parser.add_argument("--memory_size", dest="memory_size", metavar="memory_size", type=int, default=int(1e6))
    parser.add_argument("--update_freq", dest="update_freq", metavar="update_freq", type=int, default=4)
    parser.add_argument("--learn_start", dest="learn_start", metavar="learn_start", type=int, default=50000)
    parser.add_argument("--history_size", dest="history_size", metavar="history_size", type=int, default=4)
    parser.add_argument("--target_update", dest="target_update", metavar="target_update", type=int, default=10000)

    # Bootstrapped-DQN ensemble settings.
    parser.add_argument("--n_ensemble", dest="n_ensemble", type=int, default=9)
    parser.add_argument("--bernoulli_prob", dest="bernoulli_prob", type=float, default=0.9)

    # Epsilon schedule and optimizer settings.
    parser.add_argument("--batch_size", dest="batch_size", metavar="batch_size", type=int, default=32)
    # BUG FIX: the epsilon start value is fractional; `type=int` rejected
    # command-line values such as 0.5.
    parser.add_argument("--ep", dest="ep", metavar="ep", type=float, default=1.0)
    parser.add_argument("--eps_end", dest="eps_end", metavar="eps_end", type=float, default=0.01)
    parser.add_argument("--eps_endt", dest="eps_endt", metavar="eps_endt", type=int, default=int(1e6))
    parser.add_argument("--lr", dest="lr", metavar="lr", type=float, default=0.00025)
    parser.add_argument("--discount", dest="discount", metavar="discount", type=float, default=0.99)

    # Run control.
    parser.add_argument("--agent_type", dest="agent_type", metavar="agent_type", default="DQN")
    parser.add_argument("--max_steps", dest="max_steps", metavar="max_steps", type=int, default=int(5e6))
    parser.add_argument("--start_steps", dest="start_steps", metavar="start_steps", type=int, default=0)

    # Checkpointing and evaluation.
    parser.add_argument("--save_freq", dest="save_freq", metavar="save_freq", type=int, default=1000)
    parser.add_argument("--eval_freq", dest="eval_freq", metavar="eval_freq", type=int, default=50000)
    parser.add_argument("--eval_steps", dest="eval_steps", metavar="eval_steps", type=int, default=50000)

    parser.add_argument("--max_eval_iter", dest="max_eval_iter", metavar="max_eval_iter", type=int, default=10000)

    parser.add_argument("--pretrained_dir", dest="pretrained_dir", metavar="pretrained_dir", type=str, default=None)
    # parser.add_argument("--out_dir", dest="out_dir", metavar="out_dir", type=str, default=None, required=True)

    return parser


def unpack_batch(batch, device='cpu'):
    """Convert a list of experience tuples into training tensors.

    Each experience is indexed as
    (obs, action, reward, done, next_obs, agent_state, step_info, mask);
    only the training-relevant fields are extracted.

    :param batch: iterable of experience tuples
    :param device: torch device the tensors are moved to
    :return: (states, actions, rewards, next_states, dones, masks); actions
        and dones carry a trailing unit dimension for gather/masking.
    """
    states = [exp[0] for exp in batch]
    actions = [exp[1] for exp in batch]
    rewards = [exp[2] for exp in batch]
    dones = [exp[3] for exp in batch]
    next_states = [exp[4] for exp in batch]
    masks = [exp[7] for exp in batch]

    def _tensor(values, dtype):
        # Stack through NumPy first to avoid the slow per-element tensor path.
        return torch.tensor(np.array(values), dtype=dtype).to(device=device)

    return (
        _tensor(states, torch.float),
        _tensor(actions, torch.long).unsqueeze(1),
        _tensor(rewards, torch.float),
        _tensor(next_states, torch.float),
        _tensor(dones, torch.long).unsqueeze(1),
        _tensor(masks, torch.float),
    )