import os.path
import time
import numpy as np
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import ptan
import logging
from logging.handlers import RotatingFileHandler


def setup_logger(save_path):
    """Return the module logger configured with a rotating file handler.

    Writes to ``<save_path>/train.log``, rotating at 1 MiB with 2 backups.

    FIX: the original unconditionally added a new handler on every call, so
    calling setup_logger() twice duplicated every log line and leaked file
    handles. We now skip adding a handler that targets the same file.

    :param save_path: directory in which 'train.log' is created (must exist)
    :return: the configured logging.Logger instance
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    handler = RotatingFileHandler(os.path.join(save_path, 'train.log'), maxBytes=1024 * 1024, backupCount=2)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    # Only attach if no existing handler already writes to this file
    # (FileHandler exposes the resolved path as .baseFilename).
    if any(getattr(h, 'baseFilename', None) == handler.baseFilename for h in logger.handlers):
        handler.close()  # avoid leaking the just-opened file handle
    else:
        logger.addHandler(handler)

    return logger


def unpack_batch_sac_q(batch, device="cpu"):
    """
    Unpack a replay batch into tensors for Q-value training with discrete actions.

    For terminal transitions (``exp.last_state is None``) the current state is
    substituted as the last state; the ``dones`` mask lets downstream code
    zero those entries out.

    :param batch: iterable of experience objects with .state/.action/.reward/.last_state
    :param device: torch device string for the returned tensors
    :return: (states_v, actions_v, rewards_v, dones_t, last_states_v)
    """
    states = [np.array(exp.state) for exp in batch]
    actions = [exp.action for exp in batch]
    rewards = [exp.reward for exp in batch]
    dones = [exp.last_state is None for exp in batch]
    last_states = [np.array(exp.state if exp.last_state is None else exp.last_state)
                   for exp in batch]
    states_v = ptan.agent.float32_preprocessor(states).to(device)
    # NOTE: if `actions` held tensors, torch.LongTensor could not infer the
    # intended (batch_size, 1) shape — it would collapse to (batch_size,) and
    # break later torch.cat calls — hence the list of plain action values.
    actions_v = torch.LongTensor(actions).to(device)
    rewards_v = ptan.agent.float32_preprocessor(rewards).to(device)
    last_states_v = ptan.agent.float32_preprocessor(last_states).to(device)
    dones_t = torch.BoolTensor(dones).to(device)
    return states_v, actions_v, rewards_v, dones_t, last_states_v


def kl_divergence_with_logits(target_logits, prediction_logits):
    """Sum of KL(softmax(target) || softmax(prediction)), computed from raw logits.

    Used as an on-policy distillation loss: zero when the prediction's
    distribution matches the target's, positive otherwise.
    """
    log_target = F.log_softmax(target_logits, dim=-1)
    log_pred = F.log_softmax(prediction_logits, dim=-1)
    kl = F.softmax(target_logits, dim=-1) * (log_target - log_pred)
    return kl.sum()


class DecayActionSelector(ptan.actions.ActionSelector):
    """
    Epsilon-decaying action selector.

    While epsilon is above its floor: with probability (1 - epsilon) delegate
    to ``start_selector`` (probability sampling by default), otherwise pick a
    uniformly random action per row. Once epsilon has fully decayed to
    ``final_epsilon``, switch permanently to ``final_selector`` (argmax).
    """
    # NOTE(review): the default selector instances are created once at import
    # time and shared across DecayActionSelector instances — presumed
    # stateless; verify against the ptan implementation.
    def __init__(self, start_epsilon=0.9, final_epsilon=0.0005, decay_epsilon=1e-8, start_action_selector=ptan.actions.ProbabilityActionSelector(), final_action_selector=ptan.actions.ArgmaxActionSelector()):
        self.start_epsilon = start_epsilon
        self.final_epsilon = final_epsilon
        self.cur_epsilon = self.start_epsilon
        self.decay_epsilon = decay_epsilon
        self.start_selector = start_action_selector
        self.final_selector = final_action_selector
        self.steps = 0  # number of __call__ invocations so far (drives the decay)

    def __call__(self, probs):
        """Select one action index per row of the (batch, n_actions) probs array."""
        assert isinstance(probs, np.ndarray)
        self.cur_epsilon = max(self.final_epsilon, self.start_epsilon - self.decay_epsilon * self.steps)
        self.steps += 1

        # FIX: was `<`, which could never be true because cur_epsilon is
        # clamped at final_epsilon by the max() above — final_selector was
        # unreachable dead code. `<=` activates it once decay completes.
        if self.cur_epsilon <= self.final_epsilon:
            return self.final_selector(probs)
        elif np.random.random() > self.cur_epsilon:
            return self.start_selector(probs)
        else:
            # Explore: uniform random action per row, ignoring the probabilities.
            actions = []
            for prob in probs:
                actions.append(np.random.randint(0, len(prob)))
            return np.array(actions)

    def state_dict(self):
        """Return the decay progress for checkpointing."""
        return {
            "cur_epsilon": self.cur_epsilon,
            "steps": self.steps
        }

    def load_state_dict(self, state_dict):
        """Restore decay progress produced by state_dict()."""
        self.cur_epsilon = state_dict["cur_epsilon"]
        self.steps = state_dict["steps"]


def save_model(model_name, loss, best_loss, model):
    """Persist the model and track the best loss seen so far.

    Always writes ``saves/model_<model_name>.dat``; additionally writes a
    ``best_model`` snapshot when ``loss`` improves on ``best_loss``.

    FIX: the best-model filename used to embed the *old* best_loss (the value
    being beaten), so the file was named after the previous best instead of
    the loss actually saved. best_loss is now updated before formatting.

    :param model_name: identifier embedded in the output filenames
    :param loss: current loss value
    :param best_loss: best (lowest) loss seen before this call
    :param model: object passed to torch.save (the whole model is pickled)
    :return: the updated best loss (min of loss and best_loss)
    """
    os.makedirs("saves", exist_ok=True)

    if loss < best_loss:
        best_loss = loss
        torch.save(model, f'saves/best_model_{model_name}_{best_loss}.dat')

    torch.save(model, f'saves/model_{model_name}.dat')

    return best_loss

def unpack_batch(batch):
    """
    Unpack a replay batch into numpy arrays.

    For terminal transitions (``exp.last_state is None``) the current state is
    reused as the last state — the done mask lets callers discard that value.

    :param batch: iterable of experiences with .state/.action/.reward/.last_state
    :return: tuple of numpy arrays
             (states, actions, rewards[float32], dones[uint8], last_states)
    """
    states = [np.asarray(exp.state) for exp in batch]
    actions = [exp.action for exp in batch]
    rewards = [exp.reward for exp in batch]
    dones = [exp.last_state is None for exp in batch]
    # the placeholder state for terminal steps will be masked out anyway
    last_states = [np.asarray(exp.state if exp.last_state is None else exp.last_state)
                   for exp in batch]
    return np.asarray(states), np.array(actions), np.array(rewards, dtype=np.float32), \
           np.array(dones, dtype=np.uint8), np.asarray(last_states)



def calc_loss_dqn(batch, net, tgt_net, gamma, device="cpu"):
    """Compute the one-step DQN MSE loss for a batch of transitions.

    :param batch: replay batch accepted by unpack_batch
    :param net: online network — Q-values for the current states
    :param tgt_net: target network — Q-values for the next states
    :param gamma: discount factor
    :param device: torch device string
    :return: scalar MSE loss between predicted and bootstrapped Q-values
    """
    # Unpack the sampled transitions into numpy arrays.
    states, actions, rewards, dones, next_states = unpack_batch(batch)

    # Move the data to the requested device.
    states_v = torch.tensor(states).to(device)
    next_states_v = torch.tensor(next_states).to(device)
    actions_v = torch.tensor(actions).to(device)
    rewards_v = torch.tensor(rewards).to(device)
    # FIX: torch.ByteTensor is deprecated for mask indexing (and forced an
    # extra .bool() conversion below) — build a boolean mask directly.
    done_mask = torch.tensor(dones, dtype=torch.bool).to(device)

    # Q(s, a) predicted by the online net for the actions actually taken.
    state_action_values = net(states_v).gather(1, actions_v.unsqueeze(-1)).squeeze(-1)
    # Bootstrapped value of the next state from the target net; zero it for
    # terminal transitions so no reward is propagated past episode end.
    next_state_values = tgt_net(next_states_v).max(1)[0]
    next_state_values[done_mask] = 0.0

    # Bellman target: r + gamma * max_a Q_target(s', a); detach so gradients
    # only flow through the online network's predictions.
    expected_state_action_values = next_state_values.detach() * gamma + rewards_v
    return nn.MSELoss()(state_action_values, expected_state_action_values)


def calc_q_lambda_returns(batch, net, gamma, q_lambda, device="cpu"):
    """
    Compute Q(lambda) returns for a batch of transitions.

    Difference from the plain DQN target: DQN bootstraps one step,
    r_t + gamma * max_a Q(s_{t+1}, a). Q(lambda) instead blends the one-step
    bootstrap with the recursively computed multi-step return:

        G_t = r_t + gamma * (lambda * G_{t+1}
                             + (1 - lambda) * V(s_{t+1}) * (1 - done_{t+1}))

    With q_lambda == 0 this collapses back to the one-step (DQN-style) target;
    with q_lambda == 1 it becomes a full multi-step return. Intermediate
    values trade bias against variance.

    NOTE(review): the recursion assumes `batch` holds *consecutive steps of a
    single trajectory in time order* — TODO confirm at the call sites.

    Args:
        batch: trajectory samples accepted by unpack_batch
        net: network producing Q-values of shape [batch_size, num_actions]
        gamma: discount factor
        q_lambda: lambda mixing coefficient (see formula above)
        device: device string such as "cpu" or "cuda"

    Returns:
        Tensor of Q(lambda) returns, one per time step (computed under
        no_grad, so no gradients attached).
    """
    with torch.no_grad():
        # Unpack states, actions, rewards, done flags and next states.
        states, actions, rewards, dones, next_states = unpack_batch(batch)

        # Convert to tensors on the requested device.
        states_v = torch.tensor(states, dtype=torch.float32).to(device)
        actions_v = torch.tensor(actions, dtype=torch.long).to(device)  # NOTE(review): unused below
        rewards_v = torch.tensor(rewards, dtype=torch.float32).to(device)
        dones_v = torch.tensor(dones, dtype=torch.float32).to(device)
        next_states_v = torch.tensor(next_states, dtype=torch.float32).to(device)

        # Q-values for current and next states.
        q_values = net(states_v)  # shape: [batch_size, num_actions]
        next_q_values = net(next_states_v)  # shape: [batch_size, num_actions]

        # Greedy state value per step; gather at argmax is equivalent to
        # q_values.max(dim=1).values.
        values = q_values.gather(1, torch.argmax(q_values, dim=1).unsqueeze(1)).squeeze(-1)  # shape: [batch_size]

        # Max Q of the next state, used to bootstrap the final time step.
        next_max_values, _ = torch.max(next_q_values, dim=-1)

        # The batch is assumed to be stacked in time order (consecutive steps
        # of one trajectory), so returns are filled in reverse.
        # returns has the same shape as rewards.
        returns = torch.zeros_like(rewards_v).to(device)

        # Seed the recursion at the last time step: bootstrap from the next
        # state's max Q, zeroed if that step ended the episode
        # (nextnonterminal = 1 - done).
        next_done = dones_v[-1]
        nextnonterminal = 1.0 - next_done
        returns[-1] = rewards_v[-1] + gamma * next_max_values[-1] * nextnonterminal

        # Walk backwards through the remaining steps applying
        #   returns[t] = rewards[t] + gamma * (
        #       q_lambda * returns[t+1]
        #       + (1 - q_lambda) * values[t+1] * nextnonterminal)
        # i.e. a lambda-weighted mix of the multi-step return and the
        # one-step bootstrap; q_lambda == 0 reproduces the plain DQN target.
        for t in reversed(range(len(rewards_v) - 1)):
            nextnonterminal = 1.0 - dones_v[t + 1]
            next_value = values[t + 1]
            returns[t] = rewards_v[t] + gamma * (
                        q_lambda * returns[t + 1] + (1 - q_lambda) * next_value * nextnonterminal)

    return returns


class EpsilonTracker:
    """Linearly anneals the epsilon of an epsilon-greedy action selector."""

    def __init__(self, epsilon_greedy_selector, epsilon_start, epsilon_final, epsilon_frames):
        """
        :param epsilon_greedy_selector: selector whose ``epsilon`` attribute is driven by this tracker
        :param epsilon_start: epsilon value at frame 0
        :param epsilon_final: lower bound that epsilon never drops below
        :param epsilon_frames: number of frames over which epsilon decays from start to final
        """
        self.epsilon_greedy_selector = epsilon_greedy_selector
        self.epsilon_start = epsilon_start
        self.epsilon_final = epsilon_final
        self.epsilon_frames = epsilon_frames
        self.frame(0)

    def frame(self, frame):
        """Set the selector's epsilon for the given frame count.

        Epsilon decays linearly with the frame number and is clamped at
        ``epsilon_final``; it controls the random-vs-greedy action tradeoff.
        """
        decayed = self.epsilon_start - frame / self.epsilon_frames
        if decayed < self.epsilon_final:
            decayed = self.epsilon_final
        self.epsilon_greedy_selector.epsilon = decayed
        

"""
该类就是用来跟踪、记录、判断激励的追踪类
"""
class RewardTracker:
    def __init__(self, writer, stop_reward, logger=None):
        '''
        param writer: tensorboard writer保存
        param stop_reward: 停止训练的激励值\目标值
        '''

        self.writer = writer
        self.stop_reward = stop_reward
        self.logger = logger

    def __enter__(self):
        self.ts = time.time()
        self.ts_frame = 0
        # total_rewards 训练期间的每一步的激励值，用来记录
        self.total_rewards = []
        return self

    def __exit__(self, *args):
        self.writer.close()

    def reward(self, reward, frame, epsilon=None):
        '''
        param reward: 样本
        param fream: 当前进行了第frame次的训练
        param epsilon：当前的epsilon值

        return True: 表示已经达到了目标激励值 False： 表示还没有达到目标的激励值
        '''
        # 激励经验存储在总缓存区
        self.total_rewards.append(reward)
        # 计算当前的平均帧率
        speed = (frame - self.ts_frame) / (time.time() - self.ts + 1e-9)
        # 将当前帧总数和所花费的时间存储在缓存中
        self.ts_frame = frame
        self.ts = time.time()
        # 计算平均激励值
        mean_reward = np.mean(self.total_rewards[-100:])
        epsilon_str = "" if epsilon is None else ", eps %.2f" % epsilon
        print("%d: done %d games, mean reward %.3f, speed %.2f f/s%s" % (
            frame, len(self.total_rewards), mean_reward, speed, epsilon_str
        ))
        if self.logger is not None:
            self.logger.info("%d: done %d games, mean reward %.3f, speed %.2f f/s%s" % (frame, len(self.total_rewards), mean_reward, speed, epsilon_str))

        sys.stdout.flush()
        if epsilon is not None:
            self.writer.add_scalar("epsilon", epsilon, frame)
        self.writer.add_scalar("speed", speed, frame)
        self.writer.add_scalar("reward_100", mean_reward, frame)
        self.writer.add_scalar("reward", reward, frame)
        # 如果当前获取的激励已经达到了目标的激励大小，则返回true
        if mean_reward > self.stop_reward:
            print("Solved in %d frames!" % frame)
            if self.logger is not None:
                self.logger.info("Solved in %d frames!" % frame)
            return True
        return False


def save_best_model(score, state, save_dir, save_name, keep_best = 5):
    """Save `state` as ``<save_name>_<score>.pth`` and prune old best models.

    Keeps only the ``keep_best`` highest-scoring files sharing this
    ``save_name`` prefix (higher score assumed better — same pruning
    direction as the original implementation; confirm against callers).

    FIXES vs. original:
    - the cleanup matched only filenames containing the literal "best", so
      files saved under any other save_name were never pruned;
    - the sort key int(x.split('_')[-1].split('.')[0]) truncated float
      scores (e.g. "0.95.pth" -> 0), making the pruning order wrong.

    :param score: numeric score embedded in the filename
    :param state: object passed to torch.save
    :param save_dir: output directory (created if missing)
    :param save_name: filename prefix identifying this model family
    :param keep_best: number of files to retain
    """
    os.makedirs(save_dir, exist_ok=True)

    save_path = os.path.join(save_dir, f'{save_name}_{score}.pth')
    torch.save(state, save_path)

    prefix = f'{save_name}_'

    def _score_of(fname):
        # Parse the numeric score back out of "<save_name>_<score>.pth";
        # None for files whose suffix is not a number.
        try:
            return float(fname[len(prefix):-len('.pth')])
        except ValueError:
            return None

    saved = [f for f in os.listdir(save_dir)
             if f.startswith(prefix) and f.endswith('.pth') and _score_of(f) is not None]
    saved.sort(key=_score_of)
    if len(saved) > keep_best:
        for old_model in saved[:-keep_best]:
            os.remove(os.path.join(save_dir, old_model))
    
def save_checkpoints(iter, state, checkpoint_dir, save_name, keep_last=5):
    """Save `state` as ``<save_name>_epoch_<iter>.pth`` and prune old checkpoints.

    After writing, only the ``keep_last`` checkpoints with the highest epoch
    numbers are retained in ``checkpoint_dir``.

    :param iter: integer epoch/iteration number embedded in the filename
    :param state: object passed to torch.save
    :param checkpoint_dir: output directory (created if missing)
    :param save_name: filename prefix
    :param keep_last: number of checkpoints to retain
    """
    os.makedirs(checkpoint_dir, exist_ok=True)

    torch.save(state, os.path.join(checkpoint_dir, f'{save_name}_epoch_{iter}.pth'))

    # Checkpoints are identified by the "epoch" marker and ordered by the
    # trailing integer epoch number in their filename.
    def _epoch_no(name):
        return int(name.split('_')[-1].split('.')[0])

    checkpoints = sorted((f for f in os.listdir(checkpoint_dir) if "epoch" in f), key=_epoch_no)
    if len(checkpoints) > keep_last:
        for stale in checkpoints[:-keep_last]:
            os.remove(os.path.join(checkpoint_dir, stale))