import ptan
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
from einops import rearrange
from lib import common_trxl as common



class AgentPPOTrxl(ptan.agent.BaseAgent):
    """ptan agent wrapping a Transformer-XL PPO policy.

    Keeps per-rollout bookkeeping (log-probs, values, episodic memories) on
    the side so the training loop can later assemble PPO batches.  Only a
    single environment is supported (see initial_state / __call__).
    """

    def __init__(self, net, memory_indices, memory_mask, max_episode_steps, param, action_shape, device="cpu"):
        # net: model exposing get_action_and_value(obs, memory, mask, indices).
        # memory_indices: lookup table mapping an episode step to the indices
        #   of the memory slots inside its sliding window.
        # memory_mask: lookup table mapping an episode step to the attention
        #   mask over that window (1 = visible, 0 = hidden/future).
        self.net = net
        self.memory_indices = memory_indices
        self.memory_mask = memory_mask
        self.device = device
        self.max_episode_steps = max_episode_steps
        self.trxl_num_layers = param['trxl_num_layers']
        self.trxl_dim = param['trxl_dim']
        self.action_shape = action_shape
        # NOTE(review): duplicate assignment (device already set above); harmless.
        self.device = device
        # Per-rollout-step log-probs of the chosen actions (one column per
        # action branch) and state values.
        self.log_probs = torch.zeros((param['num_steps'], len(self.action_shape)))
        self.values = torch.zeros((param['num_steps'],))
        self.tarj_num_steps = param['num_steps']
        self.tarj_step = 0
        self.trxl_memory_length = param['trxl_memory_length']
        self.stored_memories = []
        # Index to select the correct episode memory from stored_memories:
        # records, for each rollout step, which entry of stored_memories the
        # step's episode belongs to (updated on episode boundaries / done).
        self.stored_memory_index = torch.zeros((param['num_steps']), dtype=torch.long)
        self.first_step = True


    def initial_state(self):
        # Uniform ptan interface; invoked at the start of every episode.
        # Returns fresh (empty) episodic memory plus the shared rollout buffers.
        if self.tarj_step < self.tarj_num_steps and not self.first_step:
            # Detach this episode's memory from future in-place writes by
            # replacing it with a clone.
            mem_index = self.stored_memory_index[self.tarj_step]
            self.stored_memories[mem_index] = self.stored_memories[mem_index].clone()
            
            if self.tarj_step < self.tarj_num_steps:
                # NOTE(review): this inner condition is always true here (the
                # outer if already checked it), and when
                # tarj_step == tarj_num_steps - 1 the write below targets index
                # tarj_num_steps, which is out of bounds for
                # stored_memory_index (shape (num_steps,)) — confirm the
                # intended bound (likely tarj_step + 1 < tarj_num_steps).
                self.stored_memory_index[self.tarj_step + 1] = len(self.stored_memories) - 1

        self.first_step = True

        # Only valid for a single environment (env count == 1).
        return (
            torch.zeros((self.max_episode_steps, self.trxl_num_layers, self.trxl_dim), dtype=torch.float32).to(self.device),
            torch.zeros((1,), dtype=torch.long).to(self.device),
            self.log_probs,
            self.values
        )

    @torch.no_grad()
    def __call__(self, states, agent_states):
        '''
        states: current environment observations.
        agent_states: the agent's internal state; earlier agents barely used
        this, but here it carries the episodic transformer memory, the per-env
        episode step counter and the rollout log-prob/value buffers.
        '''
        # NOTE(review): initial_state() returns a bare tuple, while the code
        # below iterates agent_states as a list of per-env tuples; in practice
        # ptan's experience source seeds agent_states itself, so this branch
        # appears unreachable with a bare tuple — confirm.
        if agent_states is None or agent_states[0] is None:
            agent_states = self.initial_state()


        next_memory = torch.stack([agent_state[0] for agent_state in agent_states])
        env_current_episode_step = agent_states[0][1]

        if self.first_step:
            self.first_step = False
            self.stored_memories.append(next_memory[0])

        # Convert observations to float32 on the target device.
        states_v = ptan.agent.float32_preprocessor(states).to(self.device)

        # Clamp the episode step into [0, trxl_memory_length - 1] and use it to
        # select the attention mask for this step, so the policy cannot attend
        # to future (not yet written) memory slots. E.g. at step 4 the mask is
        # 1, 1, 1, 1, 0, 0. The mask is always trxl_memory_length long, unlike
        # memory_indices which depends on the episode position.
        stored_memory_masks = self.memory_mask[torch.clip(env_current_episode_step, 0, self.trxl_memory_length - 1)]

        # Retrieve the memory window from the entire episodic memory:
        # look up the slot indices of the sliding window for this step ...
        stored_memory_indices= self.memory_indices[env_current_episode_step]
        # ... then gather the corresponding memories; the mask above hides any
        # slots that are not valid yet, so over-gathering is safe.
        memory_window = common.batched_index_select(next_memory, 1, stored_memory_indices)
        
        actions, logprob, _, value, new_memory = self.net.get_action_and_value(
            states_v, memory_window, stored_memory_masks, stored_memory_indices
        )

        # Write the new per-layer hidden states into this step's memory slot
        # (env 0 only — single-environment assumption).
        next_memory[0, env_current_episode_step] = new_memory
        self.log_probs[self.tarj_step] = logprob
        self.values[self.tarj_step] = value

        self.tarj_step = (self.tarj_step + 1) % self.tarj_num_steps
        env_current_episode_step += 1
        
        return actions.squeeze(-1).cpu().numpy(), [(
                next_memory[0],
                env_current_episode_step,
                self.log_probs,
                self.values,
                stored_memory_indices,
                stored_memory_masks
            )]



class CustomLRScheduler:
    """Linearly anneal the optimizer's learning rate.

    The rate interpolates from ``init_lr`` down to ``final_lr`` over
    ``anneal_steps`` calls to :meth:`step`; afterwards it stays at
    ``final_lr``.  Override :meth:`calculate_lr` for a custom formula.
    """

    _STATE_KEYS = ('init_lr', 'final_lr', 'anneal_steps', 'current_step')

    def __init__(self, optimizer, init_lr, final_lr, anneal_steps, current_step=0):
        self.optimizer = optimizer
        self.init_lr = init_lr
        self.final_lr = final_lr
        self.anneal_steps = anneal_steps
        self.current_step = current_step

    def state_dict(self):
        """Return the scheduler state for checkpointing."""
        return {key: getattr(self, key) for key in self._STATE_KEYS}

    def load_state_dict(self, state_dict):
        """Restore state produced by :meth:`state_dict`."""
        for key in self._STATE_KEYS:
            setattr(self, key, state_dict[key])

    def step(self):
        """Advance one step and push the new rate into every param group."""
        self.current_step += 1
        new_lr = self.calculate_lr()
        for group in self.optimizer.param_groups:
            group['lr'] = new_lr

    def calculate_lr(self):
        """Compute the learning rate for the current step.

        Override this method to implement custom scheduling.
        """
        # While annealing is enabled and not yet finished, the remaining
        # fraction decays linearly from 1 to 0; afterwards it is pinned at 0
        # (i.e. the rate is exactly final_lr).
        if self.anneal_steps > 0 and self.current_step < self.anneal_steps:
            remaining = 1 - self.current_step / self.anneal_steps
        else:
            remaining = 0
        return (self.init_lr - self.final_lr) * remaining + self.final_lr
    

class CustomEntCoefcheduler:
    """Linearly anneal the PPO entropy coefficient.

    The coefficient interpolates from ``init_ent_coef`` down to
    ``final_ent_coef`` over ``anneal_steps`` calls to :meth:`step`;
    afterwards it stays at ``final_ent_coef``.
    """

    def __init__(self, init_ent_coef, final_ent_coef, anneal_steps, current_step=0):
        self.init_ent_coef = init_ent_coef
        self.final_ent_coef = final_ent_coef
        self.anneal_steps = anneal_steps
        self.current_step = current_step
        # Coefficient matching current_step (equals init_ent_coef at step 0).
        self.ent_coef = self._compute_coef()

    def _compute_coef(self):
        """Entropy coefficient for the current step (linear interpolation)."""
        # While annealing is enabled and not yet finished, the remaining
        # fraction decays linearly from 1 to 0; afterwards it is pinned at 0.
        if self.anneal_steps > 0 and self.current_step < self.anneal_steps:
            frac = 1 - self.current_step / self.anneal_steps
        else:
            frac = 0
        return (self.init_ent_coef - self.final_ent_coef) * frac + self.final_ent_coef

    def state_dict(self):
        """Return the scheduler state for checkpointing."""
        return {
            'init_ent_coef': self.init_ent_coef,
            'final_ent_coef': self.final_ent_coef,
            'anneal_steps': self.anneal_steps,
            'current_step': self.current_step,
        }

    def load_state_dict(self, state_dict):
        """Restore state produced by :meth:`state_dict`.

        Fix: the original did not refresh ``ent_coef`` after a restore (it is
        not part of the saved state), so ``get_ent_coef`` kept returning the
        pre-restore value until the next :meth:`step`.  The coefficient is now
        recomputed from the restored ``current_step``.
        """
        self.init_ent_coef = state_dict['init_ent_coef']
        self.final_ent_coef = state_dict['final_ent_coef']
        self.anneal_steps = state_dict['anneal_steps']
        self.current_step = state_dict['current_step']
        self.ent_coef = self._compute_coef()

    def step(self):
        """Advance one step and update the entropy coefficient."""
        self.current_step += 1
        self.ent_coef = self._compute_coef()

    def get_ent_coef(self):
        """Return the entropy coefficient for the current step."""
        return self.ent_coef

        
        


def layer_init(layer, std=np.sqrt(2), bias_const=0.0):
    """Orthogonally initialize ``layer.weight`` with gain ``std``; return the layer.

    NOTE(review): ``bias_const`` is currently unused — the bias initialization
    is disabled (apparently deliberately); confirm before re-enabling it.
    """
    nn.init.orthogonal_(layer.weight, std)
    return layer


class PositionalEncoding(nn.Module):
    """Sinusoidal (absolute) positional encoding.

    Positions are emitted in reverse order: position 0 ends up in the last
    row of the returned table.
    """

    def __init__(self, dim, min_timescale=2.0, max_timescale=1e4):
        super().__init__()
        # One inverse frequency per (sin, cos) channel pair; with the default
        # min_timescale=2.0 this produces dim/2 frequencies, so the
        # concatenated sin/cos output is exactly `dim` wide.
        freqs = torch.arange(0, dim, min_timescale)
        self.register_buffer("inv_freqs", max_timescale ** (-freqs / dim))

    def forward(self, seq_len):
        """Return a (seq_len, output_dim) embedding table (reversed positions)."""
        positions = torch.arange(seq_len - 1, -1, -1.0).to(device=self.inv_freqs.device)
        # Outer product positions x inv_freqs via broadcasting:
        # (seq_len, 1) * (1, n_freqs) -> (seq_len, n_freqs).
        angles = positions.unsqueeze(-1) * self.inv_freqs.unsqueeze(0)
        return torch.cat((angles.sin(), angles.cos()), dim=-1)
    

class MultiHeadAttention(nn.Module):
    """Multi Head Attention without dropout inspired by https://github.com/aladdinpersson/Machine-Learning-Collection"""

    def __init__(self, embed_dim, num_heads):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        # Width handled by each individual head.
        self.head_size = embed_dim // num_heads

        # The embedding must split evenly across heads.
        assert self.head_size * num_heads == embed_dim, "Embedding dimension needs to be divisible by the number of heads"

        # Per-head projections acting on head_size-wide slices.
        self.values = nn.Linear(self.head_size, self.head_size, bias=False)
        self.keys = nn.Linear(self.head_size, self.head_size, bias=False)
        self.queries = nn.Linear(self.head_size, self.head_size, bias=False)
        self.fc_out = nn.Linear(self.num_heads * self.head_size, embed_dim)

    def forward(self, values, keys, query, mask):
        """Scaled dot-product attention over all heads.

        Args:
            values: (N, value_len, embed_dim)
            keys: (N, key_len, embed_dim)
            query: (N, query_len, embed_dim)
            mask: (N, key_len) with 1 = attend, 0 = hide, or None; it is
                broadcast over heads and query positions.

        Returns:
            output: (N, query_len, embed_dim)
            attention: (N, num_heads, query_len, key_len) softmax weights.
        """
        batch = query.shape[0]
        v_len, k_len, q_len = values.shape[1], keys.shape[1], query.shape[1]

        # Split embed_dim into (num_heads, head_size).
        values = values.reshape(batch, v_len, self.num_heads, self.head_size)
        keys = keys.reshape(batch, k_len, self.num_heads, self.head_size)
        query = query.reshape(batch, q_len, self.num_heads, self.head_size)

        # Per-head linear projections of V, K and Q.
        values = self.values(values)  # (N, value_len, heads, head_size)
        keys = self.keys(keys)  # (N, key_len, heads, head_size)
        queries = self.queries(query)  # (N, query_len, heads, head_size)

        # Q·K dot products per batch and head -> (N, heads, q_len, k_len);
        # the einsum contracts the head_size axis.
        energy = torch.einsum("nqhd,nkhd->nhqk", [queries, keys])

        # Hide masked keys: a very large negative score softmaxes to ~0
        # (-inf would produce NaNs).  mask broadcasts via (N, 1, 1, key_len).
        if mask is not None:
            energy = energy.masked_fill(mask.unsqueeze(1).unsqueeze(1) == 0, float("-1e20"))

        # Scale, then softmax over the key dimension.
        # NOTE(review): scaling uses embed_dim rather than the conventional
        # head_size; preserved as-is from the reference implementation.
        attention = torch.softmax(
            energy / (self.embed_dim ** (1 / 2)), dim=3
        )  # (N, heads, query_len, key_len)

        # Attention-weighted sum of the values -> (N, q_len, heads, head_size),
        # then flatten the heads back into the embedding dimension.
        context = torch.einsum("nhql,nlhd->nqhd", [attention, values])
        context = context.reshape(batch, q_len, self.num_heads * self.head_size)

        # Final output projection; raw attention weights are also returned.
        return self.fc_out(context), attention
    


# 这里仅仅只是trandformer的Encoder部分
class TransformerLayer(nn.Module):
    def __init__(self, dim, num_heads):
        '''
        dim: transformer的维度
        num_heads: transformer的注意力头数
        '''
        super().__init__()
        self.attention = MultiHeadAttention(dim, num_heads)
        self.layer_norm_q = nn.LayerNorm(dim)
        self.norm_kv = nn.LayerNorm(dim)
        self.layer_norm_attn = nn.LayerNorm(dim)
        self.fc_projection = nn.Sequential(nn.Linear(dim, dim), nn.ReLU())

    def forward(self, value, key, query, mask):
        '''
        return out和注意力权重
        out: 形状为 (N, query_len/seq_len, dim)
        attention_weights: 形状为 (N, heads, query_len, key_len)
        '''
        # Pre-layer normalization (post-layer normalization is usually less effective)
        # 先进行归一哈
        query_ = self.layer_norm_q(query)
        value = self.norm_kv(value)
        key = value  # K = V -> self-attention
        # 然后进行注意力计算
        # k等于v，使用q来计算注意力
        attention, attention_weights = self.attention(value, key, query_, mask)  # MHA
        # 这里实现的resnet连接
        x = attention + query  # Skip connection
        # 对结果进行归一化
        x_ = self.layer_norm_attn(x)  # Pre-layer normalization
        # 对输出的结果进行特征提取
        forward = self.fc_projection(x_)  # Forward projection
        # 又是resnet连接
        out = forward + x  # Skip connection
        return out, attention_weights



class Transformer(nn.Module):
    """Stack of TransformerLayer blocks over an episodic memory (TrXL-style)."""

    def __init__(self, num_layers, dim, num_heads, max_episode_steps, positional_encoding):
        '''
        num_layers: number of transformer blocks.
        dim: embedding width.
        num_heads: attention heads per block.
        max_episode_steps: longest episode; sizes the positional table.
        positional_encoding: "absolute", "learned", or anything else for none.
        '''
        super().__init__()
        self.max_episode_steps = max_episode_steps
        self.positional_encoding = positional_encoding
        if positional_encoding == "absolute":
            # Fixed sinusoidal embeddings.
            self.pos_embedding = PositionalEncoding(dim)
        elif positional_encoding == "learned":
            # Trainable per-step embedding table of shape (max_episode_steps, dim).
            self.pos_embedding = nn.Parameter(torch.randn(max_episode_steps, dim))

        self.transformer_layers = nn.ModuleList([TransformerLayer(dim, num_heads) for _ in range(num_layers)])

    def forward(self, x, memories, mask, memory_indices):
        '''
        x: (N, dim) encoded current observation, used as the query.
        memories: per-layer episodic memory; its third axis matches the number
            of transformer layers (assumes (N, mem_len, num_layers, dim) —
            consistent with the indexing below).
        mask: attention mask over the memory window (1 = visible).
        memory_indices: episode-step index of every memory slot, used to look
            up positional embeddings.

        Returns (features, new_memories): the last block's output and the
        stacked inputs of every block (this step's new memory entry).
        '''
        # Add positional information to every transformer layer's memory input.
        if self.positional_encoding == "absolute":
            # Build the full sinusoidal table, then gather the rows for the
            # steps present in the memory window.
            pos_embedding = self.pos_embedding(self.max_episode_steps)[memory_indices]
            memories = memories + pos_embedding.unsqueeze(2)
        elif self.positional_encoding == "learned":
            # Gather learned embeddings directly by step index.
            memories = memories + self.pos_embedding[memory_indices].unsqueeze(2)

        # Forward through the blocks; collect each block's input features —
        # these hidden states become the new memories.
        out_memories = []
        for layer_idx, layer in enumerate(self.transformer_layers):
            out_memories.append(x.detach())
            # Current features are the query; this layer's memory slice serves
            # as keys and values; mask hides invalid (future) slots.
            layer_memory = memories[:, :, layer_idx]
            x, attention_weights = layer(
                layer_memory, layer_memory, x.unsqueeze(1), mask
            )  # args: value, key, query, mask
            # Drop the singleton query axis; squeeze() can also remove a batch
            # of 1, so restore the batch axis in that case.
            x = x.squeeze()
            if len(x.shape) == 1:
                x = x.unsqueeze(0)
        # Final features plus each layer's input features.
        return x, torch.stack(out_memories, dim=1)
    


class PPOTrxlModel(nn.Module):
    """PPO actor-critic with a Transformer-XL style episodic memory.

    Supports pixel observations (CNN encoder) and flat vector observations
    (linear encoder), multi-discrete action branches, and an optional
    observation-reconstruction decoder.
    """

    def __init__(self, params, observation_space, action_shape, max_episode_steps):
        '''
        params: hyper-parameter dict (trxl_* keys, reconstruction_coef, ...).
        observation_space: observation shape — indexed as a plain sequence
            throughout this class (e.g. (C, H, W) for pixels, (obs_dim,) for
            vectors).
        action_shape: iterable with the number of actions per branch.
        max_episode_steps: longest episode; sizes the positional encoding.
        '''
        super().__init__()
        self.obs_shape = observation_space
        self.max_episode_steps = max_episode_steps

        if len(self.obs_shape) > 1:
            # Pixel observations: Nature-CNN style encoder -> (batch, trxl_dim).
            # NOTE(review): the 64 * 7 * 7 flatten size assumes 84x84 inputs.
            self.encoder = nn.Sequential(
                layer_init(nn.Conv2d(self.obs_shape[0], 32, 8, stride=4)),
                nn.ReLU(),
                layer_init(nn.Conv2d(32, 64, 4, stride=2)),
                nn.ReLU(),
                layer_init(nn.Conv2d(64, 64, 3, stride=1)),
                nn.ReLU(),
                nn.Flatten(),
                layer_init(nn.Linear(64 * 7 * 7, params['trxl_dim'])),
                nn.ReLU(),
            )
        else:
            # Vector observations: single linear encoder -> (batch, trxl_dim).
            # Fix: the original read `observation_space.shape[0]`, but
            # observation_space is indexed as a plain shape everywhere else in
            # this class (len(...) above, self.obs_shape[0] in the pixel
            # branch), so `.shape` raised AttributeError here.
            self.encoder = layer_init(nn.Linear(self.obs_shape[0], params['trxl_dim']))

        # Transformer stack over the episodic memory.
        self.transformer = Transformer(
            params['trxl_num_layers'], params['trxl_dim'], params['trxl_num_heads'], self.max_episode_steps, params['trxl_positional_encoding']
        )

        # Post-transformer feature head shared by actor and critic.
        self.hidden_post_trxl = nn.Sequential(
            layer_init(nn.Linear(params['trxl_dim'], params['trxl_dim'])),
            nn.ReLU(),
        )

        # One categorical head per action dimension (multi-discrete actions),
        # with a small init gain for near-uniform initial policies.
        self.actor_branches = nn.ModuleList(
            [
                layer_init(nn.Linear(params['trxl_dim'], out_features=num_actions), np.sqrt(0.01))
                for num_actions in action_shape
            ]
        )

        # State-value head.
        self.critic = layer_init(nn.Linear(params['trxl_dim'], 1), 1)

        # Optional decoder reconstructing the observation from the
        # post-transformer features (auxiliary reconstruction loss that
        # encourages the encoder to keep observation-relevant features).
        # NOTE(review): output is hard-coded to 3 channels — confirm this
        # matches obs_shape[0] for the environments used.
        if params['reconstruction_coef'] > 0.0:
            self.transposed_cnn = nn.Sequential(
                layer_init(nn.Linear(params['trxl_dim'], 64 * 7 * 7)),
                nn.ReLU(),
                nn.Unflatten(1, (64, 7, 7)),
                layer_init(nn.ConvTranspose2d(64, 64, 3, stride=1)),
                nn.ReLU(),
                layer_init(nn.ConvTranspose2d(64, 32, 4, stride=2)),
                nn.ReLU(),
                layer_init(nn.ConvTranspose2d(32, 3, 8, stride=4)),
                nn.Sigmoid(),
            )

    def _features(self, x, memory, memory_mask, memory_indices):
        """Encode observations and run the transformer.

        Returns (features, new_memory): (batch, trxl_dim) features after the
        post-transformer head, and the per-layer hidden states that become
        this step's memory entry.
        """
        if len(self.obs_shape) > 1:
            x = self.encoder(x / 255.0)  # pixel observations scaled to [0, 1]
        else:
            x = self.encoder(x)
        x, memory = self.transformer(x, memory, memory_mask, memory_indices)
        x = self.hidden_post_trxl(x)
        return x, memory

    def get_value(self, x, memory, memory_mask, memory_indices):
        '''
        x: observations.
        memory: the last trxl_memory_length worth of episodic memory.
        memory_mask: attention mask for that window.
        memory_indices: window slot indices (for positional lookup).

        Returns: state values, shape (batch,).
        '''
        x, _ = self._features(x, memory, memory_mask, memory_indices)
        return self.critic(x).flatten()

    def get_action_and_value(self, x, memory, memory_mask, memory_indices, action=None):
        '''
        x: observations.
        memory: episodic memory window.
        memory_mask: attention mask for the window.
        memory_indices: window slot indices (for positional lookup).
        action: optional pre-selected actions (batch, num_branches); sampled
            from the policy when None.

        Returns:
        action — the given or sampled actions,
        log_probs — per-branch log-probabilities of those actions,
        entropy — summed per-branch policy entropy, shape (batch,),
        value — critic output, shape (batch,),
        memory — new per-layer hidden states (each transformer layer's input:
            the encoder output for layer 0, layer i-1's output for layer i).
        '''
        x, memory = self._features(x, memory, memory_mask, memory_indices)
        # Cache features for reconstruct_observation().
        self.x = x
        # One categorical distribution per action branch.
        probs = [Categorical(logits=branch(x)) for branch in self.actor_branches]
        if action is None:
            # No action supplied: sample from the current policy.
            action = torch.stack([dist.sample() for dist in probs], dim=1)
        log_probs = [dist.log_prob(action[:, i]) for i, dist in enumerate(probs)]
        entropies = torch.stack([dist.entropy() for dist in probs], dim=1).sum(1).reshape(-1)
        return action, torch.stack(log_probs, dim=1), entropies, self.critic(x).flatten(), memory

    def get_action(self, x, memory, memory_mask, memory_indices):
        """Greedy (argmax) action selection; returns (action, new_memory)."""
        x, memory = self._features(x, memory, memory_mask, memory_indices)
        # Cache features for reconstruct_observation().
        self.x = x
        probs = [Categorical(logits=branch(x)) for branch in self.actor_branches]
        action = torch.stack([dist.probs.argmax(dim=-1) for dist in probs], dim=1)
        return action, memory

    def reconstruct_observation(self):
        '''
        Decode the cached post-transformer features back into observation
        pixels (values in [0, 1] via the final Sigmoid); returned in NHWC
        layout.
        '''
        x = self.transposed_cnn(self.x)
        return x.permute((0, 2, 3, 1))