import torch
from torch.distributions import Categorical
from torch.nn import functional as F


# When `deterministic` is False, sampling keeps exploration during training.
# Actions are produced one agent at a time: each agent's input depends on the actions of all
# preceding agents (passed along via shifted_action). Used when collecting rollouts.
def discrete_autoregressive_act(decoder, obs_rep, obs, batch_size, n_agent, action_dim, tpdv,
                                available_actions=None, deterministic=False):
    """Sample discrete actions autoregressively, one agent at a time (rollout path).

    Agent i's logits are conditioned on the actions already chosen by agents
    0..i-1, which are fed back into the decoder through ``shifted_action``.

    Args:
        decoder: callable ``(shifted_action, obs_rep, obs) -> logits`` of shape
            ``(batch, n_agent, action_dim)``.
        obs_rep, obs: passed through to the decoder unchanged.
        batch_size, n_agent, action_dim: problem dimensions.
        tpdv: dict of tensor kwargs (``dtype``/``device``) used via ``.to(**tpdv)``.
        available_actions: optional ``(batch, n_agent, action_dim)`` 0/1 mask;
            entries equal to 0 are made unselectable.
        deterministic: if True take the argmax action, otherwise sample
            (sampling keeps exploration during training).

    Returns:
        Tuple ``(output_action, output_action_log)``, both ``(batch, n_agent, 1)``:
        chosen action indices (long) and their log-probabilities (float32).
    """
    # Slot 0 is a start token (set to 1 for the first agent); slots 1..action_dim
    # hold the one-hot action of the previous agent, so every agent "sees" its
    # predecessors' choices.
    shifted_action = torch.zeros((batch_size, n_agent, action_dim + 1)).to(**tpdv)
    shifted_action[:, 0, 0] = 1
    # Allocate outputs on the same device as shifted_action: a default (CPU)
    # allocation would force a cross-device copy on every per-agent write below
    # when tpdv targets CUDA.
    output_action = torch.zeros((batch_size, n_agent, 1), dtype=torch.long,
                                device=shifted_action.device)
    output_action_log = torch.zeros_like(output_action, dtype=torch.float32)

    for i in range(n_agent):
        logit = decoder(shifted_action, obs_rep, obs)[:, i, :]
        if available_actions is not None:
            # Out-of-place masking: avoid mutating the decoder's output tensor.
            logit = logit.masked_fill(available_actions[:, i, :] == 0, -1e10)

        distri = Categorical(logits=logit)
        # Masking happens before the argmax, so even the deterministic path
        # never selects an unavailable action.
        action = distri.probs.argmax(dim=-1) if deterministic else distri.sample()
        action_log = distri.log_prob(action)

        output_action[:, i, :] = action.unsqueeze(-1)
        output_action_log[:, i, :] = action_log.unsqueeze(-1)
        # Feed this agent's action into the next agent's conditioning slot.
        if i + 1 < n_agent:
            shifted_action[:, i + 1, 1:] = F.one_hot(action, num_classes=action_dim)

    return output_action, output_action_log


# Evaluate given discrete actions for all agents in parallel (training path).
def discrete_parallel_act(decoder, obs_rep, obs, action, batch_size, n_agent, action_dim, tpdv,
                          available_actions=None):
    """Score the given joint actions in one decoder pass (teacher forcing, training path).

    Args:
        decoder: callable ``(shifted_action, obs_rep, obs) -> logits`` of shape
            ``(batch, n_agent, action_dim)``.
        obs_rep, obs: passed through to the decoder unchanged.
        action: ``(batch, n_agent, 1)`` long tensor of taken action indices.
        batch_size, n_agent, action_dim: problem dimensions.
        tpdv: dict of tensor kwargs (``dtype``/``device``) used via ``.to(**tpdv)``.
        available_actions: optional ``(batch, n_agent, action_dim)`` 0/1 mask;
            entries equal to 0 are masked out of the distribution.

    Returns:
        Tuple ``(action_log, entropy)``, both ``(batch, n_agent, 1)``.
    """
    acts = action.squeeze(-1)                                # (batch, n_agent)
    acts_onehot = F.one_hot(acts, num_classes=action_dim)    # (batch, n_agent, action_dim)

    # Teacher forcing: agent i is conditioned on the ground-truth actions of
    # agents < i. Slot 0 carries the start token for the first agent; slots
    # 1..action_dim carry the previous agent's one-hot action.
    shifted = torch.zeros((batch_size, n_agent, action_dim + 1)).to(**tpdv)
    shifted[:, 0, 0] = 1
    shifted[:, 1:, 1:] = acts_onehot[:, :-1, :]

    logits = decoder(shifted, obs_rep, obs)
    if available_actions is not None:
        # Push unavailable actions to ~zero probability.
        logits = logits.masked_fill(available_actions == 0, -1e10)

    dist = Categorical(logits=logits)
    return dist.log_prob(acts).unsqueeze(-1), dist.entropy().unsqueeze(-1)
