import torch
import torch.nn as nn
import torch.nn.functional as F


class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product self-attention with an optional KV cache.

    During incremental (token-by-token) evaluation, the key/value projections
    of previously seen tokens are cached so that only the newest token's K/V
    need to be computed. The cache assumes that, between consecutive calls,
    the new input `x` equals the previous input with exactly one token
    appended (prefix unchanged) — TODO(review): confirm callers honor this.
    """

    def __init__(self, embed_size, heads):
        """
        Args:
            embed_size: total embedding dimension; must be divisible by `heads`.
            heads: number of attention heads.
        """
        super(MultiHeadAttention, self).__init__()
        self.embed_size = embed_size
        self.heads = heads
        self.head_dim = embed_size // heads

        assert self.head_dim * heads == embed_size, "Embedding size must be divisible by number of heads"

        # Linear projections for query, key and value.
        self.wq = nn.Linear(embed_size, embed_size)
        self.wk = nn.Linear(embed_size, embed_size)
        self.wv = nn.Linear(embed_size, embed_size)

        # Cached K/V projections, shape (batch, cached_seq_len, embed_size).
        self.k_cache = None
        self.v_cache = None

        # Final output projection.
        self.fc_out = nn.Linear(embed_size, embed_size)

    def reset_cache(self):
        """Drop the KV cache. Call between independent sequences."""
        self.k_cache = None
        self.v_cache = None

    def split_heads(self, x):
        # (batch, seq_len, embed) -> (batch, heads, seq_len, head_dim)
        batch_size = x.shape[0]
        x = x.view(batch_size, -1, self.heads, self.head_dim)
        return x.permute(0, 2, 1, 3)

    def forward(self, x: torch.Tensor, mask=None, use_kv_cache=True):
        """Compute self-attention over `x`.

        Args:
            x: input of shape (batch, seq_len, embed_size).
            mask: optional attention mask; positions where `mask == 0` are
                filled with -1e20 before the softmax.
            use_kv_cache: reuse cached K/V projections when in eval mode and
                `x` extends the previously seen sequence by one token.

        Returns:
            Tensor of shape (batch, seq_len, embed_size).
        """
        batch_size, seq_length, embed_size = x.shape

        if use_kv_cache and not self.training:
            # Queries are always recomputed for the full sequence so the
            # cached path returns exactly what the uncached path would.
            # (The previous implementation zero-padded Q for the prefix,
            # which corrupted the outputs of every position but the last.)
            xq = self.wq(x)
            cache_usable = (
                self.k_cache is not None
                and self.k_cache.shape[0] == batch_size
                and self.k_cache.shape[1] == seq_length - 1
            )
            if cache_usable:
                token = x[:, -1:, :]  # only the newest token needs new K/V
                xk = torch.cat((self.k_cache, self.wk(token)), dim=1)
                xv = torch.cat((self.v_cache, self.wv(token)), dim=1)
            else:
                # Cache miss (first call, batch change, or non-incremental
                # input): recompute K/V for the whole sequence.
                xk, xv = self.wk(x), self.wv(x)
            self.k_cache, self.v_cache = xk, xv  # refresh the cache
        else:
            xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)

        # Split into heads: (batch, heads, seq_len, head_dim)
        query = self.split_heads(xq)
        key = self.split_heads(xk)
        value = self.split_heads(xv)

        # Scaled dot-product attention scores: (batch, heads, q_len, k_len)
        energy = torch.matmul(query, key.transpose(-2, -1))

        if mask is not None:
            energy = energy.masked_fill(mask == 0, float("-1e20"))

        attention = torch.softmax(energy / (self.head_dim ** 0.5), dim=-1)

        # Weighted sum of values: (batch, heads, q_len, head_dim)
        out = torch.matmul(attention, value)

        # Merge heads back:
        # (batch, heads, q_len, head_dim) -> (batch, q_len, embed_size)
        out = out.permute(0, 2, 1, 3).contiguous().view(batch_size, -1,
                                                        self.heads * self.head_dim)

        # Final linear projection: (batch, q_len, embed_size)
        return self.fc_out(out)


if __name__ == '__main__':
    # Smoke test: one forward pass through MultiHeadAttention.
    attention_layer = MultiHeadAttention(512, 8)
    attention_layer.eval()

    x = torch.randn(10, 32, 512)
    # Inference only — no gradients needed.
    with torch.no_grad():
        out = attention_layer(x)
    print(out.shape)  # torch.Size([10, 32, 512])