import random

import torch
import math
from torch.autograd import Variable
from Tuatara_Transfromer.embedding import Embeddings
from Tuatara_Transfromer.positional_encoding import PositionalEncoding
from Tuatara_Transfromer.multi_head_attention import MultiHeadAttention
from Tuatara_Transfromer.multi_head_attention_v2 import MultiHeadAttention_2

def test_example():
    """Smoke test of the full pipeline at realistic size:
    embeddings -> positional encoding -> multi-head attention (d_model=512, 8 heads).

    Prints intermediate tensors; returns nothing.
    """
    torch.manual_seed(0)

    d_model = 512  # embedding dimension
    vocab = 1000   # vocabulary size

    # --- raw token ids (batch=2, seq_len=4) ---
    # Renamed from `input` to avoid shadowing the builtin.
    tokens = torch.LongTensor([[1, 2, 3, 4], [5, 6, 7, 8]])

    # --- embedding layer ---
    my_embeddings = Embeddings(d_model, vocab)
    x_embedded = my_embeddings(tokens)

    print("")
    print("x_embedded")
    print(x_embedded)
    print(x_embedded.shape)

    # --- positional encoding ---
    pe_dropout_ratio = 0.1  # dropout probability
    max_len = 60  # maximum sentence length
    pe = PositionalEncoding(d_model=d_model, dropout_ratio=pe_dropout_ratio, max_len=max_len)
    pe_result = pe(x_embedded)
    print("pe_result:", pe_result)

    # --- multi-head attention ---
    # number of heads
    head = 8

    # self-attention: Q, K and V are the same tensor
    query = value = key = pe_result

    # all-zeros attention mask, shape (head, seq_len, seq_len)
    # NOTE: torch.autograd.Variable has been a deprecated no-op since
    # PyTorch 0.4 -- a plain tensor behaves identically.
    mask = torch.zeros(8, 4, 4)

    # dropout probability inside the attention module
    attention_dropout_ratio = 0.0

    # mha = MultiHeadAttention(head, d_model, dropout_ratio=attention_dropout_ratio)
    mha = MultiHeadAttention_2(head, d_model, dropout_ratio=attention_dropout_ratio)
    mha_result = mha(query, key, value, mask)

    print("")
    print("multi head attention Result")
    print(mha_result)
    print(mha_result.shape)



def test_debug():
    """Small-size variant of test_example for step-debugging:
    d_model=8, 2 heads, no mask.

    Prints intermediate tensors; returns nothing.
    """
    torch.manual_seed(0)

    d_model = 8    # embedding dimension (small for easy inspection)
    vocab = 1000   # vocabulary size

    # --- raw token ids (batch=2, seq_len=4) ---
    # Renamed from `input` to avoid shadowing the builtin.
    tokens = torch.LongTensor([[1, 2, 3, 4], [5, 6, 7, 8]])

    # --- embedding layer ---
    my_embeddings = Embeddings(d_model, vocab)
    x_embedded = my_embeddings(tokens)

    print("")
    print("x_embedded")
    print(x_embedded)
    print(x_embedded.shape)

    # --- positional encoding ---
    pe_dropout_ratio = 0.1  # dropout probability
    max_len = 60  # maximum sentence length
    pe = PositionalEncoding(d_model=d_model, dropout_ratio=pe_dropout_ratio, max_len=max_len)
    pe_result = pe(x_embedded)
    print("pe_result:", pe_result)

    # --- multi-head attention ---
    # number of heads
    head = 2

    # self-attention: Q, K and V are the same tensor
    query = value = key = pe_result

    # No mask in this scenario.
    # (The original built a zeros mask via the deprecated Variable wrapper and
    # then immediately overwrote it with None -- the dead assignment is removed.)
    mask = None

    # dropout probability inside the attention module
    attention_dropout_ratio = 0.0

    # mha = MultiHeadAttention(head, d_model, dropout_ratio=attention_dropout_ratio)
    mha = MultiHeadAttention_2(head, d_model, dropout_ratio=attention_dropout_ratio)
    mha_result = mha(query, key, value, mask)

    print("")
    print("multi head attention Result")
    print(mha_result)
    print(mha_result.shape)


def test_simple():
    """Minimal multi-head attention smoke test on random input.

    Skips the embedding/positional-encoding stages entirely and feeds random
    tensors straight into the attention module. Prints the output and shapes;
    returns nothing.
    """
    torch.manual_seed(0)

    # Parameters
    embed_dim = 8  # input feature dimension
    num_heads = 2  # number of attention heads
    seq_len = 4  # sequence length
    batch_size = 2  # batch size

    query = key = value = torch.randn(batch_size, seq_len, embed_dim)
    # print("")
    # print(query)
    # print(query.shape)

    # Build the multi-head attention module under test
    # mha = MultiHeadAttention(num_heads, embed_dim, dropout_ratio=0)
    mha = MultiHeadAttention_2(num_heads, embed_dim, dropout_ratio=0)

    # Forward pass. NOTE(review): here the module is unpacked into
    # (output, attn_weights), unlike the single-value use in the other tests;
    # presumably MultiHeadAttention_2 returns both -- verify against its code.
    output, attn_weights = mha(query, key, value)

    print("Multi Head Output")
    print(output)

    # Expected (batch_size, seq_len, embed_dim) = (2, 4, 8) if attention
    # preserves input shape -- the old "[4, 10, 512]" note was stale, copied
    # from a run with different parameters.
    print("Output shape:", output.shape)
    # Expected (batch_size, num_heads, seq_len, seq_len) = (2, 2, 4, 4) --
    # TODO confirm against MultiHeadAttention_2; the old "[4, 8, 10, 10]"
    # note was stale as well.
    print("Attention weights shape:", attn_weights.shape)



if __name__ == '__main__':
    # Enable whichever scenario you want to run:
    # test_example()
    # test_debug()
    test_simple()
    # (A large block of stale commented-out embedding/positional-encoding
    # experiment code was removed here; it duplicated the covered scenarios
    # and called PositionalEncoding with an outdated `dropout=` keyword
    # instead of `dropout_ratio=`.)