import torch.nn as nn

from Decoder import Decoder
from Encoder import Encoder
from config import *


class Transformer(nn.Module):
    """
    Encoder-decoder Transformer: encodes `inputs`, decodes `outputs` against
    the encoder memory, projects onto the target vocabulary, and returns
    log-probabilities (suitable for nn.NLLLoss).

    example:
    import torch
    input_ = torch.ones([2, 10], dtype=torch.int64)
    output_ = torch.ones([2, 20], dtype=torch.int64)
    transformer = Transformer()
    print(torch.sum(transformer(input_, output_)[0][0]))
    print(transformer(input_, output_))
    print(transformer(input_, output_).shape)
    print(transformer)
    """

    def __init__(self):
        super().__init__()
        self.encoder = Encoder()
        self.decoder = Decoder()
        # Project decoder hidden states (d_model) onto target-vocab scores.
        self.fc = nn.Linear(d_model, vocab_target_len)
        # BUGFIX: was nn.Softmax(dim=-1). The loss experiments commented out
        # below feed this model's output straight into nn.NLLLoss, which
        # expects *log*-probabilities — plain softmax there silently yields a
        # wrong loss (the author even worked around it manually with
        # softmax + torch.log). LogSoftmax keeps the output shape identical.
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, inputs, outputs):
        """
        Run one teacher-forced forward pass.

        Args:
            inputs:  source token ids, e.g. shape (batch, src_len), int64
                     (per the docstring example).
            outputs: target token ids, e.g. shape (batch, tgt_len), int64.

        Returns:
            Log-probabilities over the target vocabulary — presumably shaped
            (batch, tgt_len, vocab_target_len); depends on Decoder's output.
        """
        encoder_output = self.encoder(inputs)
        decoder_output = self.decoder(outputs, encoder_output)
        return self.softmax(self.fc(decoder_output))

# test loss
# input_ = torch.ones([2, 2], dtype=torch.int64)
# output_ = torch.ones([2, 3], dtype=torch.int64)
# transformer = Transformer()
# print(torch.sum(transformer(input_, output_)[0][0]))
# pre = transformer(input_, output_)
# ground = torch.tensor([[777, 34, 266],
#                        [67, 1234, 875]], dtype=torch.int64)
# loss = nn.NLLLoss()
# print(loss(pre, ground))
# predict = torch.rand([1, 3, 5])
# predict = torch.nn.functional.softmax(predict, dim=-1)
# predict = torch.log(predict)
# predict = predict.view(predict.shape[0] * predict.shape[1], -1)
# print(predict)
# label = torch.tensor([2, 0, 0])
# print(label)
# label = label.view(-1)

# predict = torch.tensor([[-1.7084, -1.7156, -1.2746, -1.7787, -1.6577],
#         [-2.0025, -1.3667, -1.6205, -1.4913, -1.6756],
#         [-1.4653, -1.5864, -1.7044, -1.8210, -1.5115]])
# print(predict)
# label = torch.tensor([1, 4, 0])  # must have 3 entries (one per predict row), each index < 5
# print(loss(predict, label))
# print(pre)
# print(pre.shape)
# print(transformer)
