import unittest
import torch
from src.module import AbsolutePositionalEncoding, MultiHeadAttention, EncoderLayer, DecoderLayer, Transformer


class TransformerTest(unittest.TestCase):
    """Shape-level smoke tests for the Transformer building blocks in src.module."""

    def setUp(self):
        # Hyperparameters shared across tests (classic "Attention Is All You Need" sizes).
        self.d_model = 512
        self.num_heads = 8
        self.d_ff = 2048
        self.num_layers = 6
        self.dropout = 0.1
        self.src_vocab_size = 100
        self.tgt_vocab_size = 100
        self.batch_size = 2
        self.seq_length = 10
        # BUG FIX: the original passed self.num_heads twice and never used
        # self.num_layers; the fifth positional argument is the layer count.
        # Assumed signature: Transformer(src_vocab, tgt_vocab, d_model,
        # num_heads, num_layers, d_ff, dropout) — confirm against src.module.
        self.transformer = Transformer(self.src_vocab_size, self.tgt_vocab_size,
                                       self.d_model, self.num_heads,
                                       self.num_layers, self.d_ff, self.dropout)

    def test_positional_encoding(self):
        """Positional encoding must preserve the input tensor's shape."""
        pos_encoding = AbsolutePositionalEncoding(self.d_model)
        input_tensor = torch.zeros((1, 100, self.d_model))
        output = pos_encoding(input_tensor)
        self.assertEqual(output.shape, input_tensor.shape)

    def test_multi_head_attention(self):
        """Self-attention over (batch, seq, d_model) inputs keeps that shape."""
        mha = MultiHeadAttention(self.d_model, self.num_heads)
        Q = torch.randn(self.batch_size, self.seq_length, self.d_model)
        K = torch.randn(self.batch_size, self.seq_length, self.d_model)
        V = torch.randn(self.batch_size, self.seq_length, self.d_model)
        output = mha(Q, K, V)
        self.assertEqual(output.shape, (self.batch_size, self.seq_length, self.d_model))

    def test_encoder_layer(self):
        """A single encoder layer is shape-preserving under a trivial mask."""
        encoder_layer = EncoderLayer(self.d_model, self.num_heads, self.d_ff, self.dropout)
        x = torch.randn(self.batch_size, self.seq_length, self.d_model)
        # All-ones mask: nothing is masked out; shape (batch, 1, seq) broadcasts
        # over attention heads — presumably matches src.module's mask convention.
        mask = torch.ones(self.batch_size, 1, self.seq_length)
        output = encoder_layer(x, mask)
        self.assertEqual(output.shape, (self.batch_size, self.seq_length, self.d_model))

    def test_decoder_layer(self):
        """A single decoder layer (self- + cross-attention) is shape-preserving."""
        decoder_layer = DecoderLayer(self.d_model, self.num_heads, self.d_ff, self.dropout)
        x = torch.randn(self.batch_size, self.seq_length, self.d_model)
        encoder_output = torch.randn(self.batch_size, self.seq_length, self.d_model)
        src_mask = torch.ones(self.batch_size, 1, self.seq_length)
        tgt_mask = torch.ones(self.batch_size, 1, self.seq_length)
        output = decoder_layer(x, encoder_output, src_mask, tgt_mask)
        self.assertEqual(output.shape, (self.batch_size, self.seq_length, self.d_model))

    def test_transformer_forward(self):
        """Full forward pass maps token ids to per-position target-vocab logits."""
        # Token-id inputs in [0, vocab_size), shape (batch, seq).
        src = torch.randint(0, self.src_vocab_size, (self.batch_size, self.seq_length))
        tgt = torch.randint(0, self.tgt_vocab_size, (self.batch_size, self.seq_length))
        src_mask = torch.ones(self.batch_size, 1, self.seq_length)
        tgt_mask = torch.ones(self.batch_size, 1, self.seq_length)
        output = self.transformer(src, tgt, src_mask, tgt_mask)
        self.assertEqual(output.shape, (self.batch_size, self.seq_length, self.tgt_vocab_size))


# Allow running this test module directly (python path/to/file.py).
if __name__ == '__main__':
    unittest.main()
