from torch import (
    zeros,
    cat,
    bmm,
    tensor,
    long,
    Tensor
)
from torch.nn import (
    Module,
    Embedding,
    Linear,
    LogSoftmax,
    Dropout,
    GRU,
    functional as F
)
from torch.utils.data import Dataset
from .core import (
    EOS_token,
    MAX_LENGTH,
    device,
    get_data
)

class PairDataset(Dataset):
    """Dataset of (English, French) sentence pairs served as index tensors."""

    def __init__(self, data_path: str):
        """Load vocabularies and sentence pairs from data_path via get_data."""
        # get_data returns vocab mappings for both languages plus the raw pairs
        self.en_word2idx, _, _, self.fr_word2idx, _, _, self.pairs = get_data(data_path)
        self.num_pairs = len(self.pairs)

    def __len__(self) -> int:
        """Number of sentence pairs available."""
        return self.num_pairs

    def __getitem__(self, idx: int) -> tuple[Tensor, Tensor]:
        """Return the idx-th pair as (english_tensor, french_tensor) of dtype long."""
        # clamp the index into the valid range rather than raising
        if idx < 0:
            idx = 0
        elif idx >= self.num_pairs:
            idx = self.num_pairs - 1
        en_sentence, fr_sentence = self.pairs[idx]

        # map each word to its vocabulary index and terminate with EOS
        en_ids = [self.en_word2idx[w] for w in en_sentence.split(' ')] + [EOS_token]
        fr_ids = [self.fr_word2idx[w] for w in fr_sentence.split(' ')] + [EOS_token]

        return (
            tensor(en_ids, dtype=long, device=device),
            tensor(fr_ids, dtype=long, device=device),
        )

class Encoder(Module):
    """GRU encoder: embeds source token indices and runs them through a GRU."""

    def __init__(self, input_size: int, hidden_size: int):
        """input_size: source vocabulary size; hidden_size: embedding/GRU width."""
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size

        # embedding maps token ids to hidden_size vectors; the GRU keeps the same width
        self.embedding = Embedding(input_size, hidden_size, device=device)
        self.gru = GRU(hidden_size, hidden_size, batch_first=True, device=device)

    def forward(self, inputs: Tensor, hidden: Tensor) -> tuple[Tensor, Tensor]:
        """Encode one batch of token indices.

        inputs: [1, seq] long tensor, embedded to [1, seq, hidden_size];
        hidden: [1, 1, hidden_size].
        Returns (outputs [1, seq, hidden_size], hidden [1, 1, hidden_size]).
        """
        embedded = self.embedding(inputs)
        return self.gru(embedded, hidden)

    def init_hidden(self) -> Tensor:
        """Fresh all-zero hidden state of shape [1, 1, hidden_size]."""
        return zeros(1, 1, self.hidden_size, device=device)

class Decoder(Module):
    """GRU decoder without attention: embedding -> ReLU -> GRU -> log-softmax."""

    def __init__(self, output_size: int, hidden_size: int):
        """output_size: target vocabulary size; hidden_size: embedding/GRU width."""
        super().__init__()
        self.output_size = output_size
        self.hidden_size = hidden_size

        self.embedding = Embedding(output_size, hidden_size, device=device)
        self.gru = GRU(hidden_size, hidden_size, batch_first=True, device=device)
        # projection back onto the vocabulary, followed by log-probabilities
        self.outputs = Linear(hidden_size, output_size, device=device)
        self.softmax = LogSoftmax(dim=-1)

    def forward(self, inputs: Tensor, hidden: Tensor) -> tuple[Tensor, Tensor]:
        """One decoding step (or a full teacher-forced sequence).

        inputs: [1, seq] long tensor; hidden: [1, 1, hidden_size].
        Returns (log-probs [seq, output_size], new hidden [1, 1, hidden_size]).
        """
        embedded = F.relu(self.embedding(inputs))
        gru_out, hidden = self.gru(embedded, hidden)
        # drop the batch dim before projecting: [1, seq, H] -> [seq, H] -> [seq, V]
        log_probs = self.softmax(self.outputs(gru_out[0]))
        return log_probs, hidden

    def init_hidden(self) -> Tensor:
        """Fresh all-zero hidden state of shape [1, 1, hidden_size]."""
        return zeros(1, 1, self.hidden_size, device=device)

class AttDecoder(Module):
    """GRU decoder with attention over the encoder outputs."""

    def __init__(self, output_size: int, hidden_size: int, dropout_p: float=0.1, max_lenth: int=MAX_LENGTH):
        """output_size: target vocabulary size; hidden_size: model width;
        dropout_p: dropout applied to the embedding; max_lenth: number of
        encoder positions the attention layer scores.
        """
        super().__init__()
        self.output_size = output_size
        self.hidden_size = hidden_size
        self.dropout_p = dropout_p
        self.max_lenth = max_lenth

        self.embedding = Embedding(output_size, hidden_size, device=device)
        # attention scores each encoder position from [embedded; hidden]
        self.att = Linear(hidden_size * 2, max_lenth, device=device)
        # mixes the attended context back into the embedding width
        self.att_combine = Linear(hidden_size * 2, hidden_size, device=device)
        self.dropout = Dropout(dropout_p)
        self.gru = GRU(hidden_size, hidden_size, batch_first=True, device=device)
        # output projection and log-probabilities over the vocabulary
        self.outputs = Linear(hidden_size, output_size, device=device)
        self.softmax = LogSoftmax(dim=-1)

    def forward(self, inputs: Tensor, hidden: Tensor, encoder_outputs: Tensor) -> tuple[Tensor, Tensor, Tensor]:
        """One attention-decoding step.

        inputs: [1, 1] long tensor (current target token); hidden: [1, 1, H];
        encoder_outputs: [max_lenth, H] (unsqueezed here to a batch of 1).
        Returns (log-probs [1, output_size], new hidden [1, 1, H],
        attention weights [1, max_lenth]).
        """
        # [1, 1] -> [1, 1, H], with dropout for regularization
        embedded = self.dropout(self.embedding(inputs))
        # attention weights over encoder positions: [1, max_lenth]
        att_weights = F.softmax(self.att(cat((embedded[0], hidden[0]), 1)), dim=1)
        # weighted sum of encoder outputs: [1, 1, max_lenth] @ [1, max_lenth, H] -> [1, 1, H]
        context = bmm(att_weights.unsqueeze(0), encoder_outputs.unsqueeze(0))
        # fuse embedding and context, then project back to [1, 1, H]
        fused = cat((embedded[0], context[0]), 1)
        gru_in = F.relu(self.att_combine(fused).unsqueeze(0))
        gru_out, hidden = self.gru(gru_in, hidden)
        # [1, 1, H] -> [1, H] -> [1, V] log-probabilities
        log_probs = self.softmax(self.outputs(gru_out[0]))
        return log_probs, hidden, att_weights

    def init_hidden(self) -> Tensor:
        """Fresh all-zero hidden state of shape [1, 1, hidden_size]."""
        return zeros(1, 1, self.hidden_size, device=device)

if __name__ == '__main__':
    from rich import print
    from att_test import (
        pair_dataset_test,
        encoder_test,
        decoder_test,
        att_decoder_test
    )

    data_path = '../data/en2fr.txt'
    hidden_size = 8
    head = 1

    # table of (banner label, smoke test to run); each announces itself first
    smoke_tests = [
        ('Pair dataset', lambda: pair_dataset_test(data_path, head=head)),
        ('Encoder', lambda: encoder_test(data_path, hidden_size=hidden_size, head=head)),
        ('Decoder', lambda: decoder_test(data_path, hidden_size=hidden_size, head=head)),
        ('Attention decoder', lambda: att_decoder_test(data_path, hidden_size=hidden_size, head=head)),
    ]
    for label, run in smoke_tests:
        print(f'[bold gold1]{label} test script...[/]')
        run()
