from torch import (
    zeros,
    tensor,
    long,
    Tensor
)
from torch.nn import (
    Module,
    RNN,
    LSTM,
    GRU,
    Linear,
    LogSoftmax
)
from torch.utils.data import Dataset
from string import ascii_letters

# Character vocabulary for one-hot encoding names: all ASCII letters
# plus space and the punctuation that appears in the name data.
LETTERS = ascii_letters + ' .,;\''
# Class labels; a sample's label is encoded as its index in this list.
# NOTE(review): despite the name, the entries are languages/nationalities,
# not strictly countries.
COUNTRIES = [
    'Arabic', 'Chinese', 'Czech', 'Dutch', 'English', 'French',
    'German', 'Greek', 'Irish', 'Italian', 'Japanese', 'Korean',
    'Polish', 'Portuguese', 'Russian', 'Scottish', 'Spanish', 'Vietnamese'
]

class NameClassDataset(Dataset):
    """Name Classification Dataset.

    Each item is a pair ``(one-hot name tensor, country-index tensor)``
    placed on ``device``.
    """
    def __init__(self, names: list[str], countries: list[str],
                 device: str = 'cuda'):
        """
        Initialize the dataset with a list of names and a parallel
        list of countries.

        Args:
            names: A list of names
            countries: A list of countries, aligned index-for-index
                with ``names``
            device: Device the returned tensors are moved to
                (default ``'cuda'`` for backward compatibility)
        """
        super().__init__()
        self.names = names
        self.countries = countries
        self.device = device
        self.num_names = len(self.names)

    def __len__(self) -> int:
        return self.num_names

    def __getitem__(self, idx: int) -> tuple[Tensor, Tensor]:
        # Fixed return annotation: the method returns exactly two tensors,
        # not the four-element tuple the original annotation claimed.
        # Clamp out-of-range indices instead of raising IndexError
        # (preserves the original permissive behavior).
        idx = min(max(idx, 0), self.num_names - 1)
        name = self.names[idx]
        country = self.countries[idx]
        # One-hot encode the name: shape (len(name), len(LETTERS)).
        tensor_name = zeros(len(name), len(LETTERS)).to(self.device)
        tensor_country = tensor(COUNTRIES.index(country), dtype=long).to(self.device)
        for pos, letter in enumerate(name):
            # NOTE(review): str.find returns -1 for characters outside
            # LETTERS, which would silently flag the *last* column —
            # confirm inputs are pre-filtered to the LETTERS vocabulary.
            tensor_name[pos][LETTERS.find(letter)] = 1
        return tensor_name, tensor_country

class RNNRebuild(Module):
    def __init__(self, input_size: int, hidden_size: int,
                 output_size: int, num_layers: int = 1,
                 batch_first: bool = False, device: str = 'cuda'):
        """
        RNN rebuild model for name classification,
        separated with original RNN model from PyTorch.

        Args:
            input_size: The number of expected features in the input
            hidden_size: The number of features in the hidden state
            output_size: The number of output features (classes)
            num_layers: The number of recurrent layers
            batch_first: If True, input/output tensors are laid out as
                (batch, seq, feature) instead of (seq, batch, feature)
            device: Device the model and hidden states live on
                (default ``'cuda'`` for backward compatibility)
        """
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.device = device

        self.rnn = RNN(
            input_size=self.input_size,
            hidden_size=self.hidden_size,
            num_layers=self.num_layers,
            batch_first=self.batch_first
        ).to(device)
        self.linear = Linear(self.hidden_size, self.output_size).to(device)
        self.softmax = LogSoftmax(dim=-1).to(device)

    def forward(self, inputs: Tensor,
                hidden: Tensor) -> tuple[Tensor, Tensor]:
        """Run one name through the RNN.

        Args:
            inputs: (seq_len, input_size) tensor — one name, one-hot rows.
                NOTE(review): the unsqueeze below assumes batch_first=False
                layout; confirm before constructing with batch_first=True.
            hidden: (num_layers, 1, hidden_size) initial hidden state.

        Returns:
            (log-probabilities of shape (1, output_size), final hidden state)
        """
        inputs = inputs.unsqueeze(1)  # add batch dim: (seq_len, 1, input_size)
        rr, hn = self.rnn(inputs, hidden)
        # Classify from the last time step's output only.
        tmp_rr = rr[-1]
        tmp_rr = self.linear(tmp_rr)
        return self.softmax(tmp_rr), hn

    def init_hidden(self) -> Tensor:
        """Return a zeroed initial hidden state for a batch of one."""
        return zeros(self.num_layers, 1, self.hidden_size).to(self.device)

class LSTMRebuild(Module):
    def __init__(self, input_size: int, hidden_size: int,
                 output_size: int, num_layers: int = 1,
                 batch_first: bool = False, device: str = 'cuda'):
        """
        LSTM rebuild model for name classification,
        separated with original LSTM model from PyTorch.

        Args:
            input_size: The number of expected features in the input
            hidden_size: The number of features in the hidden state
            output_size: The number of output features (classes)
            num_layers: The number of recurrent layers
            batch_first: If True, input/output tensors are laid out as
                (batch, seq, feature) instead of (seq, batch, feature)
            device: Device the model and hidden states live on
                (default ``'cuda'`` for backward compatibility)
        """
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.device = device

        self.lstm = LSTM(
            input_size=self.input_size,
            hidden_size=self.hidden_size,
            num_layers=self.num_layers,
            batch_first=self.batch_first
        ).to(device)
        self.linear = Linear(self.hidden_size, self.output_size).to(device)
        self.softmax = LogSoftmax(dim=-1).to(device)

    def forward(self, inputs: Tensor, hidden: Tensor,
                c: Tensor) -> tuple[Tensor, Tensor, Tensor]:
        """Run one name through the LSTM.

        Args:
            inputs: (seq_len, input_size) tensor — one name, one-hot rows.
                NOTE(review): the unsqueeze below assumes batch_first=False
                layout; confirm before constructing with batch_first=True.
            hidden: (num_layers, 1, hidden_size) initial hidden state.
            c: (num_layers, 1, hidden_size) initial cell state.

        Returns:
            (log-probabilities of shape (1, output_size),
             final hidden state, final cell state)
        """
        inputs = inputs.unsqueeze(1)  # add batch dim: (seq_len, 1, input_size)
        rr, (hn, cn) = self.lstm(inputs, (hidden, c))
        # Classify from the last time step's output only.
        tmp_rr = rr[-1]
        tmp_rr = self.linear(tmp_rr)
        return self.softmax(tmp_rr), hn, cn

    def init_hidden(self) -> tuple[Tensor, Tensor]:
        """Return zeroed initial hidden and cell states for a batch of one."""
        hidden = zeros(self.num_layers, 1, self.hidden_size).to(self.device)
        c = zeros(self.num_layers, 1, self.hidden_size).to(self.device)
        return hidden, c

class GRURebuild(Module):
    def __init__(self, input_size: int, hidden_size: int,
                 output_size: int, num_layers: int = 1,
                 batch_first: bool = False, device: str = 'cuda'):
        """
        GRU rebuild model for name classification,
        separated with original GRU model from PyTorch.
        (Docstring fixed: original was a copy-paste of the LSTM one.)

        Args:
            input_size: The number of expected features in the input
            hidden_size: The number of features in the hidden state
            output_size: The number of output features (classes)
            num_layers: The number of recurrent layers
            batch_first: If True, input/output tensors are laid out as
                (batch, seq, feature) instead of (seq, batch, feature)
            device: Device the model and hidden states live on
                (default ``'cuda'`` for backward compatibility)
        """
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.device = device

        self.gru = GRU(
            input_size=self.input_size,
            hidden_size=self.hidden_size,
            num_layers=self.num_layers,
            batch_first=self.batch_first
        ).to(device)
        self.linear = Linear(self.hidden_size, self.output_size).to(device)
        self.softmax = LogSoftmax(dim=-1).to(device)

    def forward(self, inputs: Tensor, hidden: Tensor) -> tuple[Tensor, Tensor]:
        """Run one name through the GRU.

        Args:
            inputs: (seq_len, input_size) tensor — one name, one-hot rows.
                NOTE(review): the unsqueeze below assumes batch_first=False
                layout; confirm before constructing with batch_first=True.
            hidden: (num_layers, 1, hidden_size) initial hidden state.

        Returns:
            (log-probabilities of shape (1, output_size), final hidden state)
        """
        inputs = inputs.unsqueeze(1)  # add batch dim: (seq_len, 1, input_size)
        rr, hn = self.gru(inputs, hidden)
        # Classify from the last time step's output only.
        tmp_rr = rr[-1]
        tmp_rr = self.linear(tmp_rr)
        return self.softmax(tmp_rr), hn

    def init_hidden(self) -> Tensor:
        """Return a zeroed initial hidden state for a batch of one."""
        return zeros(self.num_layers, 1, self.hidden_size).to(self.device)

# Type alias: any of the three recurrent classifier variants defined above.
Model = RNNRebuild | LSTMRebuild | GRURebuild
