from torch import argmax, zeros, no_grad, save, load
from torch.nn import NLLLoss
from torch.utils.data import DataLoader
from torch.optim import Adam
import os
from tqdm import tqdm
from json import dump
from time import time
from typing import Literal
from rich import print
# import outside modules
from common.compare_model import compare_loss, compare_time, compare_acc
from common.core import read_json, read_data
from common.name_class import (
    LETTERS,
    COUNTRIES,
    NameClassDataset,
    RNNRebuild,
    LSTMRebuild,
    GRURebuild,
    Model
)

# Accepted model selector strings ('all' loads every architecture at once)
ModelType = Literal['rnn', 'lstm', 'gru', 'all']
# Path to the name-classification dataset (format handled by common.core.read_data)
DATA_PATH = './data/name_classification.txt'
# Shared constructor kwargs for RNNRebuild / LSTMRebuild / GRURebuild
MODEL_PARAM = {
    'input_size': len(LETTERS),      # one-hot width: one slot per known letter
    'hidden_size': 128,
    'output_size': len(COUNTRIES),   # one score per country class
    'num_layers': 2,
    'batch_first': False
}

def string2tensor(x: str, device: str = 'cuda'):
    """Convert a string to a one-hot tensor of shape (len(x), len(LETTERS)).

    Args:
        x: The name to encode; each character becomes one one-hot row.
        device: Device to allocate the tensor on. Defaults to 'cuda' to
            match the rest of this script (backward compatible).

    Returns:
        A float tensor where row ``i`` has a 1 at the index of ``x[i]``
        within ``LETTERS``. Characters not present in ``LETTERS`` yield an
        all-zero row. (Previously ``str.find`` returned -1 for misses,
        which silently flipped the LAST column instead.)
    """
    tensor_x = zeros(len(x), len(LETTERS)).to(device)
    for i, letter in enumerate(x):
        idx = LETTERS.find(letter)
        if idx >= 0:  # skip characters outside the known alphabet
            tensor_x[i][idx] = 1
    return tensor_x

def load_model(model: ModelType, lr: float,
               epochs: int) -> list[Model] | Model:
    """Load trained model(s) from the ``./model/{epochs}_{lr}`` folder.

    Only the JSON metadata for the requested model(s) is read, so a single
    model can be loaded even when the other architectures were never
    trained (the original eagerly read all three files and would crash).

    Args:
        model: One of 'rnn', 'lstm', 'gru', or 'all' for all three.
        lr: Learning rate used at training time (part of the folder name).
        epochs: Epoch count used at training time (part of the folder name).

    Returns:
        A single rebuilt model, or the list [rnn, lstm, gru] when
        ``model == 'all'``.

    Raises:
        ValueError: If ``model`` is not a recognized model type
            (previously this fell through and returned ``None``).
    """
    model_classes = {'rnn': RNNRebuild, 'lstm': LSTMRebuild, 'gru': GRURebuild}

    def _load_one(name: str) -> Model:
        # The JSON written by train_name_class records where the weights live.
        train_dict = read_json(f'./model/{epochs}_{lr}/{name}_model.json')
        rebuilt = model_classes[name](**MODEL_PARAM)
        rebuilt.load_state_dict(load(train_dict['model_file']))
        return rebuilt

    if model == 'all':
        return [_load_one(name) for name in ('rnn', 'lstm', 'gru')]
    if model in model_classes:
        return _load_one(model)
    raise ValueError(f"unknown model type: {model!r}")

def train_name_class(data_path: str, model: Model,
                     model_path: str, lr: float=1e-3,
                     epochs: int=10, **kwargs) -> None:
    """
    Train a name-classification model, save its weights, and write the
    loss/accuracy history to a JSON file next to them.

    Args:
        data_path: The path to the name classification data
        model: The model class to instantiate and train (RNNRebuild,
            LSTMRebuild or GRURebuild)
        model_path: Name prefix ('rnn'/'lstm'/'gru') for the saved files
        lr: The learning rate for the optimizer
        epochs: The number of epochs to train the model
        **kwargs: Additional keyword arguments for the model
            - `input_size`: The number of expected features in the input
            - `hidden_size`: The number of features in the hidden state
            - `output_size`: The number of output features
            - `num_layers`: The number of recurrent layers
            - `batch_first`: If True, input/output tensors are expected
                as (batch, seq, feature) rather than (seq, batch, feature)
    """
    names, countries = read_data(data_path)
    name_class = NameClassDataset(names, countries)

    # instantiate the model; use NLLLoss on GPU for fast training
    # (NOTE(review): the Rebuild classes presumably place themselves on cuda)
    model_rebuild = model(**kwargs)
    nll_loss = NLLLoss().to('cuda')
    adam = Adam(model_rebuild.parameters(), lr=lr)
    start = time()

    # running counters and history lists for the saved training record
    total_iter_num, total_acc_num = 0, 0
    total_loss = 0.0
    total_loss_list, total_acc_list = [], []

    # LSTM forward takes/returns an extra cell state; check once, not per item
    is_lstm = isinstance(model_rebuild, LSTMRebuild)

    for epoch in range(epochs):
        dl = DataLoader(name_class, batch_size=1, shuffle=True)
        # make model training more readable by adding a progress bar showing the status of training
        epochs_width = len(str(epochs))
        desc = f'epoch {epoch + 1:0{epochs_width}d}/{epochs}'
        with tqdm(dl, desc=desc, leave=True) as pbar:
            # loop over the training data and train the model
            for name, country in pbar:
                if is_lstm:
                    hidden, c = model_rebuild.init_hidden()
                    outputs, hidden, c = model_rebuild(name[0], hidden, c)
                else:
                    hidden = model_rebuild.init_hidden()
                    outputs, hidden = model_rebuild(name[0], hidden)
                loss = nll_loss(outputs, country)

                # backward pass and parameter update
                adam.zero_grad()
                loss.backward()
                adam.step()

                # accumulate running loss and accuracy
                total_iter_num += 1
                total_loss += loss.item()
                total_acc_num += int(argmax(outputs).item() == country.item())

                # record and display loss/accuracy every 100 iterations
                if total_iter_num % 100 == 0:
                    tmp_loss = total_loss / total_iter_num
                    tmp_acc = total_acc_num / total_iter_num
                    total_loss_list.append(tmp_loss)
                    total_acc_list.append(tmp_acc)

                    # update progress bar postfix (ANSI colors: green acc, magenta loss)
                    pbar.set_postfix(
                        acc=f'\033[1;32m{tmp_acc:.6f}\033[0m',
                        loss=f'\033[1;35m{tmp_loss:.6f}\033[0m'
                    )

    # create `{epochs}_{lr}` directory
    os.makedirs(f'./model/{epochs}_{lr}', exist_ok=True)
    # save model weights with epochs/lr encoded in the path
    MODEL_NAME = f'./model/{epochs}_{lr}/{model_path}_model'
    save(model_rebuild.state_dict(), f'{MODEL_NAME}.bin')

    # save the training record; default=0.0 guards runs shorter than
    # 100 iterations, where the history lists stay empty (max() would raise)
    total_time = time() - start
    model_loss_acc = {
        'total_loss': total_loss_list,
        'total_acc': total_acc_list,
        'best_acc': max(total_acc_list, default=0.0),
        'total_time': total_time,
        'model_file': f'{MODEL_NAME}.bin'
    }
    with open(f'{MODEL_NAME}.json', 'w') as json_f:
        dump(model_loss_acc, json_f, indent=2)

def train(lr: float, epochs: int) -> None:
    """Train the RNN, LSTM and GRU models one after another."""
    configs = (
        (RNNRebuild, 'rnn'),
        (LSTMRebuild, 'lstm'),
        (GRURebuild, 'gru'),
    )

    for model_cls, tag in configs:
        print(f'[bold magenta]Training {tag} model:[/]')
        train_name_class(
            DATA_PATH,
            model=model_cls,
            model_path=tag,
            lr=lr,
            epochs=epochs,
            **MODEL_PARAM
        )

def compare(lr: float, epochs: int) -> None:
    """Generate and save comparison images for loss, time taken and accuracy."""
    for compare_fn in (compare_loss, compare_time, compare_acc):
        compare_fn(lr=lr, epochs=epochs)

def predict(model_type: ModelType, lr: float,
            epochs: int, name: str, k: int=3) -> None:
    """
    Predict the country of a name using RNN, LSTM and/or GRU models

    Args:
        model_type: Model type, either `rnn`, `lstm`, `gru` or `all`
        lr: Find the model with the given learning rate
        epochs: Find the model with the given epochs
        name: Name to predict
        k: Pick the top K options for prediction
    """
    # encode the name as a (len(name), len(LETTERS)) one-hot tensor
    name_tensor = string2tensor(name)

    # normalize to a list so a single model and 'all' share one code path
    if model_type == 'all':
        models = load_model(model_type, lr, epochs)
    else:
        models = [load_model(model_type, lr, epochs)]

    # display names keyed by the concrete rebuild class; .get fallback keeps
    # model_name bound even for an unexpected class (it was unbound before)
    display_names = {RNNRebuild: 'RNN', LSTMRebuild: 'LSTM', GRURebuild: 'GRU'}

    for _model in models:
        with no_grad():
            # LSTM carries an extra cell state through its forward pass
            if isinstance(_model, LSTMRebuild):
                hidden, c = _model.init_hidden()
                outputs, hidden, c = _model(name_tensor, hidden, c)
            else:
                hidden = _model.init_hidden()
                outputs, hidden = _model(name_tensor, hidden)
            # largest k scores along the class dimension
            topv, topi = outputs.topk(k, 1, True)

            model_name = display_names.get(type(_model), type(_model).__name__)

            # display result header, then the top k candidate countries
            print(f'[bold green]{model_name} -> {name}[/]')
            for i in range(k):
                value = topv[0][i]
                country_idx = topi[0][i]
                country = COUNTRIES[country_idx]

                print(
                    f'  value: [bold blue]{value:.2f}[/]'
                    f', country: [bold blue]{country}[/]'
                )

if __name__ == '__main__':
    # hyper-parameters shared by training, comparison and prediction
    learning_rate = 1e-3
    num_epochs = 20

    print('[bold blue]Training model...[/]')
    train(lr=learning_rate, epochs=num_epochs)
    print('[bold blue]Comparing loss, time and accuracy with three models...[/]')
    compare(lr=learning_rate, epochs=num_epochs)
    print('[bold blue]Predicting model...[/]')
    predict(model_type='all', lr=learning_rate, epochs=num_epochs,
            name='Andrew', k=3)
