from torch import (
    zeros,
    tensor,
    long,
    no_grad,
    save,
    load,
    Tensor
)
from torch.nn import NLLLoss
from torch.optim import Adam
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import json
from time import time
from os import makedirs
from tqdm import tqdm
from random import random
from rich import print
from common.attention import (
    PairDataset,
    Encoder,
    AttDecoder
)
from common.core import (
    MAX_LENGTH,
    SOS_token,
    EOS_token,
    get_data,
    device
)

DATA_PATH = './data/en2fr.txt'
teacher_forcing_ratio = 0.5

def train_iters(x: Tensor, y: Tensor, encoder, att_encoder, adam_encoder: Adam, adam_decoder: Adam, cross_entropy_loss) -> float:
    """
    Run one training step on a single (English, French) sentence pair.

    Args:
        x: English source sentence tensor, shape [1, src_len]
        y: French target sentence tensor, shape [1, tgt_len]
        encoder: Encoder module
        att_encoder: Attention decoder module (parameter name kept for
            backward compatibility; it is the decoder, not an encoder)
        adam_encoder: Encoder optimizer, using Adam
        adam_decoder: Decoder optimizer, using Adam
        cross_entropy_loss: Loss function (NLLLoss over log-probabilities)

    Returns:
        Average loss per target token for this step
    """
    # Encode the source sentence.
    encoder_hidden = encoder.init_hidden()
    encoder_output, encoder_hidden = encoder(x, encoder_hidden)

    # Decoder argument 1: fixed-length context matrix [MAX_LENGTH, hidden_size];
    # copy each encoder time step into the first src_len rows.
    encoder_output_c = zeros(
        MAX_LENGTH,
        encoder.hidden_size,
        device=device
    )
    for i in range(x.shape[1]):
        encoder_output_c[i] = encoder_output[0, i]
    # Decoder argument 2: encoder's final hidden state seeds the decoder.
    decoder_hidden = encoder_hidden
    # Decoder argument 3: decoding starts from the SOS token.
    input_y = tensor([[SOS_token]], device=device)

    loss = 0.0
    y_len = y.shape[1]

    # Randomly decide whether to feed ground-truth tokens back into the
    # decoder (teacher forcing) or its own greedy predictions.
    use_teacher_forcing = random() < teacher_forcing_ratio
    for i in range(y_len):
        # shapes in:  [1, 1], [1, 1, hidden], [MAX_LENGTH, hidden]
        # shapes out: [1, vocab], [1, 1, hidden], [1, MAX_LENGTH]
        output_y, decoder_hidden, _ = att_encoder(
            input_y,
            decoder_hidden,
            encoder_output_c
        )
        target_y = y[0][i].view(1)
        loss += cross_entropy_loss(output_y, target_y)

        if use_teacher_forcing:
            # Next input is the ground-truth token.
            input_y = y[0][i].view(1, -1)
        else:
            # Next input is the model's own best guess; stop at EOS.
            _, topi = output_y.topk(1)
            if topi.squeeze().item() == EOS_token:
                break
            input_y = topi.detach()

    # Clear stale gradients, backpropagate, then update both models.
    adam_encoder.zero_grad()
    adam_decoder.zero_grad()
    loss.backward()
    adam_encoder.step()
    adam_decoder.step()

    return loss.item() / y_len

def train_seq2seq(data_path: str, hidden_size: int, lr: float=1e-4, epochs: int=1) -> None:
    """
    Train the attention Seq2Seq model and persist weights plus loss history.

    Args:
        data_path: Path to the English to French translation data file
        hidden_size: Hidden size of the encoder/decoder layers
        lr: Learning rate
        epochs: Number of epochs for training

    Side effects:
        Saves encoder/decoder state dicts under ./model/{epochs}_{lr}/ and
        a JSON file with the loss history, total time, and model paths.
    """
    # Fetch the dataset metadata once; calling get_data twice re-parses the file.
    data = get_data(data_path)
    en_word_n, fr_word_n = data[2], data[5]
    pair_dataset = PairDataset(data_path)
    dl = DataLoader(pair_dataset, batch_size=1, shuffle=True)
    # Build the encoder and attention decoder.
    encoder = Encoder(en_word_n, hidden_size)
    att_decoder = AttDecoder(fr_word_n, hidden_size)

    # One Adam optimizer per model.
    adam_encoder = Adam(encoder.parameters(), lr=lr)
    adam_decoder = Adam(att_decoder.parameters(), lr=lr)

    # NLLLoss pairs with the decoder's log-softmax output.
    cross_entropy_loss = NLLLoss()
    loss_list = []
    start = time()

    for epoch in range(1, epochs + 1):
        total_loss = 0.0
        # Loop over all iterations with a progress bar.
        pbar = tqdm(dl, desc=f'Epoch {epoch}', unit='step')

        for i, (en, fr) in enumerate(pbar, start=1):
            loss = train_iters(
                en, fr, encoder, att_decoder,
                adam_encoder, adam_decoder,
                cross_entropy_loss
            )
            total_loss += loss

            # Record and display the running average every 100 iterations.
            if i % 100 == 0:
                loss_avg = total_loss / 100
                loss_list.append(loss_avg)
                total_loss = 0.0
                pbar.set_postfix({'loss': f'\033[1;32m{loss_avg:.6f}\033[0m'})
    end = time()

    # Save encoder and decoder weights.
    MODEL_PATH = f'./model/{epochs}_{lr}'
    makedirs(MODEL_PATH, exist_ok=True)
    save(encoder.state_dict(), f'{MODEL_PATH}/encoder.bin')
    save(att_decoder.state_dict(), f'{MODEL_PATH}/att_decoder.bin')

    # Save loss history and metadata next to the weights.
    model_loss = {
        'total_loss': loss_list,
        'total_time': end - start,
        'encoder': f'{MODEL_PATH}/encoder.bin',
        'att_decoder': f'{MODEL_PATH}/att_decoder.bin'
    }
    with open(f'{MODEL_PATH}/en2fr_seq2seq.json', 'w', encoding='utf-8') as json_f:
        json.dump(model_loss, json_f, indent=2)

def plot_seq2seq_loss(lr: float=1e-4, epochs: int=1) -> None:
    """
    Plot the training loss curve saved by train_seq2seq.

    Args:
        lr: Learning rate used during training (selects the model directory)
        epochs: Number of training epochs (selects the model directory)

    Side effects:
        Writes ./images/seq2seq_loss_{epochs}_{lr}.png
    """
    # Load the loss history recorded during training.
    json_path = f'./model/{epochs}_{lr}/en2fr_seq2seq.json'
    with open(json_path, 'r', encoding='utf-8') as json_f:
        model_loss = json.load(json_f)
    loss_list = model_loss['total_loss']
    # Training is done for all epochs; plot the loss graph.
    plt.figure(0)
    plt.title(f'Seq2Seq loss with {epochs} epochs and learning rate {lr}')
    plt.tight_layout()
    plt.plot(loss_list)
    plt.savefig(f'./images/seq2seq_loss_{epochs}_{lr}.png')

def evaluate_model(data_path: str, x: Tensor, encoder, att_decoder) -> tuple[list, Tensor]:
    """
    Greedily decode one source sentence with the trained Seq2Seq model.

    Args:
        data_path: Path to the English to French translation data file
        x: English source sentence tensor, shape [1, src_len]
        encoder: Encoder
        att_decoder: Attention decoder

    Returns:
        Tuple of decoded French words and the decoder attention tensor,
        trimmed to one row per decoded time step
    """
    fr_idx2word = get_data(data_path)[4]
    with no_grad():
        encoder_hidden = encoder.init_hidden()
        encoder_output, encoder_hidden = encoder(x, encoder_hidden)

        # Decoder argument 1: fixed-length intermediate semantic tensor `c`.
        encoder_outputs_c = zeros(MAX_LENGTH, encoder.hidden_size, device=device)
        x_len = x.shape[1]
        for i in range(x_len):
            encoder_outputs_c[i] = encoder_output[0, i]
        # Decoder argument 2: the encoder's final hidden state seeds the
        # decoder's first time step.
        decode_hidden = encoder_hidden
        # Decoder argument 3: decoding starts from the SOS token.
        input_y = tensor([[SOS_token]], device=device)

        # Auto-regressive greedy decoding.
        # Initialize the predicted vocab list.
        decoded_words = []
        # Initialize the decoder attention tensor (one row per step).
        decoder_attentions = zeros(MAX_LENGTH, MAX_LENGTH)
        for i in range(MAX_LENGTH):
            output_y, decode_hidden, att_weights = att_decoder(
                input_y,
                decode_hidden,
                encoder_outputs_c
            )
            # Greedy choice: the highest-probability token.
            _, topi = output_y.topk(1)
            decoder_attentions[i] = att_weights

            # Stop as soon as the model emits the end-of-sequence token.
            if topi.squeeze().item() == EOS_token:
                decoded_words.append('<EOS>')
                break
            decoded_words.append(fr_idx2word[topi.item()])

            # Feed this prediction back as the next time step's input.
            input_y = topi.detach()

        # Trim the attention matrix to the steps actually decoded.
        return decoded_words, decoder_attentions[:i + 1]

def evaluate_seq2seq(data_path: str, lr: float=1e-4, epochs: int=1) -> None:
    """
    Evaluate the Sequence to Sequence model on sample pairs and print
    source, reference, and predicted sentences.

    Args:
        data_path: Path to the English to French translation data file
        lr: Learning rate used during training (selects the model directory)
        epochs: Number of training epochs (selects the model directory)
    """
    # Read the training metadata JSON and extract the model paths.
    json_path = f'./model/{epochs}_{lr}/en2fr_seq2seq.json'
    with open(json_path, 'r', encoding='utf-8') as json_f:
        json_data = json.load(json_f)
    ENCODER_PATH = json_data['encoder']
    DECODER_PATH = json_data['att_decoder']

    en_word2idx, _, en_word_n, _, _, fr_word_n, _ = get_data(data_path)
    hidden_size = 256  # must match the hidden size used at training time

    # Rebuild the models and load the trained weights on CPU.
    # NOTE(review): strict=False silently ignores missing/unexpected keys;
    # confirm this is intended rather than masking a state-dict mismatch.
    encoder = Encoder(en_word_n, hidden_size)
    encoder.load_state_dict(
        load(ENCODER_PATH, map_location='cpu'),
        strict=False
    )
    att_decoder = AttDecoder(fr_word_n, hidden_size)
    att_decoder.load_state_dict(
        load(DECODER_PATH, map_location='cpu'),
        strict=False
    )
    # Inference mode: disables dropout and other train-only behavior.
    encoder.eval()
    att_decoder.eval()

    sample_pairs = [
        ['i m impressed with your french .', 'je suis impressionne par votre francais .'],
        ['i m more than a friend .', 'je suis plus qu une amie .'],
        ['she is beautiful like her mother .', 'elle est belle comme sa mere .']
    ]

    for x, y in sample_pairs:
        # Digitize the source sentence and append the end-of-sequence token.
        tmpx = [en_word2idx[word] for word in x.split(' ')]
        tmpx.append(EOS_token)
        tensor_x = tensor(tmpx, dtype=long, device=device).view(1, -1)
        # Model prediction.
        decoded_words, _ = evaluate_model(
            data_path=data_path,
            x=tensor_x,
            encoder=encoder,
            att_decoder=att_decoder
        )
        output_sentence = ' '.join(decoded_words)

        print(f'[bold green]>[/] {x}')
        print(f'[bold green]=[/] {y}')
        print(f'[bold green]<[/] {output_sentence}')
        print()

def plot_attention(data_path: str, lr: float=1e-4, epochs: int=1) -> None:
    """
    Visualize the decoder attention weights for a sample sentence.

    Args:
        data_path: Path to the English to French translation data file
        lr: Learning rate used during training (selects the model directory)
        epochs: Number of training epochs (selects the model directory)

    Side effects:
        Writes ./images/seq2seq_attention_{epochs}_{lr}.png
    """
    # Read the training metadata JSON and extract the model paths.
    json_path = f'./model/{epochs}_{lr}/en2fr_seq2seq.json'
    with open(json_path, 'r', encoding='utf-8') as json_f:
        json_data = json.load(json_f)
    ENCODER_PATH = json_data['encoder']
    DECODER_PATH = json_data['att_decoder']

    en_word2idx, _, en_word_n, _, _, fr_word_n, _ = get_data(data_path)
    hidden_size = 256  # must match the hidden size used at training time

    # Rebuild the models and load the trained weights on CPU.
    # NOTE(review): strict=False silently ignores missing/unexpected keys;
    # confirm this is intended rather than masking a state-dict mismatch.
    encoder = Encoder(en_word_n, hidden_size)
    encoder.load_state_dict(
        load(ENCODER_PATH, map_location='cpu'),
        strict=False
    )
    att_decoder = AttDecoder(fr_word_n, hidden_size)
    att_decoder.load_state_dict(
        load(DECODER_PATH, map_location='cpu'),
        strict=False
    )
    # Inference mode: disables dropout and other train-only behavior.
    encoder.eval()
    att_decoder.eval()

    sentence = 'we re both teachers .'
    # Digitize the sentence and append the end-of-sequence token.
    tmpx = [en_word2idx[word] for word in sentence.split(' ')]
    tmpx.append(EOS_token)
    tensor_x = tensor(tmpx, dtype=long, device=device).view(1, -1)

    # Model prediction; keep only the attention weights.
    _, attentions = evaluate_model(
        data_path=data_path,
        x=tensor_x,
        encoder=encoder,
        att_decoder=att_decoder
    )

    plt.matshow(attentions.numpy())
    plt.savefig(f'./images/seq2seq_attention_{epochs}_{lr}.png')

if __name__ == '__main__':
    # Hyper-parameters shared by every pipeline stage.
    lr = 5e-4
    epochs = 2
    hidden_size = 256

    # Full pipeline, in order: train, plot loss, evaluate, plot attention.
    pipeline = [
        ('Training model...',
         lambda: train_seq2seq(data_path=DATA_PATH, hidden_size=hidden_size, lr=lr, epochs=epochs)),
        ('Plot Seq2Seq loss graph...',
         lambda: plot_seq2seq_loss(lr=lr, epochs=epochs)),
        ('Model evaluate...',
         lambda: evaluate_seq2seq(data_path=DATA_PATH, lr=lr, epochs=epochs)),
        ('Plot attention graph...',
         lambda: plot_attention(data_path=DATA_PATH, lr=lr, epochs=epochs)),
    ]
    for message, stage in pipeline:
        print(f'[bold magenta]{message}[/]')
        stage()
