import random
import torch
from collections import defaultdict
import pandas as pd
import re
import torch.nn as nn
from dataprocesser import data_process
import os
from network import EncoderRNN, AttnDecoderRNN
import time
from torch import optim
from torch.utils.data import DataLoader
import config


def test_epoch(dataloader, criteria):
    """Evaluate the global encoder/decoder pair on one pass of `dataloader`.

    Decoding is greedy (no teacher forcing): each step feeds the argmax of
    the previous step back in, starting from the SOS token.

    Returns the sum of per-batch mean losses divided by the dataset size.
    NOTE(review): `encoder_out[0, 0]` and the `[[out_sos_token]]` decoder
    input assume batch_size == 1 — confirm `config.batch_size`.
    """
    encoder.eval()
    decoder.eval()
    total_loss = 0.0
    n_samples = len(dataloader.dataset)
    with torch.no_grad():
        for src, tgt in dataloader:
            src, tgt = src.to(config.device), tgt.to(config.device)
            hidden = encoder.initHidden()
            # One attention slot per source position, zero-padded to max_len.
            enc_states = torch.zeros(config.max_len, config.d_model).to(config.device)
            for t in range(src.shape[-1]):
                enc_out, hidden = encoder(src[:, t], hidden)
                enc_states[t] = enc_out[0, 0]
            # Decoder starts from the encoder's final hidden state.
            dec_input = torch.tensor([[out_sos_token]]).to(config.device)
            batch_loss = 0
            for t in range(tgt.shape[-1]):
                dec_out, hidden, _ = decoder(dec_input, hidden, enc_states)
                batch_loss += criteria(dec_out, tgt[:, t])
                _, topi = dec_out.topk(1)
                dec_input = topi.squeeze().detach()
            total_loss += (batch_loss / tgt.shape[-1]).item()
    return total_loss / n_samples


# training without bucketing
def train_epoch(dataloader, criteria):
    """Run one optimisation epoch over `dataloader` with the global models.

    With probability `config.teacher_force` the decoder is teacher-forced
    (the ground-truth previous token is fed in); otherwise its own greedy
    prediction is fed back.

    Fixes vs. original:
      * Teacher forcing fed `y[:, word_id]` as input at the SAME step it was
        scored against — the decoder literally saw the answer. It now starts
        from SOS and feeds the PREVIOUS target token.
      * Removed the dead `decoder_outputs` buffer: it was never read, and it
        was allocated on CPU while `decoder_out` may live on `config.device`,
        which crashes on GPU.

    Returns the sum of per-batch mean losses divided by the dataset size.
    NOTE(review): `encoder_out[0, 0]` and the `[[out_sos_token]]` decoder
    input assume batch_size == 1 — confirm `config.batch_size`.
    """
    encoder.train()
    decoder.train()
    losses = 0
    size = len(dataloader.dataset)
    for x, y in dataloader:
        encoder_hidden = encoder.initHidden()
        x, y = x.to(config.device), y.to(config.device)
        # x: [batch_size, len_seq]
        encoder_optim.zero_grad()
        decoder_optim.zero_grad()

        len_input = x.shape[-1]
        encoder_outputs = torch.zeros(config.max_len, config.d_model).to(config.device)
        for word_id in range(len_input):
            encoder_out, encoder_hidden = encoder(x[:, word_id], encoder_hidden)
            # encoder_out: [1, batch_size, d_model]
            encoder_outputs[word_id] = encoder_out[0, 0]

        decoder_hidden = encoder_hidden
        loss = 0
        # Sample once per batch whether this batch is teacher-forced.
        use_teacher_forcing = random.random() < config.teacher_force
        decoder_input = torch.tensor([[out_sos_token]]).to(config.device)
        for word_id in range(y.shape[-1]):
            decoder_out, decoder_hidden, attention = decoder(decoder_input, decoder_hidden, encoder_outputs)
            loss += criteria(decoder_out, y[:, word_id])
            if use_teacher_forcing:
                # Feed the ground-truth token as the NEXT step's input.
                decoder_input = y[:, word_id]
            else:
                _, topi = decoder_out.topk(1)
                decoder_input = topi.squeeze().detach()

        loss = loss / y.shape[-1]
        loss.backward()

        encoder_optim.step()
        decoder_optim.step()

        losses += loss.item()
    # LR decay is per-epoch, not per-batch.
    encoder_sch.step()
    decoder_sch.step()
    return losses / size


def load_model(model, optimizer, scheduler, log_dir):
    """Restore model/optimizer/scheduler state from a checkpoint file.

    Args:
        model, optimizer, scheduler: live objects to load state into.
        log_dir: path to a checkpoint produced by `save_model`.

    Returns:
        (model, optimizer, scheduler, cur_epoch) — the same objects with
        state loaded, plus the epoch number stored in the checkpoint.

    Fix vs. original: `map_location="cpu"` so a checkpoint written on a GPU
    machine can be restored on a CPU-only host; `load_state_dict` then moves
    tensors back onto each parameter's own device.
    """
    assert os.path.exists(log_dir), f"No such file path {log_dir}"
    checkpoint = torch.load(log_dir, map_location="cpu")
    model.load_state_dict(checkpoint['model'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    scheduler.load_state_dict(checkpoint['scheduler'])
    cur_epoch = checkpoint['epoch']
    return model, optimizer, scheduler, cur_epoch


def save_model(model, optimizer, scheduler, cur_epch, log_dir):
    """Write a resumable training checkpoint to `log_dir`.

    The checkpoint bundles the model, optimizer and scheduler state dicts
    together with the current epoch index, matching what `load_model` reads.
    """
    checkpoint = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict(),
        'epoch': cur_epch,
    }
    torch.save(checkpoint, log_dir)


def train(epochs, checkout_point, verbose_point, train_loader, test_loader, criteria, is_load_model=False):
    """Drive the full training loop on the global encoder/decoder models.

    Args:
        epochs: total number of epochs to reach (not "additional" epochs).
        checkout_point: checkpoint every this many epochs.
        verbose_point: print losses every this many epochs.
        train_loader, test_loader: DataLoaders of (x, y) pairs.
        criteria: loss function passed to train_epoch/test_epoch.
        is_load_model: resume from the on-disk checkpoints if True.

    Fix vs. original: the epoch restored by `load_model` was discarded, so a
    resumed run restarted at epoch 0 and re-saved checkpoints with a reset
    counter. Resuming now continues from the epoch after the checkpoint.

    NOTE(review): "Epoch Time" in the printout is the time since the LAST
    verbose report (i.e. `verbose_point` epochs), not a single epoch.
    """
    global encoder, encoder_optim, encoder_sch, decoder, decoder_optim, decoder_sch
    encoder_path = f"model/encoder_{config.d_model}.pth"
    decoder_path = f"model/decoder_{config.d_model}.pth"
    start_epoch = 0
    if is_load_model:
        encoder, encoder_optim, encoder_sch, cur_epoch = load_model(encoder, encoder_optim, encoder_sch, encoder_path)
        decoder, decoder_optim, decoder_sch, _ = load_model(decoder, decoder_optim, decoder_sch, decoder_path)
        # Checkpoints store the last COMPLETED epoch; resume on the next one.
        start_epoch = cur_epoch + 1
    start_time = time.time()
    for epoch in range(start_epoch, epochs):
        train_loss = train_epoch(train_loader, criteria)
        dev_loss = test_epoch(test_loader, criteria)
        if (epoch + 1) % verbose_point == 0:
            end_time = time.time()
            print(f"Epoch: {epoch + 1}, Train loss: {train_loss:.3f}"
                  f" Dev loss: {dev_loss:.3f}"
                  f" , Epoch Time: {end_time - start_time:.3f}s, ")
            start_time = end_time
        if (epoch + 1) % checkout_point == 0:
            save_model(encoder, encoder_optim, encoder_sch, epoch, encoder_path)
            save_model(decoder, decoder_optim, decoder_sch, epoch, decoder_path)


def translate(en_sentences, en_vocab, cn_vocab, encoder, decoder):
    """Greedily translate a list of English sentences into token lists.

    Args:
        en_sentences: list of raw English strings.
        en_vocab, cn_vocab: source/target vocab objects (encode_dataset,
            id2vocab).
        encoder, decoder: trained models.

    Returns:
        A list (one entry per sentence) of decoded target-vocabulary tokens.

    Fixes vs. original:
      * Decoding now stops at the EOS token instead of always emitting
        `config.max_len` tokens (the tail was post-EOS garbage / id-0 words).
      * Token ids are converted to plain ints before indexing `id2vocab`;
        the original indexed with numpy float32 values, which only worked by
        accident of float/int hash equality.
      * eval() is set once instead of per sentence.
    """
    encoder.eval()
    decoder.eval()
    input_tensors = en_vocab.encode_dataset(en_sentences)
    output_list = []
    for s in input_tensors:
        token_ids = []
        with torch.no_grad():
            encoder_hidden = encoder.initHidden()
            x = s.to(config.device).unsqueeze(0)  # fake batch dim: [1, len_seq]
            encoder_outputs = torch.zeros(config.max_len, config.d_model).to(config.device)
            for word_id in range(x.shape[-1]):
                encoder_out, encoder_hidden = encoder(x[:, word_id], encoder_hidden)
                encoder_outputs[word_id] = encoder_out[0, 0]
            decoder_hidden = encoder_hidden
            decoder_input = torch.tensor([[out_sos_token]]).to(config.device)
            for _ in range(config.max_len):
                decoder_out, decoder_hidden, attention = decoder(decoder_input, decoder_hidden, encoder_outputs)
                _, topi = decoder_out.topk(1)
                decoder_input = topi.squeeze().detach()
                token_id = int(decoder_input.item())
                token_ids.append(token_id)
                if token_id == out_eos_token:
                    break
        output_list.append([cn_vocab.id2vocab[i] for i in token_ids])
    return output_list


if __name__ == '__main__':
    # Seed every RNG source so runs are reproducible.
    torch.manual_seed(7)  # cpu
    torch.cuda.manual_seed(7)  # gpu
    random.seed(7)  # random and transform

    # Build vocabularies and the parallel corpus, then split once.
    en_vocab, cn_vocab, en_train, cn_train = data_process(config.path)
    out_sos_token = cn_vocab.token['sos']
    out_eos_token = cn_vocab.token['eos']

    pairs = tuple(zip(en_train, cn_train))
    split_idx = int(len(en_train) * config.split_ratio)
    train_data = pairs[:split_idx]
    test_data = pairs[split_idx:]

    encoder = EncoderRNN(en_vocab.vocab_size, config.d_model).to(config.device)
    decoder = AttnDecoderRNN(cn_vocab.vocab_size, config.d_model, max_length=config.max_len).to(config.device)

    train_loader = DataLoader(train_data, batch_size=config.batch_size)
    test_loader = DataLoader(test_data, batch_size=config.batch_size)

    criteria = nn.NLLLoss()
    encoder_optim = optim.SGD(encoder.parameters(), lr=config.lr_en, momentum=config.momentum)
    decoder_optim = optim.SGD(decoder.parameters(), lr=config.lr_cn, momentum=config.momentum)

    # Exponential LR decay: multiply the base LR by 0.97 each epoch.
    def decay(epoch):
        return 0.97 ** epoch

    encoder_sch = torch.optim.lr_scheduler.LambdaLR(optimizer=encoder_optim, lr_lambda=decay)
    decoder_sch = torch.optim.lr_scheduler.LambdaLR(optimizer=decoder_optim, lr_lambda=decay)

    train(config.epochs, config.checkout_point, config.verbose_point, train_loader,
          test_loader, criteria, config.is_load_model)

    # A few ad-hoc sentences to eyeball translation quality after training.
    tt = [
        "I am looking for you.",
        "welcome to beijing.",
        "Study hard.",
        "I don't know.",
        "I've got something I want to show you.",
        "Long time, no see. I hear that you've changed your job again.",
        "where are you going, let's go to have lunch."
    ]
    for sentence in translate(tt, en_vocab, cn_vocab, encoder, decoder):
        print(sentence)
