import os
import random
import time

import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from model_v2 import EncoderRNN,DecoderRNN
import torch.nn.functional as F

from config import *
from pre_process import load_data, get_batch, pack_seqs
from masked_cross_entropy import compute_loss, compute_loss_bias

# Single source of truth for the CUDA check: device reuses USE_CUDA
# instead of querying torch.cuda.is_available() a second time.
USE_CUDA = torch.cuda.is_available()
device = torch.device("cuda" if USE_CUDA else "cpu")

# Root directory for data files (currently unused in this chunk —
# load_data appears to resolve its own path; TODO confirm).
BASE_DIR = './data/'


def accuracy(output, target, topk=(1,), seq_len=50):
    """Compute precision@k for a batch of sequence predictions.

    Args:
        output: score tensor of shape (N, num_classes), where N is assumed
            to be batch_size * seq_len flattened token scores — TODO confirm
            against the decoder's output shape.
        target: tensor whose flattened length equals N.
        topk: tuple of k values to report precision@k for.
        seq_len: tokens per sequence used in the normalization denominator
            (default 50, the value previously hard-coded here).

    Returns:
        List of 1-element tensors, one per k in ``topk``, each the count of
        correct top-k predictions divided by ``target.size(0) * seq_len``.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            # reshape, not view: correct[:k] is non-contiguous after .t(),
            # so .view(-1) raises RuntimeError on modern PyTorch.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(1 / (batch_size * seq_len)))
    return res

def train():
    """Train the encoder/decoder tagger and report validation accuracy.

    Loads pickled data, splits it 90/10 into train/validation sets,
    trains with Adam (lr=0.01) plus gradient-norm clipping at 5.0, and
    prints top-1 accuracy over the validation split after each epoch.
    """
    train_data, word2index, tag2index = load_data('test_data_p.pkl')

    encoder = EncoderRNN(len(word2index), EMBEDDING_DIM, ENCODER_DIM)
    decoder = DecoderRNN(len(tag2index), len(tag2index) // 3, DECODER_DIM, 10)

    encoder = encoder.to(device)
    decoder = decoder.to(device)

    decoder.init_embedding_weights()
    enc_optim = optim.Adam(encoder.parameters(), lr=0.01)
    dec_optim = optim.Adam(decoder.parameters(), lr=0.01)

    for step in range(1):
        # Validation below flips the models to eval mode; restore training
        # mode at the top of every epoch (previously never restored).
        encoder.train(True)
        decoder.train(True)

        losses = []
        random.shuffle(train_data)
        split = round(0.9 * len(train_data))
        train_set = train_data[:split]
        valid_set = train_data[split:]

        for i, batch in enumerate(get_batch(BATCH_SIZE, train_set)):
            sents, tags = zip(*batch)
            x = torch.tensor(sents, device=device)
            y = torch.tensor(tags, device=device)

            # Sort the batch by length (required by packed RNN sequences —
            # TODO confirm the encoder packs its input).
            x_lens = torch.tensor(pack_seqs(sents), device=device)
            x_lens_sort, sent2ix_sort = torch.sort(x_lens, descending=True)
            # The inverse permutation is the *indices* of sorting the sort
            # order, not its values (the old code kept the values, which are
            # always just 0..B-1).
            _, sent2ix_unsort = torch.sort(sent2ix_sort)

            encoder.zero_grad()
            decoder.zero_grad()

            enc_output, enc_hidden = encoder(x[sent2ix_sort], x_lens_sort)
            # Index 1 is assumed to be the start-of-sequence token — TODO confirm.
            start_decode = torch.tensor([[1]] * BATCH_SIZE, device=device)

            tag_score = decoder(start_decode, enc_hidden, enc_output, x_lens_sort)

            # 50 is the fixed tag-sequence length — TODO confirm against
            # pre_process padding.
            loss = compute_loss_bias(tag_score.view(BATCH_SIZE, 50, -1),
                                     y[sent2ix_sort], x_lens)

            loss.backward()
            # .item() detaches, so the list doesn't keep autograd graphs alive.
            losses.append(loss.item())

            torch.nn.utils.clip_grad_norm_(encoder.parameters(), 5.0)
            torch.nn.utils.clip_grad_norm_(decoder.parameters(), 5.0)

            enc_optim.step()
            dec_optim.step()

            if i % 100 == 0:
                print("Step", step, " epoch", i, " : ", np.mean(losses))
                losses = []
            # NOTE: a bare `break` here (debug leftover) used to stop
            # training after a single batch; removed so the full train
            # set is consumed each epoch.

        # Validation: eval mode + no_grad so no autograd state is built.
        encoder.train(False)
        decoder.train(False)
        with torch.no_grad():
            for i, batch in enumerate(get_batch(BATCH_SIZE, valid_set)):
                sents, tags = zip(*batch)
                x = torch.tensor(sents, device=device)
                y = torch.tensor(tags, device=device)

                x_lens = torch.tensor(pack_seqs(sents), device=device)
                x_lens_sort, sent2ix_sort = torch.sort(x_lens, descending=True)

                enc_output, enc_hidden = encoder(x[sent2ix_sort], x_lens_sort)
                start_decode = torch.tensor([[1]] * BATCH_SIZE, device=device)

                tag_score = decoder(start_decode, enc_hidden, enc_output, x_lens_sort)
                # Compare against the *sorted* targets: tag_score rows follow
                # sent2ix_sort order (the old code compared against unsorted y,
                # inconsistent with the training loss above).
                acc = accuracy(tag_score, y[sent2ix_sort])

                print(acc)

    # TODO: persist the trained weights, e.g.
    #   torch.save(encoder.state_dict(), os.path.join(model_dir, 'encoder.pkl'))
    #   torch.save(decoder.state_dict(), os.path.join(model_dir, 'decoder.pkl'))


# Script entry point: run training only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    train()