import os
import pickle
import random
import torch
import torch.nn as nn
import torch.optim as optim
from evaluate import evaluavtion_triple
from model_v2 import EncoderRNN, DecoderRNN, DecoderRNNDist
from config import *
from pre_process import load_data, get_batch, pack_seqs
import numpy as np
import torch.nn.functional as F


from util import get_user_paras

USE_CUDA = torch.cuda.is_available()

BASE_DIR = './data/'


def train(user_config=None):
    """Train the encoder/decoder tagging model and evaluate after every epoch.

    Args:
        user_config: dict with keys 'batch' (batch size), 'alpha' (class
            weight for non-entity tags), 'device' (torch.device), and
            'epoch' (number of epochs). Typically built by get_user_paras().
    """
    # Avoid the shared mutable-default-argument pitfall of `user_config={}`.
    user_config = {} if user_config is None else user_config
    BATCH_SIZE = user_config['batch']
    alpha = user_config['alpha']
    device = user_config['device']
    epoch = user_config['epoch']
    cur_config = "b{}_a{}".format(BATCH_SIZE, alpha)
    print('current_config : batch_size {} alpha {} GPU {} epoch {}'.
          format(BATCH_SIZE, alpha, device, epoch))

    train_data, word2index, ix2tag, test_data = load_data('train_data_d.pkl',
                                                          'test_data_d.pkl')
    # NOTE(review): pickle.load on a local artifact — fine for trusted data only.
    word_vec = pickle.load(open(BASE_DIR + 'wv_matrix.pkl', 'rb'))

    encoder = EncoderRNN(len(word2index)+1, EMBEDDING_DIM, ENCODER_DIM, word_vec)
    decoder = DecoderRNNDist(len(ix2tag)+1, len(ix2tag) // 7, DECODER_DIM, BATCH_SIZE, device=device)

    encoder = encoder.to(device)
    decoder = decoder.to(device)

    # Per-class weights for the tag loss: index 0 is padding (weight 0 and
    # also ignore_index), index 1 keeps weight 1, every remaining tag is
    # scaled by `alpha`.
    bias = [0.0, 1]
    bias.extend(alpha for _ in range(1, len(ix2tag)))
    print(bias)
    tag_criterion = nn.CrossEntropyLoss(ignore_index=0, weight=torch.tensor(bias, device=device))
    dist_criterion = nn.SmoothL1Loss()

    decoder.init_weights()
    enc_optim = optim.RMSprop(encoder.parameters(), lr=0.001, alpha=0.9, eps=1e-7)
    dec_optim = optim.RMSprop(decoder.parameters(), lr=0.001, alpha=0.9, eps=1e-7)

    random.shuffle(test_data)
    split = round(0.1 * len(test_data))
    valid_set = test_data[:split]
    test_set = test_data[split:]
    P, R, F1 = 0.0, 0.0, 0.0
    i_m = 0  # epoch (1-based) at which the best F1 so far was observed

    for step in range(epoch):
        losses = []
        random.shuffle(train_data)
        for i, batch in enumerate(get_batch(BATCH_SIZE, train_data)):
            sents, tags, distances = zip(*batch)
            # The decoder is built for a fixed BATCH_SIZE, so drop the
            # partial final batch.
            if len(sents) < BATCH_SIZE:
                break
            x = torch.tensor(sents, device=device)
            y = torch.tensor(tags, device=device)
            dists = torch.tensor(distances, device=device).float()
            x_lens = torch.tensor(pack_seqs(sents), device=device)
            masked = x.gt(0).view(-1)     # flat mask: True at non-pad tokens
            encoder_masking = x.lt(1)     # True at padding positions

            encoder.zero_grad()
            decoder.zero_grad()

            enc_output, hidden_c = encoder(x, x_lens)
            start_decode = torch.tensor([[1]] * BATCH_SIZE, device=device)
            tag_score, dist_score = decoder(start_decode, hidden_c, enc_output, encoder_masking)
            # Zero distance predictions at padding so they do not contribute
            # to the SmoothL1 loss. (Tag loss handles padding via ignore_index.)
            dist_score = dist_score * masked.view(BATCH_SIZE, -1).float()
            tag_loss = tag_criterion(tag_score, y.view(-1))
            dist_loss = dist_criterion(dist_score, dists)

            loss = tag_loss + dist_loss

            loss.backward()
            losses.append(float(loss))

            # Clip after backward, before the optimizer step.
            torch.nn.utils.clip_grad_norm_(encoder.parameters(), 5.0)
            torch.nn.utils.clip_grad_norm_(decoder.parameters(), 5.0)

            enc_optim.step()
            dec_optim.step()

            if i % 100 == 0:
                # Mean loss over the batches since the last report.
                print('Epoch: [{}/{}], Step: [{}/{}], Loss: {:.4f}'.format(
                    step+1, epoch, i, len(train_data)//BATCH_SIZE, np.mean(losses)
                ))
                losses = []

        print('============ test ===============')
        res = test_model(encoder, decoder, test_data, user_config, ix2tag)
        t_P, t_R, t_F = evaluavtion_triple(res)
        if t_F >= F1:
            i_m = step+1
            P, R, F1 = t_P, t_R, t_F
            # save_model(decoder, encoder, cur_config+"_s{}".format(step))
        print("Epoch: {}, P, R, F\n{:.4f} {:.4f} {:.4f}".format(step+1, t_P, t_R, t_F))
        print("MAX at Epoch: {} P, R, F\n{:.4f} {:.4f} {:.4f}".format(i_m, P, R, F1))

    print("Train Complete!")


def save_model(decoder, encoder, config):
    """Write decoder and encoder state dicts into `model_dir`, tagged with `config`."""
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    # Same filenames and save order as before: decoder first, then encoder.
    for prefix, model in (('decoder', decoder), ('encoder', encoder)):
        target = os.path.join(model_dir, '{}_{}.pkl'.format(prefix, config))
        torch.save(model.state_dict(), target)


def test_model(encoder, decoder, data, user_config, ix2tag):
    """Run inference over `data` and collect (predicted, gold) tag sequences.

    Args:
        encoder, decoder: trained model halves.
        data: iterable of (sentence_ids, tag_ids, distances) examples.
        user_config: dict with 'device' and 'batch'.
        ix2tag: index -> tag-name mapping.

    Returns:
        List of [pred_tags, gold_tags] pairs (tag names), restricted to
        positions where the model predicted a non-zero tag.
    """
    device = user_config['device']
    BATCH_SIZE = user_config['batch']
    encoder = encoder.to(device)
    decoder = decoder.to(device)

    encoder.train(False)
    decoder.train(False)

    res = []
    # Inference only: no_grad avoids building autograd graphs and saves memory.
    with torch.no_grad():
        for batch in get_batch(BATCH_SIZE, data):
            sents, tags, dists = zip(*batch)
            # The decoder is built for a fixed BATCH_SIZE; a partial final
            # batch would break the view(BATCH_SIZE, -1) reshapes below, so
            # skip it — mirrors the guard in train(). NOTE(review): this
            # drops up to BATCH_SIZE-1 tail examples from evaluation.
            if len(sents) < BATCH_SIZE:
                break
            x = torch.tensor(sents, device=device)
            y = torch.tensor(tags, device=device)
            x_lens = torch.tensor(pack_seqs(sents), device=device)
            masked = x.gt(0).view(-1)     # True at non-pad tokens
            encoder_masking = x.lt(1)     # True at padding positions

            enc_output, enc_hidden = encoder(x, x_lens)
            # NOTE(review): start token is 2 here but 1 in train() — confirm
            # this asymmetry is intentional.
            start_decode = torch.tensor([[2]] * BATCH_SIZE, device=device)
            tag_score, dist_score = decoder(start_decode, enc_hidden, enc_output, encoder_masking)
            pre = torch.argmax(tag_score, 1)
            # Zero out predictions/labels at padding before decoding to names.
            pre_m = masked.view(BATCH_SIZE, -1) * pre.view(BATCH_SIZE, -1).byte()
            tag = masked.view(BATCH_SIZE, -1) * y.byte()

            for p_tag, t_tag in zip(pre_m.tolist(), tag.tolist()):
                p_sent = []
                t_sent = []
                for p, t in zip(p_tag, t_tag):
                    if p > 0:
                        p_sent.append(ix2tag[p])
                        t_sent.append(ix2tag[t])
                res.append([p_sent, t_sent])

    encoder.train(True)
    decoder.train(True)
    return res


if __name__ == '__main__':
    # Parse CLI parameters, then resolve the compute device before training.
    user_config = get_user_paras()
    if torch.cuda.is_available():
        device = torch.device('cuda:{}'.format(user_config['gpu']))
    else:
        device = torch.device('cpu')
    user_config['device'] = device
    train(user_config)

    # device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
    # test_data = pickle.load(open(BASE_DIR+'test_data_p.pkl', 'rb'))
    # res = test_model(test_data)
    # print(evaluavtion_triple(res))
