import logging
import os
import pickle
import random
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR

import logger
from evaluate import evaluavtion_triple
from model_vlen import EncoderRNN, DecoderRNN
from config import *
from pre_process import load_data, get_batch, pack_seqs
import numpy as np
import torch.nn.functional as F
from util import get_user_paras

# Root directory holding the pickled datasets and embedding matrix.
BASE_DIR = './data/'
# Install the project-wide logging configuration (handlers/format) once at import.
logger.setup_logging()


def add_regularization(model, weight_decay=2e-5):
    """Split a model's parameters into two optimizer parameter groups.

    Parameters whose name contains ``'bias'`` receive no weight decay;
    every other parameter (weights, embeddings, ...) gets *weight_decay*.

    Returns a list of two dicts suitable for a ``torch.optim`` constructor.
    """
    named = list(model.named_parameters())
    biases = [p for name, p in named if 'bias' in name]
    weights = [p for name, p in named if 'bias' not in name]
    return [
        {'params': weights, 'weight_decay': weight_decay},
        {'params': biases, 'weight_decay': 0},
    ]


def train(user_config=None):
    """Train the encoder/decoder tagger end to end and checkpoint each epoch.

    ``user_config`` must provide the keys: 'batch', 'alpha', 'device',
    'epoch', 'regularization', 'lr', 'dropout'.
    NOTE(review): 'lr' is only logged — both optimizers below are hard-coded
    to lr=0.001; confirm whether they should use user_config['lr'].
    """
    # `user_config=None` avoids the mutable-default-argument pitfall of `={}`.
    if user_config is None:
        user_config = {}
    BATCH_SIZE = user_config['batch']
    alpha = user_config['alpha']
    device = user_config['device']
    epoch = user_config['epoch']
    regularization = user_config['regularization']
    lr = user_config['lr']
    dropout = user_config['dropout']

    # Short tag embedded into the checkpoint file names.
    cur_config = "b{}a{}e{}r{}d{}".format(BATCH_SIZE, alpha, epoch, regularization, dropout)
    logging.info('current_config : batch_size {} alpha {} GPU {} epoch {} regularization {} lr {} dropout'
                 ' {}'.
          format(BATCH_SIZE, alpha, device, epoch, regularization, lr, dropout))

    train_data, word2index, ix2tag, test_data = load_data('train_data_d.pkl',
                                                          'test_data_d.pkl')
    # Pre-trained word-embedding matrix. `with` closes the handle (the original
    # `pickle.load(open(...))` leaked it). NOTE: pickle is only safe on trusted files.
    with open(BASE_DIR + 'wv_matrix.pkl', 'rb') as f:
        word_vec = pickle.load(f)

    encoder = EncoderRNN(len(word2index)+1, EMBEDDING_DIM,
                         ENCODER_DIM, word_vec,
                         dropout=dropout, device=device)
    decoder = DecoderRNN(len(ix2tag)+2, len(ix2tag)+2,
                         DECODER_DIM, BATCH_SIZE,
                         dropout=dropout, device=device)

    encoder = encoder.to(device)
    decoder = decoder.to(device)

    # Per-class loss weights: index 0 (padding) is ignored, index 1 is scaled
    # by 1/alpha, the trailing extra class is up-weighted to 10.
    bias = [0.0, 1/alpha]
    bias.extend(1 for i in range(1, len(ix2tag)))
    bias.append(10)
    print(bias)
    tag_criterion = nn.CrossEntropyLoss(ignore_index=0, weight=torch.tensor(bias, device=device))

    enc_optim = optim.RMSprop(encoder.parameters(), lr=0.001)
    dec_optim = optim.RMSprop(decoder.parameters(), lr=0.001)
    if regularization:
        # Rebuild the optimizers with weight-decay-aware parameter groups.
        enc_optim = optim.RMSprop(add_regularization(encoder, regularization), lr=0.001)
        dec_optim = optim.RMSprop(add_regularization(decoder, regularization), lr=0.001)

    random.shuffle(train_data)
    random.shuffle(test_data)

    # 10% of the test data is held out as the validation split.
    split = round(0.1 * len(test_data))
    valid_set = test_data[:split]
    train_set = train_data
    test_set = test_data[split:]
    P, R, F1 = 0.0, 0.0, 0.0

    for step in range(epoch):
        losses = []
        random.shuffle(train_set)
        # Reset after a possible short final batch in the previous epoch.
        BATCH_SIZE = user_config['batch']
        s_step = len(train_set)//BATCH_SIZE
        for i, batch in enumerate(get_batch(BATCH_SIZE, train_set)):
            sents, tags, distances = zip(*batch)
            # The last batch may be smaller than the configured batch size.
            BATCH_SIZE = min(len(sents), BATCH_SIZE)
            x = torch.tensor(sents, device=device)
            y = torch.tensor(tags, device=device)
            x_lens = torch.tensor(pack_seqs(sents), device=device)
            # Sort lengths descending, as packed-sequence RNNs require.
            # NOTE(review): `x` itself is not reordered by sent2ix_sort before
            # the encoder call — confirm the encoder reorders internally.
            _, sent2ix_sort = torch.sort(x_lens, descending=True)

            encoder.zero_grad()
            decoder.zero_grad()

            enc_output, hidden_c = encoder(x, x_lens[sent2ix_sort])
            # 163 appears to be the start-of-sequence tag index — TODO confirm
            # against the tag vocabulary.
            start_decode = torch.tensor([[163]] * BATCH_SIZE, device=device)
            tag_score = decoder(start_decode, hidden_c, enc_output, x_lens[sent2ix_sort])
            tag_loss = tag_criterion(tag_score, y.view(-1))

            loss = tag_loss
            loss.backward()
            losses.append(float(loss))

            enc_optim.step()
            dec_optim.step()

            if i % 100 == 0:
                # Log the mean loss over the current logging window, then reset it.
                logging.info('Epoch: [{}/{}], Step: [{}/{}], Loss: {:.4f}'.format(
                    step+1, epoch, i, s_step, np.mean(losses)
                ))
                losses = []

        logging.info('============ valid ===============')
        res = test_model(encoder, decoder, valid_set, user_config, ix2tag)
        t_P, t_R, t_F = evaluavtion_triple(res)
        logging.info("Epoch: {}, P, R, F {:.4f} {:.4f} {:.4f}".format(step+1, t_P, t_R, t_F))

        logging.info('============ test ===============')
        res = test_model(encoder, decoder, test_set, user_config, ix2tag)
        P, R, F1 = evaluavtion_triple(res)
        logging.info("P, R, F  {:.4f} {:.4f} {:.4f}".format(P, R, F1))
        # Checkpoint after every epoch (overwrites the previous epoch's files).
        save_model(decoder, encoder, cur_config)

    logging.info("Train Complete!")


def save_model(decoder, encoder, config, directory=None):
    """Persist both model state dicts as ``decoder_<config>.pkl`` / ``encoder_<config>.pkl``.

    ``directory`` defaults to the project-level ``model_dir`` (imported from
    config); pass an explicit path to override, e.g. in tests.
    """
    if directory is None:
        directory = model_dir
    # exist_ok avoids the check-then-create race of `if not exists: makedirs`.
    os.makedirs(directory, exist_ok=True)

    torch.save(decoder.state_dict(), os.path.join(directory, 'decoder_{}.pkl'.format(config)))
    torch.save(encoder.state_dict(), os.path.join(directory, 'encoder_{}.pkl'.format(config)))


def test_model(encoder, decoder, data, user_config, ix2tag):
    """Evaluate the models on *data* and collect predicted/gold tag pairs.

    Returns a list of ``[predicted_tags, gold_tags]`` per sentence, keeping
    only positions where the (mask-filtered) prediction is non-zero.
    Restores both models to training mode before returning.
    """
    device = user_config['device']
    BATCH_SIZE = user_config['batch']
    alpha = user_config['alpha']
    encoder = encoder.to(device)
    decoder = decoder.to(device)

    # Switch to evaluation mode (disables dropout).
    encoder.train(False)
    decoder.train(False)

    # Same class weighting as training, except the trailing class weight is 0
    # here (10 during training) — NOTE(review): confirm this asymmetry is intended.
    bias = [0.0, 1/alpha]
    bias.extend(1 for i in range(1, len(ix2tag)))
    bias.append(0)
    tag_criterion = nn.CrossEntropyLoss(ignore_index=0, weight=torch.tensor(bias, device=device))

    res = []
    losses = []
    # No gradients are needed during evaluation; no_grad saves memory/time
    # without changing the computed values.
    with torch.no_grad():
        for batch in get_batch(BATCH_SIZE, data):
            sents, tags, dists = zip(*batch)
            x = torch.tensor(sents, device=device)
            y = torch.tensor(tags, device=device)
            x_lens = torch.tensor(pack_seqs(sents), device=device)
            masked = x.gt(0).view(-1)
            encoder_masking = x.lt(1)
            # The final batch may be smaller than the configured batch size.
            BATCH_SIZE = min(len(sents), BATCH_SIZE)

            enc_output, enc_hidden = encoder(x, x_lens)
            # 163 appears to be the start-of-sequence tag index — TODO confirm
            # against the tag vocabulary.
            start_decode = torch.tensor([[163]] * BATCH_SIZE, device=device)
            tag_score = decoder(start_decode, enc_hidden, enc_output, encoder_masking)
            pre = torch.argmax(tag_score, 1)
            # Zero out predictions/labels at padded positions.
            pre_m = masked.view(BATCH_SIZE, -1) * pre.view(BATCH_SIZE, -1).byte()
            tag = masked.view(BATCH_SIZE, -1) * y.byte()
            tag_loss = tag_criterion(tag_score, y.view(-1))
            losses.append(float(tag_loss))

            for p_tag, t_tag in zip(pre_m.tolist(), tag.tolist()):
                p_sent = []
                t_sent = []
                for p, t in zip(p_tag, t_tag):
                    if p > 0:
                        p_sent.append(ix2tag[p])
                        t_sent.append(ix2tag[t])
                res.append([p_sent, t_sent])

    logging.info('Loss: {:.4f}'.format(np.mean(losses)))
    # Restore training mode for the caller (train() keeps using these models).
    encoder.train(True)
    decoder.train(True)
    return res


if __name__ == '__main__':
    # Read CLI parameters, resolve the compute device, then launch training.
    config = get_user_paras()
    gpu_id = config['gpu']
    if torch.cuda.is_available():
        config['device'] = torch.device("cuda:{}".format(gpu_id))
    else:
        config['device'] = torch.device("cpu")
    train(config)
