import logging
import os
import pickle
import random
import time

import torch
import torch.nn as nn
import torch.optim as optim
import logger
from evaluate import evaluavtion_triple
from model_v2 import EncoderRNN, DecoderRNN, DecoderRNNDist, E2EModel, E2EModel_cus
from config import *
from pre_process import load_data, get_batch, pack_seqs
import numpy as np

from schedule_job import get_logger
from util import get_user_paras, get_train_acc, get_train_tag_acc

BASE_DIR = './data/'
EXP_DIR = './experiments/'
EXP_RESULTS_DIR = './experiments/results/'
EXP_LOG_DIR = './experiments/log/'


def add_regularization(model, weight_decay=2e-5):
    """Split a model's parameters into weight-decayed and decay-free groups.

    Bias parameters get no weight decay; all remaining parameters get
    ``weight_decay``. The returned list is suitable as the ``params``
    argument of any torch optimizer (per-parameter option groups).
    """
    biases = [p for name, p in model.named_parameters() if 'bias' in name]
    weights = [p for name, p in model.named_parameters() if 'bias' not in name]
    return [
        {'params': weights, 'weight_decay': weight_decay},
        {'params': biases, 'weight_decay': 0},
    ]


def train(user_config, logger=None):
    """Train the end-to-end tagging model, evaluating on the test set each epoch.

    Args:
        user_config: dict with keys 'batch', 'alpha', 'device', 'epoch',
            'regularization', 'lr', 'dropout', 'teacher'.
        logger: a logging.Logger instance; used unconditionally, so it must
            not actually be None despite the default.

    Returns:
        [losses_all, prf_train, prf_test]: per-epoch lists of batch losses,
        per-batch train (P, R, F), and per-epoch test (P, R, F).
    """
    BATCH_SIZE = user_config['batch']
    alpha = user_config['alpha']
    device = user_config['device']
    epoch = user_config['epoch']
    regularization = user_config['regularization']
    lr = user_config['lr']
    dropout = user_config['dropout']
    teacher = user_config['teacher']
    logger.info('current_config : batch_size {} alpha {} GPU {} epoch {} regularization {} lr {} '
                'dropout {} teacher {}'
                .format(BATCH_SIZE, alpha, device, epoch, regularization, lr, dropout, teacher))

    train_data, word2index, ix2tag, test_data, _ = load_data('train_data_d.pkl',
                                                          'test_data_d.pkl')
    # Close the handle explicitly (the original leaked it via pickle.load(open(...))).
    with open(BASE_DIR + 'wv_matrix.pkl', 'rb') as fh:
        word_vec = pickle.load(fh)

    e2emodel = E2EModel_cus(len(word2index)+1, EMBEDDING_DIM, ENCODER_DIM, word_vec,
                        len(ix2tag)+1, DECODER_DIM, dropout, device)
    e2emodel.to(device)

    # Class weights: 1.0 for the first two indices, alpha for the rest;
    # index 0 (padding) is excluded from the loss via ignore_index.
    bias = [1.0, 1.0]
    bias.extend(alpha for _ in range(len(ix2tag)-1))
    tag_criterion = nn.CrossEntropyLoss(ignore_index=0, weight=torch.tensor(bias, device=device))

    if regularization:
        # Apply weight decay to weights only; biases stay decay-free.
        model_optim = optim.RMSprop(add_regularization(e2emodel, regularization), lr)
    else:
        model_optim = optim.RMSprop(e2emodel.parameters(), lr=lr, eps=1e-08, alpha=0.9)

    random.shuffle(train_data)

    split = round(0.1 * len(test_data))
    valid_set = test_data[:split]
    train_set = train_data
    test_set = test_data
    F1 = 0.0

    losses_all = []
    prf_train = []
    prf_test = []
    # Best state_dict so far; initialized up front so save_model() below
    # never sees an undefined name, even if F1 never improves past 0.
    e2emodel_m_params = e2emodel.state_dict()

    for step in range(epoch):
        losses_epoch = []
        losses = []
        macro_r = []
        random.shuffle(train_set)
        BATCH_SIZE = user_config['batch']
        s_step = len(train_set)//BATCH_SIZE

        for i, batch in enumerate(get_batch(BATCH_SIZE, train_set)):
            sents, tags, distances = zip(*batch)
            # The final batch may be smaller than the configured batch size.
            BATCH_SIZE = len(sents) if len(sents) < BATCH_SIZE else BATCH_SIZE
            x = torch.tensor(sents, device=device)
            y = torch.tensor(tags, device=device)
            masked = x.gt(0).view(-1)  # flattened padding mask (token id 0 = pad)
            target = masked.view(BATCH_SIZE, -1) * y.byte()

            e2emodel.zero_grad()

            tag_score = e2emodel(x, target.long(), teacher_forcing=teacher)
            tag_score = tag_score * masked.unsqueeze(1).float()
            pre_m = torch.argmax(tag_score, 1).view(BATCH_SIZE, -1)
            loss = tag_criterion(tag_score, y.view(-1))

            loss.backward()
            losses.append(float(loss))
            losses_epoch.append(float(loss))

            model_optim.step()
            p_train, r_train, f_train = get_train_tag_acc(pre_m, target)
            macro_r.append(f_train)  # running F1 window for the progress log
            prf_train.append([p_train, r_train, f_train])

            if i % 100 == 0:
                # Use the passed-in logger (the original mixed in the root
                # logging module here).
                logger.info('Epoch:[{}/{}], Step:[{}/{}], Loss:{:.4f}, F1:{:.4f}'.format(
                    step+1, epoch, i, s_step, np.mean(losses), np.mean(macro_r)))
                # Reset BOTH running windows so the next line reflects only
                # the most recent steps (the original reset an unused list
                # and let the printed F1 window grow forever).
                losses = []
                macro_r = []

        # One entry per epoch. The original appended inside the batch loop,
        # duplicating the same list once per batch.
        losses_all.append(losses_epoch)

        logger.info('============ test ===============')
        res, _ = test_model(e2emodel, test_set, user_config, ix2tag, logger)
        t_P, t_R, t_F1 = evaluavtion_triple(res)
        if t_F1 > F1:
            # Keep the parameters of the best-scoring epoch.
            e2emodel_m_params = e2emodel.state_dict()
            F1 = t_F1
        logger.info("P, R, F  {:.4f} {:.4f} {:.4f}".format(t_P, t_R, t_F1))
        prf_test.append((t_P, t_R, t_F1))

    save_model(e2emodel_m_params, user_config)
    save_loss_prf(losses_all, prf_test, prf_train, user_config)
    logger.info("Train Complete!")
    return [losses_all, prf_train, prf_test]


def test_model(model, data, user_config, ix2tag, logger=None):
    """Evaluate `model` on `data` and collect predicted vs. gold tag sequences.

    Args:
        model: the trained E2E model (moved to the configured device).
        data: iterable of (sents, tags, dists) samples, batched by get_batch.
        user_config: dict providing 'device', 'batch', and 'alpha'.
        ix2tag: index -> tag-label mapping (dict; assumed — TODO confirm).
        logger: optional Logger; mean loss is reported when provided.

    Returns:
        res: list of [predicted_tags, gold_tags] pairs, one per sentence.
        s_score: list of raw (masked) tag-score tensors, one per batch.
    """
    device = user_config['device']
    BATCH_SIZE = user_config['batch']
    alpha = user_config['alpha']
    model.to(device)

    model.train(False)

    # Same class weighting as training so the reported loss is comparable.
    bias = [1.0, 1.0]
    bias.extend(alpha for _ in range(len(ix2tag)-1))
    tag_criterion = nn.CrossEntropyLoss(ignore_index=0, weight=torch.tensor(bias, device=device))

    res = []
    losses = []
    s_score = []
    # No gradients are needed for evaluation; skip building the graph.
    with torch.no_grad():
        for i, batch in enumerate(get_batch(BATCH_SIZE, data)):
            sents, tags, dists = zip(*batch)
            x = torch.tensor(sents, device=device)
            y = torch.tensor(tags, device=device)
            # The final batch may be smaller than the configured batch size.
            BATCH_SIZE = len(sents) if len(sents) < BATCH_SIZE else BATCH_SIZE
            masked = x.gt(0).view(-1)  # padding mask (token id 0 = pad)
            target = masked.view(BATCH_SIZE, -1) * y.byte()

            tag_score = model(x)
            tag_score = tag_score * masked.unsqueeze(1).float()
            pre_m = torch.argmax(tag_score, 1).view(BATCH_SIZE, -1)
            tag_loss = tag_criterion(tag_score, y.view(-1))
            losses.append(float(tag_loss))
            s_score.append(tag_score)
            for p_tag, t_tag in zip(pre_m.tolist(), target.tolist()):
                p_sent = []
                t_sent = []

                for p, t in zip(p_tag, t_tag):
                    if p > 0:
                        # Resolve both labels BEFORE appending so p_sent and
                        # t_sent stay aligned. The original appended to
                        # p_sent first and could then fail on ix2tag[t],
                        # leaving the two lists out of sync.
                        p_label = ix2tag.get(p)
                        t_label = ix2tag.get(t)
                        if p_label is not None and t_label is not None:
                            p_sent.append(p_label)
                            t_sent.append(t_label)
                        else:
                            # Preserve the original placeholder for unknown
                            # indices, but keep the lists symmetric.
                            t_sent.append([''])
                res.append([p_sent, t_sent])
    if logger is not None:
        logger.info('Loss: {:.4f}'.format(np.mean(losses)))
    model.train(True)
    return res, s_score


def save_model(model_params, config):
    """Persist a model state_dict under model_dir/<date>/ and log the event.

    Args:
        model_params: the state_dict (or any object torch.save accepts).
        config: the run configuration, logged for provenance.

    NOTE(review): `model_dir` is not defined in this file — presumably it
    comes from `from config import *`; verify before moving this function.
    """
    date = time.strftime("%Y_%m_%d")
    # os.path.join is separator-safe (the original's model_dir+date broke if
    # model_dir lacked a trailing '/'); exist_ok avoids the check-then-create
    # race of the original exists()/mkdir() pair.
    path = os.path.join(model_dir, date)
    os.makedirs(path, exist_ok=True)
    torch.save(model_params, os.path.join(path, 'e2emodel_{}.pkl'.format(time.strftime("%H_%M"))))
    logger = get_logger(date, EXP_LOG_DIR)
    logger.info(config)
    logger.info("save model")


def save_loss_prf(loss, prf, prf_train, config):
    """Pickle the loss history and test/train P-R-F curves under EXP_RESULTS_DIR/<date>/.

    Args:
        loss: per-epoch lists of batch losses.
        prf: per-epoch test (P, R, F) tuples.
        prf_train: per-batch train [P, R, F] lists.
        config: the run configuration, logged for provenance.
    """
    date = time.strftime("%Y_%m_%d")
    path = EXP_RESULTS_DIR + date + os.sep
    # exist_ok avoids the check-then-create race of the original.
    os.makedirs(path, exist_ok=True)
    # One timestamp for all three files; the original re-read the clock per
    # file and could produce mismatched suffixes across a minute boundary.
    stamp = time.strftime("%H_%M")
    for prefix, obj in (('losses_', loss), ('test_prf_', prf), ('train_prf_', prf_train)):
        # `with` closes each handle (the original leaked three open files).
        with open('{}{}{}'.format(path, prefix, stamp), 'wb') as fh:
            pickle.dump(obj, fh)
    logger = get_logger(date, EXP_LOG_DIR)
    logger.info(config)
    logger.info("save loss")
    logger.info("save prf")
    logger.info("save train_prf")  # fixed garbled full-width dashes in the original message


if __name__ == '__main__':
    # Configure logging through the project's `logger` module, then rebind
    # the name to an actual Logger instance. NOTE(review): this shadows the
    # imported `logger` module — intentional here since setup_logging() is
    # its only use, but fragile if more of that module is needed later.
    logger.setup_logging()
    logger = logging.getLogger('trainer')
    user_config = get_user_paras()
    # Select the GPU index requested in the user config, falling back to CPU.
    device = torch.device("cuda:{}".format(user_config['gpu']) if torch.cuda.is_available() else "cpu")
    user_config['device'] = device
    train(user_config, logger)

    # device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
    # test_data = pickle.load(open(BASE_DIR+'test_data_p.pkl', 'rb'))
    # res = test_model(test_data)
    # print(evaluavtion_triple(res))