"""
Train and Save Model
"""
import os
import argparse
import model
import time
import pickle
import torch
import torch.nn.functional as F
from torch import optim
import data_utils
import logging


def train(args):
    """Train the slot and intent models jointly with k-fold cross validation.

    For each fold the two models are trained asynchronously: each model's
    decoder receives a *detached* copy of the other model's encoder output,
    so gradients never flow across models.  After every epoch the dev split
    is scored; the checkpoint pair with the best slot F1 is saved to
    ``args.model_save_path`` as ``<fold>model_{intent,slot}_best.state_dict``.

    Args:
        args: argparse.Namespace carrying the hyper-parameters defined in
            ``__main__`` (epochs, batch_size, lr, max_len, class counts,
            patience, paths, ...).

    Side effects:
        Writes a timestamped log file in the working directory, reads
        ``./tag_dict.pkl``, and saves model checkpoints.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Logging: send records to both the console and a timestamped file.
    formatter = logging.Formatter('%(asctime)s - %(message)s')
    file = logging.FileHandler('./log'+time.strftime("%Y.%m.%d_%H.%M.%S", time.localtime())+'.txt',
                               mode='w', encoding='utf-8')
    file.setLevel(logging.INFO)
    file.setFormatter(formatter)

    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    console.setFormatter(formatter)

    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    # Fix: drop handlers left over from a previous call so that calling
    # train() more than once does not duplicate every log line.
    logger.handlers.clear()
    logger.addHandler(file)
    logger.addHandler(console)

    if not os.path.exists(args.model_save_path):
        os.makedirs(args.model_save_path)
    # tag_dict maps slot indices to tag strings ('index2tag') for chunk-based F1.
    with open(r'./tag_dict.pkl', 'rb') as f:
        tag_dict = pickle.load(f)

    for fold in range(args.num_fold):
        logger.info('========== fold {} =========='.format(fold))

        # load data
        train_data, dev_data = data_utils.load_training_data(fold, args.data_path)

        # Models.  Fix: move them onto the same device as the input tensors;
        # the original left them on CPU, which crashes when CUDA is available.
        slot_model = model.Slot(args.embedding_dim, args.lstm_hidden_size, args.dropout_rate,
                                args.batch_size, args.max_len, args.slot_classes).to(device)
        intent_model = model.Intent(args.embedding_dim, args.lstm_hidden_size, args.dropout_rate,
                                    args.batch_size, args.max_len, args.intent_classes).to(device)

        logger.debug(slot_model)
        logger.debug(intent_model)

        slot_optimizer = optim.Adam(slot_model.parameters(), lr=args.lr)
        intent_optimizer = optim.Adam(intent_model.parameters(), lr=args.lr)

        best_epoch = -1        # epoch with the best intent accuracy
        best_epoch_slot = -1   # epoch with the best slot F1 (drives early stopping)
        best_f1_score = 0.0
        best_correct_num = 0   # best number of correct intent predictions on dev

        for epoch in range(args.epochs):
            slot_loss_history = []
            intent_loss_history = []
            data_generator = data_utils.DataGenerator(train_data, args.batch_size)

            # Fix: make sure dropout is active during training (evaluation
            # below switches the models to eval mode).
            slot_model.train()
            intent_model.train()

            for batch_index in range(data_generator.get_step()):
                sentence, real_len, slot_label, intent_label = next(data_generator.get_batch())

                mask = data_utils.make_mask(args.batch_size, real_len, args.max_len, args.slot_classes).to(device)
                x = torch.tensor(sentence).to(device)
                y_slot = torch.tensor(slot_label).to(device)
                y_slot = data_utils.one_hot(y_slot, args.slot_classes, args.max_len).to(device)

                y_intent = torch.tensor(intent_label).to(device)
                y_intent = data_utils.one_hot(y_intent, args.intent_classes, args.max_len).to(device)

                # Clear existing gradients, otherwise they would be
                # accumulated onto the gradients of previous batches.
                slot_optimizer.zero_grad()
                intent_optimizer.zero_grad()

                # Each model caches its encoder output so the *other* model
                # can read it (detached) — gradients never cross models.
                hs = slot_model.encoder(x)
                slot_model.share_memory = hs.clone()

                hi = intent_model.encoder(x)
                intent_model.share_memory = hi.clone()

                # Slot step: masked cross-entropy over one-hot slot targets.
                slot_logits = slot_model.decoder(hs, intent_model.share_memory.detach())
                log_slot_logits = data_utils.masked_log_softmax(slot_logits, mask, dim=-1)
                slot_loss = -1.0 * torch.sum(y_slot * log_slot_logits)
                slot_loss_history.append(slot_loss.item())
                slot_loss.backward()
                torch.nn.utils.clip_grad_norm_(slot_model.parameters(), 5.0)
                slot_optimizer.step()

                # Asynchronous training: intent step uses the slot encoder
                # output captured above, detached from the slot graph.
                intent_logits = intent_model.decoder(hi, slot_model.share_memory.detach(), real_len)
                log_intent_logits = F.log_softmax(intent_logits, dim=-1)
                intent_loss = -1.0 * torch.sum(y_intent * log_intent_logits)
                intent_loss_history.append(intent_loss.item())
                intent_loss.backward()
                torch.nn.utils.clip_grad_norm_(intent_model.parameters(), 5.0)
                intent_optimizer.step()

                # Log a running average of the last 10 batch losses.
                if batch_index % 10 == 0 and batch_index > 0:
                    logger.info('Slot loss: {:.4f}  Intent loss: {:.4f}'.format(sum(slot_loss_history[-10:])/10.0,
                                                                                  sum(intent_loss_history[-10:])/10.0))

            # Evaluation on the dev split, one sentence at a time.
            # Fix: switch to eval mode (disables dropout) and skip gradient
            # tracking; the original evaluated with dropout still active.
            slot_model.eval()
            intent_model.eval()
            correct_num = 0
            TP, FP, FN = 0, 0, 0
            data_generator = data_utils.DataGenerator(dev_data, batch_size=1)
            with torch.no_grad():
                for batch_index in range(data_generator.get_step()):
                    sentence_test, real_len_test, slot_label_test, intent_label_test = next(data_generator.get_batch())
                    x_test = torch.tensor(sentence_test).to(device)

                    mask_test = data_utils.make_mask(1, real_len_test, args.max_len, args.slot_classes).to(device)
                    # Slot model generates hs_test and intent model generates hi_test
                    hs_test = slot_model.encoder(x_test)
                    hi_test = intent_model.encoder(x_test)

                    # Slot prediction
                    slot_logits_test = slot_model.decoder(hs_test, hi_test)
                    log_slot_logits_test = data_utils.masked_log_softmax(slot_logits_test, mask_test, dim=-1)
                    slot_pred_test = torch.argmax(log_slot_logits_test, dim=-1)
                    # Intent prediction
                    intent_logits_test = intent_model.decoder(hi_test, hs_test, real_len_test)
                    log_intent_logits_test = F.log_softmax(intent_logits_test, dim=-1)
                    res_test = torch.argmax(log_intent_logits_test, dim=-1)

                    if res_test.item() == intent_label_test[0]:
                        correct_num += 1

                    # Slot F1: compare BIO chunks of the prediction against the
                    # gold labels, truncated to the real sentence length.
                    slot_pred_test = slot_pred_test[0][:real_len_test[0]]
                    slot_label_test = slot_label_test[0][:real_len_test[0]]

                    slot_pred_test = [int(item) for item in slot_pred_test]
                    slot_label_test = [int(item) for item in slot_label_test]

                    slot_pred_test = [tag_dict['index2tag'][item] for item in slot_pred_test]
                    slot_label_test = [tag_dict['index2tag'][item] for item in slot_label_test]

                    # 'O' sentinels ensure chunks at the sequence edges close.
                    pred_chunks = data_utils.get_chunks(['O'] + slot_pred_test + ['O'])
                    label_chunks = data_utils.get_chunks(['O'] + slot_label_test + ['O'])

                    for pred_chunk in pred_chunks:
                        if pred_chunk in label_chunks:
                            TP += 1
                        else:
                            FP += 1
                    for label_chunk in label_chunks:
                        if label_chunk not in pred_chunks:
                            FN += 1

            # Fix: update the best intent accuracy once per epoch, after the
            # whole dev set is scored (the original re-checked every sample).
            if correct_num > best_correct_num:
                best_correct_num = correct_num
                best_epoch = epoch

            # Fix: guard against division by zero when neither prediction nor
            # gold labels contain any chunk.
            denominator = 2 * TP + FN + FP
            f1_score = 100.0 * 2 * TP / denominator if denominator > 0 else 0.0
            if f1_score > best_f1_score:
                best_f1_score = f1_score
                best_epoch_slot = epoch

                torch.save(intent_model.state_dict(),
                           os.path.join(args.model_save_path, str(fold) + 'model_intent_best.state_dict'))
                torch.save(slot_model.state_dict(),
                           os.path.join(args.model_save_path, str(fold) + 'model_slot_best.state_dict'))

            logger.info('Epoch: [{}/{}], Intent Val Acc: {:.4f}  Slot F1 score: {:.4f}'.format(epoch + 1, args.epochs,
                        100.0 * correct_num / len(dev_data), f1_score))
            logger.info('Best Intent Acc: {:.4f} at Epoch: [{}]'.format(100.0 * best_correct_num / len(dev_data), best_epoch + 1))
            logger.info('Best F1 score: {:.4f} at Epoch: [{}]'.format(best_f1_score, best_epoch_slot + 1))

            # Early stopping on the slot F1 score.
            # Fix: the original message was garbled placeholder text.
            if epoch - best_epoch_slot >= args.patience:
                logger.info('Val slot F1 has not improved since epoch {}; stopping early'.format(best_epoch_slot + 1))
                break
        logger.info('fold: ' + str(fold) + ', ' + 'Accuracy: ' + str(best_correct_num/len(dev_data)) +
                    ', ' + 'F1 Score: ' + str(best_f1_score))


if __name__ == '__main__':
    # Hyper-parameter CLI: (flag, add_argument keyword arguments).
    option_spec = [
        ('--epochs', dict(type=int, default=200, help='training epochs')),
        ('--intent_classes', dict(type=int, default=24, help='')),
        ('--slot_classes', dict(type=int, default=125, help='')),
        ('--max_len', dict(type=int, default=30, help='maximum sequence length')),
        ('--batch_size', dict(type=int, default=16, help='batch size')),
        ('--embedding_dim', dict(type=int, default=768, help='')),
        ('--lstm_hidden_size', dict(type=int, default=200, help='')),
        ('--lr', dict(type=float, default=0.001, help='learning rate')),
        ('--dropout_rate', dict(type=float, default=0.3, help='dropout rate')),
        ('--num_fold', dict(type=int, default=5, help='5 fold cross validation')),
        ('--patience', dict(type=int, default=15, help='early stopping patient')),
        ('--model_save_path', dict(default='../../result_and_model/slot_filling')),
        ('--data_path', dict(default='../../data/npy')),
    ]
    cli = argparse.ArgumentParser()
    for flag, options in option_spec:
        cli.add_argument(flag, **options)
    parsed_args = cli.parse_args()
    print(parsed_args)
    train(parsed_args)
