# -*- coding: utf-8 -*-
"""
Ensemble: combine the predictions of the 5 fold models on the test set
@Time    : 2019/7/10 0:48
@Author  : PeterV
@FileName: ensemble.py
@Software: PyCharm
"""
import numpy as np
import logging
import time
import torch
import torch.nn.functional as F
import os
import data_utils
import pickle
import argparse
import json


def SingleModelDomain2Intent(domain, intent2index, intent_score):
    """Restrict intent scores to the intents that belong to *domain*.

    Loads the domain -> intents mapping from ./domain2intent.json and zeroes,
    in place, every column of the (1, num_intents) score tensor whose intent
    is not listed under the given domain.

    :param domain: domain name; must be a key of domain2intent.json
    :param intent2index: dict mapping intent name -> column index in the score tensor
    :param intent_score: torch.Tensor of shape (1, len(intent2index)); modified in place
    :return: the same tensor, with out-of-domain columns set to 0.0
    :raises KeyError: if *domain* is not present in domain2intent.json
    """
    assert isinstance(intent_score, torch.Tensor)
    assert len(intent_score.shape) == 2
    assert intent_score.shape[0] == 1
    assert intent_score.shape[1] == len(intent2index)

    # `with` closes the handle deterministically (the original leaked it).
    with open("./domain2intent.json", encoding='utf-8', mode='r') as file:
        domain2intent = json.load(file)

    domain_intents = domain2intent[domain]
    # Column indices that are allowed to keep their score.
    remain_index = {index for intent, index in intent2index.items()
                    if intent in domain_intents}
    for i in range(intent_score.shape[1]):
        if i not in remain_index:
            intent_score[0, i] = 0.0
    return intent_score


def integration_test(args):
    """Evaluate the 5-fold ensemble on the test set.

    Sums the per-fold log-probabilities for both slot and intent heads,
    reports intent accuracy and slot chunk F1, and dumps the per-sentence
    predictions to a JSON file.

    :param args: parsed command-line arguments (see __main__)
    :return: None
    """
    # ---- logging: console at DEBUG, timestamped file at INFO ----
    logger = logging.getLogger(__name__)
    formatter = logging.Formatter('%(asctime)s - %(message)s')
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    console.setFormatter(formatter)
    file_handler = logging.FileHandler('./test_log'+time.strftime("%Y.%m.%d_%H.%M.%S", time.localtime())+'.txt',
                                       mode='w', encoding='utf-8')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(console)
    logger.addHandler(file_handler)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    test_data = data_utils.load_testing_data(args.data_path)
    logger.debug('Testing Data Length: ' + str(len(test_data)))

    # ---- load one slot model and one intent model per fold ----
    slot_models = []
    intent_models = []
    for fold in range(args.num_fold):
        slot_models.append(torch.load(os.path.join(args.model_save_path, str(fold) + 'model_slot_best.state_dict')))
        intent_models.append(torch.load(os.path.join(args.model_save_path, str(fold) + 'model_intent_best.state_dict')))

    correct_num = 0
    TP, FP, FN = 0, 0, 0
    all_dict = []
    data_generator = data_utils.DataGenerator(test_data, batch_size=1)
    for batch_index in range(data_generator.get_step()):
        tmp_slot = {}
        tmp_all = {}

        sentence, real_len, slot_label, intent_label, word_sentence = next(data_generator.get_batch())
        # Accumulators for the summed log-probabilities over all folds.
        log_slot_logits = torch.zeros(1, args.max_len, args.slot_classes).to(device)
        log_intent_logits = torch.zeros(args.intent_classes).to(device)

        for slot_model, intent_model in zip(slot_models, intent_models):
            x = torch.tensor(sentence).to(device)

            mask_test = data_utils.make_mask(1, real_len, args.max_len, args.slot_classes).to(device)
            # Slot model generates hs_test and intent model generates hi_test
            hs_test = slot_model.encoder(x)
            hi_test = intent_model.encoder(x)

            # Slot: accumulate masked log-softmax across folds.
            slot_logits = slot_model.decoder(hs_test, hi_test)
            log_slot_logits_test = data_utils.masked_log_softmax(slot_logits, mask_test, dim=-1)
            log_slot_logits += log_slot_logits_test
            # Intent: BUGFIX — the original *assigned* here, discarding every
            # fold but the last and making the intent ensemble a no-op.
            # Accumulate like the slot branch.  The add is out-of-place so the
            # (C,) accumulator broadcasts against the decoder's (1, C) output.
            intent_logits_test = intent_model.decoder(hi_test, hs_test, real_len)
            log_intent_logits = log_intent_logits + F.log_softmax(intent_logits_test, dim=-1)

        slot_pred_test = torch.argmax(log_slot_logits, dim=-1)
        res_test = torch.argmax(log_intent_logits, dim=-1)

        # intent accuracy
        if res_test.item() == intent_label[0]:
            correct_num += 1

        # slot predictions: trim padding down to the true sentence length
        slot_pred_test = slot_pred_test[0][:real_len[0]]
        slot_label = slot_label[0][:real_len[0]]

        slot_pred_test = [int(item) for item in slot_pred_test]
        slot_label = [int(item) for item in slot_label]

        # NOTE(review): index2slot_dict / index2intent are not defined in this
        # file — presumably injected at module level elsewhere (e.g. loaded
        # from a pickle); confirm before running this script standalone.
        slot_pred_test = [index2slot_dict[item] for item in slot_pred_test]
        slot_label = [index2slot_dict[item] for item in slot_label]

        # Pad with 'O' on both sides so chunk boundaries at the edges close.
        pred_chunks = data_utils.get_chunks(['O'] + slot_pred_test + ['O'])
        label_chunks = data_utils.get_chunks(['O'] + slot_label + ['O'])

        for item in pred_chunks:
            if item[2] != 'AD>' and item[2] != 'O':
                # item is (start, end, tag); offsets compensate for the 'O' pad.
                tmp_slot[item[2]] = word_sentence[0][item[0] - 2:item[1] - 1]

        tmp_all['text'] = word_sentence
        tmp_all['intent'] = index2intent[res_test.item()]
        tmp_all['slots'] = tmp_slot

        # chunk-level precision/recall bookkeeping
        correct_slots_cnt = 0
        for pred_chunk in pred_chunks:
            if pred_chunk in label_chunks:
                TP += 1
                correct_slots_cnt += 1
            else:
                FP += 1
        for label_chunk in label_chunks:
            if label_chunk not in pred_chunks:
                FN += 1

        tmp_all['slot_correct'] = 1 if correct_slots_cnt == len(label_chunks) else 0

        all_dict.append(tmp_all)

    # Guard against an empty test set / zero chunks (original divided blindly).
    denom = 2 * TP + FN + FP
    F1_score = 100.0 * 2 * TP / denom if denom else 0.0
    intent_acc = 100.0 * correct_num / len(test_data) if test_data else 0.0
    print('Intent Acc: {:.4f}  Slot F1 score: {:.4f}'.format(intent_acc, F1_score))

    # `with` closes the dump file deterministically (the original leaked it).
    with open('predictor_0710方案4.json', 'w', encoding='utf-8') as file:
        json.dump(all_dict, file, ensure_ascii=False)

    print('len all dict', len(all_dict))


if __name__ == '__main__':
    # Build the CLI from a declarative spec: (flag, type, default, help).
    cli = argparse.ArgumentParser()
    for flag, kind, default, desc in (
            ('--intent_classes', int, 24, ''),
            ('--slot_classes', int, 125, ''),
            ('--max_len', int, 30, 'maximum sequence length'),
            ('--batch_size', int, 16, 'batch size'),
            ('--embedding_dim', int, 768, ''),
            ('--lstm_hidden_size', int, 200, ''),
            ('--lr', float, 0.001, 'learning rate'),
            ('--num_fold', int, 5, '5 fold cross validation'),
    ):
        cli.add_argument(flag, type=kind, default=default, help=desc)
    cli.add_argument('--model_save_path', default='./model')
    cli.add_argument('--data_path', default='../../data/npy')
    args = cli.parse_args()
    print(args)
    integration_test(args)
