# -*- coding: utf-8 -*-
"""

@Time    : 2019/7/10 0:48
@Author  : PeterV
@FileName: ensemble.py
@Software: PyCharm
"""
import numpy as np
import torch
import torch.nn.functional as F
import os
import pickle
import json

# Number of intent classes
intent_num = 24

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
total_epoch = 150  # total number of training epochs
max_len = 30  # maximum number of tokens per sentence
batch = 16  # batch size
learning_rate = 0.001  # learning rate
DROPOUT = 0  # dropout probability
fold = 5  # 5-fold cross-validation


# Directory holding the preprocessed BERT feature files.
data_path = 'data/bertdata'
# Directory holding the per-fold model checkpoints.
model_path = 'model'

result_path = 'result_and_model'

embedding_size = 768  # BERT hidden size
lstm_hidden_size = 200

# Mapping from intent label string (exactly as it appears in the dataset)
# to class index.
# NOTE(review): 'REPLAY_ALL' looks like a typo of 'REPLY_ALL', but these keys
# must match the label strings in the data files — confirm before renaming.
intent2index = {'LAUNCH': 0,
                'QUERY': 1,
                'ROUTE': 2,
                'SENDCONTACTS': 3,
                'SEND': 4,
                'REPLY': 5,
                'REPLAY_ALL': 6,
                'LOOK_BACK': 7,
                'NUMBER_QUERY': 8,
                'POSITION': 9,
                'PLAY': 10,
                'DEFAULT': 11,
                'DIAL': 12,
                'TRANSLATION': 13,
                'OPEN': 14,
                'CREATE': 15,
                'FORWARD': 16,
                'VIEW': 17,
                'SEARCH': 18,
                'RISERATE_QUERY': 19,
                'DOWNLOAD': 20,
                'nan': 21,
                'DATE_QUERY': 22,
                'CLOSEPRICE_QUERY': 23
                }

# Inverse mapping: class index -> intent label string.
index2intent = {value: key for key, value in intent2index.items()}
# print(index2intent)

# Sentence feature matrices (.npy), one row per sample.
x_train_npy = os.path.join(data_path, 'train_x0.npy')
x_dev_npy = os.path.join(data_path, 'dev_x0.npy')
x_test_npy = os.path.join(data_path, 'test_x.npy')

# Raw sentences, one per line.
x_train_txt = os.path.join(data_path, 'train_x0.txt')
x_dev_txt = os.path.join(data_path, 'dev_x0.txt')
x_test_txt = os.path.join(data_path, 'test_x.txt')

# Intent label indices (.npy), aligned with the feature files above.
intent_train_npy = os.path.join(data_path, 'train_y0.npy')
intent_dev_npy = os.path.join(data_path, 'dev_y0.npy')
intent_test_npy = os.path.join(data_path, 'test_y.npy')

# Slot tag sequences, one space-separated sequence per line ("PadHead"
# presumably means a pad/CLS-style token was prepended — TODO confirm).
slot_train = os.path.join(data_path, 'train_x0_slot_PadHead.txt')
slot_dev = os.path.join(data_path, 'dev_x0_slot_PadHead.txt')
slot_test = os.path.join(data_path, 'test_x_slot_PadHead.txt')

# Slot tag vocabulary, one tag per line; line number == tag index.
vocab_slot_file = os.path.join(data_path, 'vocab.slot')

dev_result_file = os.path.join(result_path, 'dev_log.csv')
# Map each slot tag in vocab.slot to its line index (tag -> id).
slot_dict = {}

with open(vocab_slot_file) as f:
    # Iterate the file object directly instead of materializing readlines().
    for i, line in enumerate(f):
        slot_dict[line.strip()] = i


def makeindex(x_npy, y_npy, slot, x_sentence):
    """Assemble per-sample records from the preprocessed data files.

    Args:
        x_npy: path to a .npy file of sentence features (one row per sample).
        y_npy: path to a .npy file of intent label indices.
        slot: path to a text file, one space-separated slot-tag sequence per line.
        x_sentence: path to a UTF-8 text file, one raw sentence per line.

    Returns:
        list of [features, real_len, slot_ids, intent, sentence] per sample.
    """
    train_data = []
    train_x = np.load(x_npy)
    train_intent = np.load(y_npy)
    train_slot = []
    train_len = []
    train_sen = []
    num_Unknown = 0
    with open(slot) as f:
        # Stream the file instead of materializing readlines().
        for line in f:
            sample_slot = []
            for word_slot in line.strip().split(' '):
                try:
                    sample_slot.append(slot_dict[word_slot])
                except KeyError:
                    # Unknown tags fall back to the 'O' (outside) tag.
                    sample_slot.append(slot_dict['O'])
                    num_Unknown += 1

            # Pad every sequence up to max_len (was a hard-coded 30).
            if len(sample_slot) < max_len:
                sample_slot.extend([slot_dict['<PAD>']] * (max_len - len(sample_slot)))

            train_slot.append(sample_slot)
        print('num_Unknown')
        print(num_Unknown)

    with open(x_sentence, encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            train_sen.append(line)
            # +1 presumably accounts for the prepended pad/CLS-style token
            # in the "PadHead" slot files — TODO confirm.
            train_len.append(len(line) + 1)

    print('lenTX')
    print(len(train_x))
    print('lenTS')
    print(len(train_slot))
    print('lenTraLen')
    print(len(train_len))
    print('LenTraIntent')
    print(len(train_intent))

    # All five parallel lists must line up one-to-one.
    assert len(train_x) == len(train_slot) == len(train_len) == len(train_intent) == len(train_sen)
    for x, length, slots, intent, sen in zip(train_x, train_len, train_slot, train_intent, train_sen):
        train_data.append([x, length, slots, intent, sen])

    return train_data


def getTestData():
    """Load the test split as [features, real_len, slot_ids, intent, sentence] records."""
    return makeindex(x_test_npy, intent_test_npy, slot_test, x_test_txt)


# Inverse mapping: slot id -> slot tag string (dict comprehension instead of
# the manual key loop).
index2slot_dict = {index: tag for tag, index in slot_dict.items()}


def get_batch(data, batch_size=batch):
    """Yield mini-batches from `data` as five parallel lists.

    Each sample in `data` is [features, real_len, slot_ids, intent, sentence];
    each yielded tuple is (sentences, real_lens, slot_labels, intent_labels,
    word_sentences). A trailing partial batch (len(data) % batch_size samples)
    is dropped, matching the original behavior.
    """
    # random.shuffle(data)
    sindex = 0
    eindex = batch_size
    while eindex <= len(data):
        sentence = []
        real_len = []
        slot_label = []
        intent_label = []
        word_sentence = []

        # Iterate the slice directly (the original built the slice into a dead
        # local named `batch`, shadowing the module constant, then re-indexed
        # `data` anyway).
        for sample in data[sindex:eindex]:
            sentence.append(sample[0])
            real_len.append(sample[1])
            slot_label.append(sample[2])
            intent_label.append(sample[3])
            word_sentence.append(sample[4])

        sindex = eindex
        eindex += batch_size

        yield (sentence, real_len, slot_label, intent_label, word_sentence)


def get_chunks(labels):
    """Extract labelled chunks from a BIOES-style tag sequence.

    `labels` is expected to be wrapped with sentinel tags on both ends (the
    first and last positions are never inspected as chunk members). Returns a
    list of (start_idx, end_idx, type) tuples with inclusive indices into
    `labels`.
    """
    NON_CHUNK = ('O', '<pad>', '<unk>', '<s>', '</s>', '<STOP>', '<START>')

    def split_label(label):
        # A non-chunk tag behaves like a plain 'O'; otherwise 'B-loc' -> ('B', 'loc').
        if label in NON_CHUNK:
            return 'O', 'O'
        return label[:1], label[2:]

    chunks = []
    start_idx, end_idx = 0, 0
    for idx in range(1, len(labels) - 1):
        prevTag, prevType = split_label(labels[idx - 1])
        Tag, Type = split_label(labels[idx])
        nextTag, nextType = split_label(labels[idx + 1])

        # A chunk opens on a 'B' tag, on an inconsistent transition, or when
        # the entity type changes.
        chunkStart = (
            (Tag == 'B' and prevTag in ('B', 'I', 'O'))
            or (prevTag, Tag) in (('O', 'I'), ('E', 'E'), ('E', 'I'), ('O', 'E'))
            or (Tag != 'O' and prevType != Type)
        )
        # A chunk closes before a 'B'/'O', on an 'E' tag, or when the entity
        # type changes.
        chunkEnd = (
            (Tag in ('B', 'I') and nextTag in ('B', 'O'))
            or (Tag == 'E' and nextTag in ('E', 'I', 'O'))
            or (Tag != 'O' and Type != nextType)
        )

        if chunkStart:
            start_idx = idx
        if chunkEnd:
            end_idx = idx
            chunks.append((start_idx, end_idx, Type))
            start_idx, end_idx = 0, 0
    return chunks


def make_mask(real_len, max_len=max_len, label_size=len(slot_dict), batch=batch):
    """Build a (batch, max_len, label_size) mask: 1.0 for real token positions,
    0.0 for padding, based on each sample's real length."""
    mask = torch.zeros(batch, max_len, label_size)
    for row, length in enumerate(real_len):
        mask[row, :length, :] = 1.0
    return mask


def masked_log_softmax(vector: torch.Tensor, mask: torch.Tensor, dim: int = -1) -> torch.Tensor:
    """Log-softmax over `vector` where positions with mask 0 get ~-inf logits.

    The tiny 1e-45 offset keeps log() finite for masked-out entries while
    leaving kept entries (mask 1) essentially unchanged. A `None` mask falls
    through to a plain log-softmax.
    """
    if mask is None:
        return torch.nn.functional.log_softmax(vector, dim=dim)

    mask = mask.float()
    # Unsqueeze after the batch dim until the mask broadcasts over `vector`.
    for _ in range(vector.dim() - mask.dim()):
        mask = mask.unsqueeze(1)

    masked_vector = vector + (mask + 1e-45).log()
    return torch.nn.functional.log_softmax(masked_vector, dim=dim)


def one_hot(array, Num=len(slot_dict), maxlen=max_len):
    """One-hot encode an index tensor.

    A 1-D (batch,) tensor becomes (batch, Num); a 2-D (batch, maxlen) tensor
    becomes (batch, maxlen, Num). An index equal to `Num` is treated as
    padding and left as an all-zero row.
    """
    shape = array.size()
    n_samples = shape[0]

    if len(shape) == 1:
        encoded = torch.zeros(n_samples, Num)
        for row in range(n_samples):
            encoded[row][array[row]] = 1
        return encoded

    encoded = torch.zeros(n_samples, maxlen, Num)
    for row in range(n_samples):
        for col in range(maxlen):
            index = array[row, col]
            if index != Num:  # skip the padding sentinel
                encoded[row][col][index] = 1
    return encoded


def writeJson(all_dict):
    """Dump the per-sample predictions to the result JSON file (UTF-8,
    non-ASCII characters preserved)."""
    # Bug fix: the original leaked the file handle; `with` guarantees flush
    # and close.
    with open('predictor_0710方案4.json', 'w', encoding='utf-8') as file:
        json.dump(all_dict, file, ensure_ascii=False)


def SingleModelDomain2Intent(domain, intent2index, intent_score):
    """Zero out intent scores that do not belong to `domain` (domain-guided
    intent filtering).

    Args:
        domain: domain name; must be a key of ./domain2intent.json.
        intent2index: mapping from intent label string to score column index.
        intent_score: (1, len(intent2index)) tensor of intent scores; modified
            in place and also returned.

    Returns:
        The same tensor with out-of-domain columns set to 0.0.
    """
    assert type(intent_score) == torch.Tensor
    assert intent_score.shape[0] == 1
    assert intent_score.shape[1] == len(intent2index)
    assert len(intent_score.shape) == 2
    print(domain)
    # Bug fix: the original left the config file handle open; `with` closes it.
    with open("./domain2intent.json", encoding='utf-8', mode='r') as file:
        domain2intent = json.load(file)
    domain_intents = domain2intent[domain]
    # Columns of intents that are valid for this domain.
    remain_index = [intent2index[intent] for intent in intent2index
                    if intent in domain_intents]
    for i in range(intent_score.shape[1]):
        if i not in remain_index:
            intent_score[0, i] = 0.0
        else:
            print(i)
    # #归一化返回分数
    # for i, score in intent_score[0, :]:
    #     intent_score[0, i] = score / np.sum(intent_score, axis=1)
    print(intent_score)
    return intent_score


def integrationTest():
    """Evaluate the k-fold ensemble on the test set.

    Loads `fold` (slot, intent) model pairs, sums each model's
    log-probabilities per sample (batch size 1), decodes slot chunks and the
    intent, reports intent accuracy plus slot chunk F1, and dumps the
    per-sample predictions to JSON via writeJson.
    """
    test_data = getTestData()
    print('test_data len')
    print(len(test_data))
    label_size = len(slot_dict)
    slot_models = []
    intent_models = []
    # NOTE(review): loaded but never used below — kept because the load
    # itself validates the pickle exists; confirm before removing.
    with open(os.path.join(data_path, 'TagDict.pkl'), 'rb') as fpkl:
        standard_vocab_dict = pickle.load(fpkl)
    for i in range(fold):
        slot_models.append(torch.load(os.path.join(model_path, str(i) + 'model_slot_best.ckpt')))
        intent_models.append(torch.load(os.path.join(model_path, str(i) + 'model_intent_best.ckpt')))

    correct_num = 0
    TP, FP, FN = 0, 0, 0
    all_dict = []
    for data_test in get_batch(test_data, batch_size=1):
        tmp_slot = {}
        tmp_all = {}
        sentence_test, real_len_test, slot_label_test, intent_label_test, word_sentence = data_test
        # Per-sample accumulators over the folds' log-probabilities.
        all_log_slot_logits_test = torch.zeros(1, max_len, label_size).to(device)
        all_log_intent_logits_test = torch.zeros(intent_num).to(device)
        for slot_model, intent_model in zip(slot_models, intent_models):
            x_test = torch.tensor(sentence_test).to(device)

            mask_test = make_mask(real_len_test, batch=1).to(device)
            # Slot model encoder yields hs_test; intent model encoder hi_test.
            hs_test = slot_model.enc(x_test)
            hi_test = intent_model.enc(x_test)

            # Slot: accumulate masked log-probabilities across folds.
            slot_logits_test = slot_model.dec(hs_test, hi_test)
            log_slot_logits_test = masked_log_softmax(slot_logits_test, mask_test, dim=-1)
            all_log_slot_logits_test += log_slot_logits_test
            # Intent: accumulate as well. Bug fix — the original assigned with
            # `=`, so every fold except the last was silently discarded and the
            # "ensemble" intent prediction came from a single model.
            intent_logits_test = intent_model.dec(hi_test, hs_test, real_len_test)
            all_log_intent_logits_test = all_log_intent_logits_test + F.log_softmax(intent_logits_test, dim=-1)

        # all_log_intent_logits_test = SingleModelDomain2Intent()

        slot_pred_test = torch.argmax(all_log_slot_logits_test, dim=-1)
        res_test = torch.argmax(all_log_intent_logits_test, dim=-1)

        # Intent accuracy.
        if res_test.item() == intent_label_test[0]:
            correct_num += 1

        # Trim predictions and gold labels to the sentence's real length.
        slot_pred_test = slot_pred_test[0][:real_len_test[0]]
        slot_label_test = slot_label_test[0][:real_len_test[0]]

        slot_pred_test = [int(item) for item in slot_pred_test]
        slot_label_test = [int(item) for item in slot_label_test]

        slot_pred_test = [index2slot_dict[item] for item in slot_pred_test]
        slot_label_test = [index2slot_dict[item] for item in slot_label_test]

        # Sentinel 'O' on both ends so get_chunks can inspect every position.
        pred_chunks = get_chunks(['O'] + slot_pred_test + ['O'])
        label_chunks = get_chunks(['O'] + slot_label_test + ['O'])

        print('pred chunks')
        print(pred_chunks)

        for item in pred_chunks:
            # 'AD>' is '<PAD>'[2:], i.e. a chunk built from padding tags.
            if item[2] != 'AD>' and item[2] != 'O':
                # Offsets shift by the leading sentinel plus (presumably) the
                # pad/CLS-style token counted in real_len — TODO confirm.
                tmp_slot[item[2]] = word_sentence[0][item[0] - 2:item[1] - 1]

        tmp_all['text'] = word_sentence
        tmp_all['intent'] = index2intent[res_test.item()]
        tmp_all['slots'] = tmp_slot

        # Chunk-level TP/FP/FN for the F1 score.
        correct_slots_cnt = 0
        for pred_chunk in pred_chunks:
            if pred_chunk in label_chunks:
                print('in')
                print(pred_chunk)
                print(label_chunks)
                TP += 1
                correct_slots_cnt += 1
            else:
                FP += 1
        for label_chunk in label_chunks:
            if label_chunk not in pred_chunks:
                FN += 1

        # Sentence-level slot correctness: every gold chunk matched exactly.
        tmp_all['slot_correct'] = 1 if correct_slots_cnt == len(label_chunks) else 0

        all_dict.append(tmp_all)

    denom = 2 * TP + FN + FP
    # Guard the degenerate case of no predicted and no gold chunks at all.
    F1_score = 100.0 * 2 * TP / denom if denom else 0.0
    print('Intent Val Acc: {:.4f} \t Slot F1 score: {:.4f}'.format(100.0 * correct_num / len(test_data), F1_score))

    writeJson(all_dict)

    print('len all dict')
    print(len(all_dict))


integrationTest()
