# -*- coding:utf-8 -*-
# editor: zzh
# date: 2022/8/26

from torch.utils.data import Dataset
import json
from configs import *
from utills import *
import numpy as np
import pickle
import re  # used by mydataset_traindata; previously reached only via a star import


class mydataset(Dataset):
    """Token-classification dataset over single annotated dialogue turns.

    Each item is ``(token_ids, segment_ids, slot_labels, label_mask)`` as
    numpy arrays.

    Args:
        data_file: path to a JSON file shaped like
            ``[{"dialogue": [{"text": ..., "info": [...], "act": ...}, ...]}]``.
        dict_file: mapping property name -> integer id (mutated in place when
            an unseen property is encountered during labeling).
        ent_type: unused here; kept for signature parity with the sibling
            dataset classes.
        intent_2_ent: optional mapping intent -> list of property names used
            to build a per-sample mask restricting which labels may fire.
    """

    def __init__(self, data_file, dict_file, ent_type='ent', intent_2_ent=None):
        super(mydataset, self).__init__()

        # Context manager: the original leaked the file handle.
        with open(data_file, 'r', encoding='utf8') as f:
            json_data = json.load(f)

        # Keep only turns that actually carry slot annotations.
        self.dialogues = [dia
                          for d in json_data
                          for dia in d['dialogue']
                          if len(dia['info']) != 0]

        self.prop_dict = dict_file
        self.intent2ent = intent_2_ent

    def __len__(self):
        return len(self.dialogues)

    def __getitem__(self, index):
        dia = self.dialogues[index]
        # Two label ids per property (first-token / continuation), hence * 2.
        mask = [0] * (len(self.prop_dict) * 2)
        intent = dia['act']
        if self.intent2ent is not None:
            if intent in self.intent2ent:
                # Only slots reachable from this intent may be predicted.
                for ent in self.intent2ent[intent]:
                    prop_id = self.prop_dict[ent]
                    mask[prop_id * 2 - 1] = 1
                    mask[prop_id * 2] = 1
                mask[0] = 1  # the "no slot" label is always allowed
            else:
                # Unknown intent: allow every label.
                mask = [1] * (len(self.prop_dict) * 2)
        else:
            mask = [1] * (len(self.prop_dict) * 2)
        tokenized_ids, segment_ids, slot_values = self.convert_to_ids(dia['text'], dia['info'], max_len=128)
        return np.array(tokenized_ids), np.array(segment_ids), np.array(slot_values), np.array(mask)

    @staticmethod
    def _find_subsequence(haystack, needle, limit):
        """Return the start index of `needle` inside `haystack`, or -1.

        Only matches whose every position lies below `limit` are accepted,
        mirroring the bounds of the writable label array.
        """
        if not needle:
            return -1
        last_start = min(len(haystack), limit) - len(needle)
        for i in range(last_start + 1):
            if haystack[i:i + len(needle)] == needle:
                return i
        return -1

    def convert_to_ids(self, text, infos, max_len):
        """Tokenize `text` and label each token with its slot id.

        For a property with id p, the first token of a matched value gets
        label ``2p - 1`` and subsequent tokens get ``2p``; unmatched tokens
        stay 0.

        Returns:
            (tokenized_ids, segment_ids, slot_values), all equal length.
        """
        tokenized_ids, segment_ids, tokenized_tokens = tokenize(text, max_len=max_len)
        slot_values = [0] * len(tokenized_ids)

        for info in infos:
            slot_value = info["value"]
            prop = info['prop']
            # Register unseen properties on the fly.
            if prop not in self.prop_dict:
                self.prop_dict[prop] = len(self.prop_dict)
            prop_type = self.prop_dict[prop]
            _, __, slot_tokens = tokenize(slot_value)
            # Drop the leading special token emitted by tokenize()
            # (presumably [CLS] — TODO confirm against utills.tokenize).
            slot_tokens = slot_tokens[1:]

            # Bounded subsequence search. The original loop stopped one
            # position early (missed matches ending at the last searchable
            # index) and relied on a swallowed IndexError when the token
            # list was shorter than the padded id list; both fixed here.
            start_ids = self._find_subsequence(tokenized_tokens, slot_tokens,
                                               len(slot_values))

            if start_ids != -1:
                slot_values[start_ids] = prop_type * 2 - 1
                for i in range(1, len(slot_tokens)):
                    slot_values[start_ids + i] = prop_type * 2

        assert len(tokenized_ids) == len(segment_ids) == len(slot_values)

        return tokenized_ids, segment_ids, slot_values



class mydataset2(Dataset):
    """Dataset over consecutive dialogue-turn pairs (previous, current).

    Each item concatenates the two turns' token ids, flips the second turn's
    segment ids (BERT-style A/B segments), concatenates the slot labels, and
    pads/truncates everything to the global ``max_len`` (imported via
    ``from configs import *``).
    """

    def __init__(self, data_file, dict_file):
        super(mydataset2, self).__init__()

        # Context managers: the original leaked both file handles.
        with open(data_file, 'r', encoding='utf8') as f:
            json_data = json.load(f)

        # Pair each turn with its predecessor; keep the pair if either side
        # carries slot annotations.
        self.dialogues = []
        for d in json_data:
            dias = d['dialogue']
            for i in range(1, len(dias)):
                if len(dias[i]['info']) != 0 or len(dias[i - 1]['info']) != 0:
                    self.dialogues.append((dias[i - 1], dias[i]))

        # NOTE(review): pickle only deserializes safely from trusted files.
        with open(dict_file, 'rb') as f:
            self.prop_dict = pickle.load(f)

    def __len__(self):
        return len(self.dialogues)

    def __getitem__(self, index):
        dia1, dia2 = self.dialogues[index]
        tokenized_ids1, segment_ids1, slot_values1 = self.convert_to_ids(dia1['text'], dia1['info'])
        tokenized_ids2, segment_ids2, slot_values2 = self.convert_to_ids(dia2['text'], dia2['info'])

        tokenized_ids = tokenized_ids1 + tokenized_ids2
        # Second sentence gets the complementary segment id.
        segment_ids = segment_ids1 + [1 - s for s in segment_ids2]
        slot_values = slot_values1 + slot_values2

        # Pad or truncate the concatenated pair to the global `max_len`.
        if len(tokenized_ids) > max_len:
            tokenized_ids = tokenized_ids[:max_len]
            segment_ids = segment_ids[:max_len]
            slot_values = slot_values[:max_len]
        else:
            pad_ids = [0] * (max_len - len(tokenized_ids))
            tokenized_ids = tokenized_ids + pad_ids
            segment_ids = segment_ids + pad_ids
            slot_values = slot_values + pad_ids

        return np.array(tokenized_ids), np.array(segment_ids), np.array(slot_values)

    @staticmethod
    def _find_subsequence(haystack, needle, limit):
        """Return the start index of `needle` inside `haystack`, or -1.

        Only matches whose every position lies below `limit` are accepted,
        mirroring the bounds of the writable label array.
        """
        if not needle:
            return -1
        last_start = min(len(haystack), limit) - len(needle)
        for i in range(last_start + 1):
            if haystack[i:i + len(needle)] == needle:
                return i
        return -1

    def convert_to_ids(self, text, infos):
        """Tokenize `text` and label each token with its slot id.

        For a property with id p, the first token of a matched value gets
        label ``2p - 1`` and subsequent tokens get ``2p``; unmatched tokens
        stay 0.
        """
        tokenized_ids, segment_ids, tokenized_tokens = tokenize(text)
        slot_values = [0] * len(tokenized_ids)

        for info in infos:
            slot_value = info["value"]
            prop = info['prop']
            # Register unseen properties on the fly.
            if prop not in self.prop_dict:
                self.prop_dict[prop] = len(self.prop_dict)
            prop_type = self.prop_dict[prop]
            _, __, slot_tokens = tokenize(slot_value)
            # Drop the leading special token emitted by tokenize()
            # (presumably [CLS] — TODO confirm against utills.tokenize).
            slot_tokens = slot_tokens[1:]

            # Bounded subsequence search. The original loop stopped one
            # position early and relied on a swallowed IndexError when the
            # token list was shorter than the padded id list; both fixed.
            start_ids = self._find_subsequence(tokenized_tokens, slot_tokens,
                                               len(slot_values))

            if start_ids != -1:
                slot_values[start_ids] = prop_type * 2 - 1
                for i in range(1, len(slot_tokens)):
                    slot_values[start_ids + i] = prop_type * 2

        assert len(tokenized_ids) == len(segment_ids) == len(slot_values)

        return tokenized_ids, segment_ids, slot_values


class mydataset_traindata(Dataset):
    """Dataset built from raw training conversations.

    Extracts per-speaker utterances carrying triple annotations (value/prop
    pairs) plus the speaker's intent, and yields
    ``(token_ids, segment_ids, slot_labels, label_mask)`` as numpy arrays.

    Args:
        data_file: path to a JSON file shaped like
            ``[{"content": [{"info": {"ents": [...], "triples": [...]},
            "[SPEAKER 1]": ..., "[SPEAKER 2]": ..., "客服意图": ...,
            "用户意图": ...}, ...]}]``.
        dict_file: mapping property name -> integer id (mutated in place).
        ent_type: 'ent' selects entity mode; NOTE(review) in that mode the
            original collected entity infos but never produced any samples —
            that (no-op) behavior is preserved.
        intent_2_ent: optional mapping intent -> list of property names used
            to build the label mask.
    """

    def __init__(self, data_file, dict_file, ent_type='ent', intent_2_ent=None):
        super(mydataset_traindata, self).__init__()

        # Context manager: the original leaked the file handle.
        with open(data_file, 'r', encoding='utf8') as f:
            json_data = json.load(f)

        self.dialogues = []
        for d in json_data:
            for turn in d['content']:
                try:
                    infos = turn['info']
                    ents = infos['ents']        # accessed so malformed turns are skipped
                    triples = infos['triples']
                    sent1 = turn['[SPEAKER 1]']
                    sent2 = turn['[SPEAKER 2]']

                    speaker1_infos, speaker2_infos = [], []
                    if ent_type != 'ent':
                        for tri in triples:
                            info = {"value": tri['value'], "prop": tri['prop']}
                            # pos[0] == 1 marks SPEAKER 1's utterance.
                            if tri['pos'][0] == 1:
                                speaker1_infos.append(info)
                            else:
                                speaker2_infos.append(info)

                    if speaker1_infos:
                        self.dialogues.append({'text': sent1, 'info': speaker1_infos, 'intent': turn['客服意图']})
                    if speaker2_infos:
                        self.dialogues.append({'text': sent2, 'info': speaker2_infos, 'intent': turn['用户意图']})

                except Exception:
                    # Best-effort parse: skip malformed turns (missing keys,
                    # unexpected shapes) rather than abort loading.
                    continue

        self.prop_dict = dict_file
        self.intent2ent = intent_2_ent

    def __len__(self):
        return len(self.dialogues)

    @staticmethod
    def _normalize_prop(prop):
        """Map annotation aliases onto canonical property names."""
        return {"短信": "短信条数", "欠费": "欠费金额"}.get(prop, prop)

    def __getitem__(self, index):
        dia = self.dialogues[index]
        # Two label ids per property (first-token / continuation), hence * 2.
        mask = [0] * (len(self.prop_dict) * 2)
        # Normalize the raw intent string: drop spaces, unify full-width
        # parentheses, strip a trailing "(...)" detail, then split on the
        # various separators seen in the annotations.
        intent = dia['intent'].replace(" ", "").replace("（", "(").replace("）", ")")
        intent = re.sub(r'[(](.*)', '', intent)
        intents = re.split(',|-|，|=|\*|—', intent)

        find = False
        # `is not None` guard added: the original crashed when intent_2_ent
        # was left as None; now that case falls through to the full mask.
        if self.intent2ent is not None:
            for one_intent in intents:
                if one_intent not in self.intent2ent:
                    continue
                for ent in self.intent2ent[one_intent]:
                    ent = self._normalize_prop(ent)
                    # Skip properties absent from the dictionary (the original
                    # had unreachable dict-growing code after this `continue`).
                    if ent not in self.prop_dict:
                        continue
                    prop_id = self.prop_dict[ent]
                    mask[prop_id * 2 - 1] = 1
                    mask[prop_id * 2] = 1
                mask[0] = 1  # the "no slot" label is always allowed
                find = True
        if not find:
            # No usable intent: allow every label.
            mask = [1] * (len(self.prop_dict) * 2)
        tokenized_ids, segment_ids, slot_values = self.convert_to_ids(dia['text'], dia['info'], max_len=128)
        return np.array(tokenized_ids), np.array(segment_ids), np.array(slot_values), np.array(mask)

    @staticmethod
    def _find_subsequence(haystack, needle, limit):
        """Return the start index of `needle` inside `haystack`, or -1.

        Only matches whose every position lies below `limit` are accepted,
        mirroring the bounds of the writable label array.
        """
        if not needle:
            return -1
        last_start = min(len(haystack), limit) - len(needle)
        for i in range(last_start + 1):
            if haystack[i:i + len(needle)] == needle:
                return i
        return -1

    def convert_to_ids(self, text, infos, max_len):
        """Tokenize `text` and label each token with its slot id.

        For a property with id p, the first token of a matched value gets
        label ``2p - 1`` and subsequent tokens get ``2p``; unmatched tokens
        stay 0.
        """
        tokenized_ids, segment_ids, tokenized_tokens = tokenize(text, max_len=max_len)
        slot_values = [0] * len(tokenized_ids)

        for info in infos:
            slot_value = info["value"]
            prop = self._normalize_prop(info['prop'])
            # Register unseen properties on the fly.
            if prop not in self.prop_dict:
                self.prop_dict[prop] = len(self.prop_dict)
            prop_type = self.prop_dict[prop]
            _, __, slot_tokens = tokenize(slot_value)
            # Drop the leading special token emitted by tokenize()
            # (presumably [CLS] — TODO confirm against utills.tokenize).
            slot_tokens = slot_tokens[1:]

            # Bounded subsequence search. The original loop stopped one
            # position early and relied on a swallowed IndexError when the
            # token list was shorter than the padded id list; both fixed.
            start_ids = self._find_subsequence(tokenized_tokens, slot_tokens,
                                               len(slot_values))

            if start_ids != -1:
                slot_values[start_ids] = prop_type * 2 - 1
                for i in range(1, len(slot_tokens)):
                    slot_values[start_ids + i] = prop_type * 2

        assert len(tokenized_ids) == len(segment_ids) == len(slot_values)

        return tokenized_ids, segment_ids, slot_values


if __name__ == '__main__':
    # Quick sanity check: load the property dictionary and the
    # intent -> properties mapping and report their sizes.
    # NOTE(review): pickle only deserializes safely from trusted files.
    with open(r'data/TrainingData/tri_prop.pkl', 'rb') as f:
        dict_file = pickle.load(f)
    with open(r'data/TrainingData/intent2prop09.pkl', 'rb') as f:
        t_2_ents = pickle.load(f)

    print(len(dict_file), len(t_2_ents))
    print(t_2_ents.keys())



