import os
import json
from tqdm import tqdm
import torch
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence


# TODO: in __construct_input, insert explicit separator tokens between service, act, slot, and value
# TODO: node_type_ids — order must correspond one-to-one with graphs

# TODO: consider adding a prefix to the model input
# TODO: add input_ids for the utterance, padding with -100 (not needed for now; let T5 derive decoder_input_ids itself)

# TODO: treat globe and service/act as the same node type


class MyDataset(Dataset):
    """Dataset that linearizes dialogue-act meaning representations (MRs)
    into token sequences for a seq2seq model (e.g. T5).

    Each example's ``actions_dict`` is flattened into an MR string such as
    ``Booking [ Inform ( ) ] Hotel [ Inform ( stars = no , several ) ] </s>``
    together with its tokenized ``input_ids``/``attention_mask``; the target
    ``labels`` are the tokenized reference utterance.
    """

    def __init__(self, args, tokenizer, mode):
        """
        :param args: namespace with ``loader_path`` (data root directory) and
            ``multi_only`` (if truthy, skip the single-value examples)
        :param tokenizer: HuggingFace-style tokenizer — callable returning an
            object with ``input_ids``, and exposing ``eos_token``
        :param mode: split name used as a subdirectory, e.g. 'train'/'dev'/'test'
        """
        super().__init__()
        self.args = args
        self.data = list()
        self.tokenizer = tokenizer

        # Parse the raw JSON files into a list of example dicts.
        single_data_path = os.path.join(args.loader_path, mode, 'single_value_data.json')
        multi_data_path = os.path.join(args.loader_path, mode, 'multi_value_data.json')
        # Explicit encoding: the platform default may not be UTF-8 and would
        # break on non-ASCII utterances (JSON files are UTF-8 by spec).
        with open(multi_data_path, 'r', encoding='utf-8') as file:
            self.data.extend(json.load(file))

        if not args.multi_only:
            with open(single_data_path, 'r', encoding='utf-8') as file:
                self.data.extend(json.load(file))

        # Separator tokens used when linearizing the MR, e.g.:
        # Booking [ Inform ( ) ] Hotel [ Inform ( stars = no , several ) Select ( ) ] </s>
        self.service_start = '['
        self.action_start = '('
        self.slot_start = '='
        self.value_sep = ','
        self.slot_end = ';'
        self.action_end = ')'
        self.service_end = ']'

        for data in tqdm(self.data, desc=f'preparing {mode} dataset'):
            # Build the model input (linearized MR + tensors).
            data.update(self._construct_input(data['actions_dict']))
            # Build the model output: tokenized reference utterance.
            data['labels'] = torch.tensor(self.tokenizer(data['utterance']).input_ids, dtype=torch.long)

    def _construct_input(self, actions_dict: dict):
        """Linearize an ``actions_dict`` into model-ready input tensors.

        :param actions_dict: maps each service name to its list of actions;
            each action is a dict with keys ``act``, ``slots`` and ``values``
            (``values`` is a list of value lists, parallel to ``slots``)
        :return: dict with ``input_text`` (space-joined MR string),
            ``input_ids`` (LongTensor) and ``attention_mask`` (LongTensor of
            ones — no padding is applied here)
        """

        input_text = list()  # MR as a token sequence, e.g.: intent ( s = v )
        input_ids = list()   # tokenizer ids of the elements in input_text, concatenated

        def update_input(e):
            input_text.append(e)
            # No special tokens per element; EOS is appended once at the end.
            input_ids.extend(self.tokenizer(e, add_special_tokens=False).input_ids)

        for service, actions in actions_dict.items():
            update_input(service)
            update_input(self.service_start)

            for action in actions:
                update_input(action['act'])
                update_input(self.action_start)

                for s_id, (slot, value_list) in enumerate(zip(action['slots'], action['values'])):
                    if s_id > 0:
                        # ';' separates consecutive slot=value groups.
                        update_input(self.slot_end)

                    update_input(slot)
                    update_input(self.slot_start)

                    for v_id, value in enumerate(value_list):
                        if v_id > 0:
                            # ',' separates multiple values of one slot.
                            update_input(self.value_sep)
                        update_input(value)

                update_input(self.action_end)

            update_input(self.service_end)

        update_input(self.tokenizer.eos_token)

        attention_mask = [1] * len(input_ids)  # all real tokens, no padding yet
        return {
            'input_text': ' '.join(input_text),
            'input_ids': torch.tensor(input_ids, dtype=torch.long),
            'attention_mask': torch.tensor(attention_mask, dtype=torch.long),
        }

    def __getitem__(self, item):
        # Each item is the full example dict (input_ids, attention_mask, labels, ...).
        return self.data[item]

    def __len__(self):
        return len(self.data)
