import json
import torch
from torch.utils.data import Dataset
from transformers import BertTokenizer
from config import Config


class NLUDataset(Dataset):
    """Dataset for joint intent classification and slot filling (BIO tagging).

    Samples come from a JSON file; each sample is a dict with keys
    ``text`` (str), ``intent`` (str, absent for test data) and ``slots``
    (dict mapping slot name -> value or list of values, absent for test data).

    For train/dev data the label maps (intent2id/slot2id and their inverses)
    are derived from the annotations; for test data (``is_test=True``) they
    stay ``None`` and must be assigned externally (see ``get_datasets``).
    """

    def __init__(self, data_path, tokenizer, is_test=False):
        self.data = self.load_data(data_path)  # list of sample dicts
        self.tokenizer = tokenizer
        self.is_test = is_test  # test data carries no labels
        self.intent2id = self.id2intent = self.slot2id = self.id2slot = None

        # Only labeled (train/dev) data can derive label maps from itself.
        if not is_test:
            self.intent2id, self.id2intent = self.build_intent_map()
            self.slot2id, self.id2slot = self.build_slot_map()

    def load_data(self, path):
        """Load JSON data; accepts a list of samples or a dict keyed by sample id."""
        with open(path, 'r', encoding='utf-8') as f:
            data = json.load(f)
        # A dict like {"id1": {...}, "id2": {...}} is flattened to its values.
        return list(data.values()) if isinstance(data, dict) else data

    def build_intent_map(self):
        """Return (intent->id, id->intent) maps, sorted so ids are stable across runs."""
        # Collect non-empty intents; .get guards against a missing key.
        intents = sorted({item.get('intent', '') for item in self.data} - {''})
        intent2id = {intent: idx for idx, intent in enumerate(intents)}
        id2intent = {idx: intent for intent, idx in intent2id.items()}
        return intent2id, id2intent

    def build_slot_map(self):
        """Return (slot->id, id->slot) maps for BIO tags, sorted so ids are stable."""
        slots = {'O'}  # 'O' marks characters outside any slot
        for item in self.data:
            for slot_name in item.get('slots', {}):
                # Each slot type contributes a B- (begin) and I- (inside) tag.
                slots.add(f'B-{slot_name}')
                slots.add(f'I-{slot_name}')
        slot_list = sorted(slots)
        slot2id = {slot: idx for idx, slot in enumerate(slot_list)}
        id2slot = {idx: slot for slot, idx in slot2id.items()}
        return slot2id, id2slot

    def __len__(self):
        """Number of samples."""
        return len(self.data)

    def __getitem__(self, idx):
        """Tokenize one sample; for train/dev, also attach intent/slot label tensors."""
        item = self.data[idx]
        text = item.get('text', '')
        # Character-level tokenization (suitable for Chinese text).
        token_ids = self.tokenizer.convert_tokens_to_ids(list(text))

        # Truncate, then remember the true length BEFORE padding.
        token_ids = token_ids[:Config.max_seq_len]
        valid_len = len(token_ids)
        token_ids += [self.tokenizer.pad_token_id] * (Config.max_seq_len - valid_len)

        # Mask from the true length: a real token whose id happens to equal
        # pad_token_id is still attended to (comparing ids to the pad id
        # after padding would wrongly mask such tokens).
        attention_mask = [1] * valid_len + [0] * (Config.max_seq_len - valid_len)

        result = {
            'input_ids': torch.tensor(token_ids, dtype=torch.long),
            'attention_mask': torch.tensor(attention_mask, dtype=torch.long),
            'text': text  # keep raw text for downstream analysis
        }

        # Train/dev samples get label tensors.
        if not self.is_test:
            intent = item.get('intent', '')
            # NOTE: an intent absent from the training map falls back to id 0.
            result['intent_id'] = torch.tensor(
                self.intent2id.get(intent, 0),
                dtype=torch.long
            )
            result['slot_ids'] = torch.tensor(
                self._build_slot_label_ids(item, text),
                dtype=torch.long
            )

        return result

    def _build_slot_label_ids(self, item, text):
        """Build per-character BIO label ids for *text*, padded/truncated to max_seq_len."""
        slot_labels = ['O'] * len(text)  # default: outside any slot

        for slot_name, value in item.get('slots', {}).items():
            # Normalize single value vs. list of values.
            values = [value] if isinstance(value, str) else value
            for val in values:
                if not val:
                    # '' would match at position 0 via find() and mislabel it.
                    continue
                # Label only the first occurrence of the value in the text.
                start_idx = text.find(val)
                if start_idx == -1:
                    continue  # value not present in the text
                end_idx = start_idx + len(val) - 1

                if start_idx < len(slot_labels):
                    slot_labels[start_idx] = f'B-{slot_name}'
                for i in range(start_idx + 1, end_idx + 1):
                    if i < len(slot_labels):
                        slot_labels[i] = f'I-{slot_name}'

        # Truncate or pad labels to the fixed sequence length.
        slot_labels = slot_labels[:Config.max_seq_len]
        slot_labels += ['O'] * (Config.max_seq_len - len(slot_labels))

        # Fall back to 'O' for tags missing from the (train-derived) map —
        # e.g. a dev-set slot type never seen in training — instead of KeyError.
        o_id = self.slot2id['O']
        return [self.slot2id.get(slot, o_id) for slot in slot_labels]


def get_datasets():
    """Load the train/dev/test datasets, sharing the train-derived label maps.

    The dev set rebuilds maps in ``__init__`` but must use the train maps so
    label ids stay consistent; the test set (``is_test=True``) never builds
    maps and must receive them here.

    Returns:
        tuple: (train_set, dev_set, test_set, tokenizer)
    """
    tokenizer = BertTokenizer.from_pretrained(Config.bert_model)
    # Train set is the source of truth for the intent/slot label maps.
    train_set = NLUDataset(Config.train_path, tokenizer)
    dev_set = NLUDataset(Config.dev_path, tokenizer)
    test_set = NLUDataset(Config.test_path, tokenizer, is_test=True)

    # Propagate the training maps to both dependent splits (was duplicated
    # assignment code; a loop keeps the two splits guaranteed-identical).
    for split in (dev_set, test_set):
        split.intent2id, split.id2intent = train_set.intent2id, train_set.id2intent
        split.slot2id, split.id2slot = train_set.slot2id, train_set.id2slot

    return train_set, dev_set, test_set, tokenizer
