# 加载数据工具类
from tqdm import tqdm
import torch
from torch.utils.data import Dataset, DataLoader
import json
from config import Config
import time

# todo: load the shared project configuration. This module reads
# conf.tokenizer, conf.label2id, conf.pad_size, conf.batch_size and the
# conf.train_datapath / conf.dev_datapath / conf.test_datapath attributes.
conf = Config()


def iter_json_array_objects(file_path, max_rows=None):
    """
    Lazily yield each object of a (potentially huge) JSON array file.

    Instead of reading the whole file into memory, characters are consumed
    one at a time and a brace-depth counter — aware of string literals and
    backslash escapes — delimits one object at a time, which is then parsed
    with json.loads.
    Expected file layout: [ {obj1}, {obj2}, ... ]

    Args:
        file_path: path to the JSON array file (read as UTF-8 text).
        max_rows: stop after yielding this many objects (None = no limit).

    Raises:
        ValueError: if the file does not start with '[', an unexpected
            character precedes an object, or EOF occurs mid-object.
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        # Skip leading whitespace and require the opening '[' of the array.
        head = f.read(1)
        while head and head.isspace():
            head = f.read(1)
        if head != '[':
            raise ValueError('JSON array must start with [')

        yielded = 0
        while True:
            # Advance past whitespace and commas until an object start '{',
            # the closing ']' of the array, or EOF.
            tok = f.read(1)
            while tok and tok.isspace():
                tok = f.read(1)
            if not tok or tok == ']':
                break
            if tok == ',':
                continue
            if tok != '{':
                raise ValueError(f'Unexpected char before object: {tok!r}')

            # Collect one complete object: track brace depth while honouring
            # string literals and escapes, so braces inside strings are ignored.
            pieces = ['{']
            open_braces = 1
            inside_string = False
            escaped = False
            while open_braces:
                nxt = f.read(1)
                if not nxt:
                    raise ValueError('Unexpected EOF inside JSON object')
                pieces.append(nxt)
                if inside_string:
                    if escaped:
                        escaped = False
                    elif nxt == '\\':
                        escaped = True
                    elif nxt == '"':
                        inside_string = False
                elif nxt == '"':
                    inside_string = True
                elif nxt == '{':
                    open_braces += 1
                elif nxt == '}':
                    open_braces -= 1

            yield json.loads(''.join(pieces))
            yielded += 1
            if max_rows is not None and yielded >= max_rows:
                return


# todo 1.加载并处理原始数据
def load_raw_data(file_path, max_rows=None):
    """
    Build the (text, label) training pairs from an embedded-data JSON file.

    For each object in the file:
    - text comes from the 'processed_text' field;
    - label comes from 'accusation_category', mapped to an integer class id
      through conf.label2id.
    Objects with a missing/empty text or category, or with a category that is
    not present in conf.label2id, are skipped silently.

    Args:
        file_path: path to the JSON array data file.
        max_rows: optional cap on the number of objects read (memory control).

    Returns:
        list of (text, int_label) tuples.
    """
    samples = []
    progress = tqdm(iter_json_array_objects(file_path, max_rows=max_rows),
                    desc=f"加载原始数据 {file_path}")
    for record in progress:
        content = record.get('processed_text')
        category = record.get('accusation_category')
        if not content or not category:
            continue
        mapped = conf.label2id.get(category)
        if mapped is not None:
            samples.append((content, mapped))
    return samples


# todo 2.自定义数据集
class TextDataset(Dataset):
    """Thin Dataset wrapper over a pre-built list of (text, label) pairs."""

    def __init__(self, data_list):
        # Keep a reference to the already-processed samples; no copying.
        self.data_list = data_list

    def __len__(self):
        """Return the number of samples in the dataset."""
        return len(self.data_list)

    def __getitem__(self, idx):
        """Return the (text, label) pair stored at position idx."""
        sample = self.data_list[idx]
        return sample[0], sample[1]


# todo 3.批量处理数据
"""
每当 DataLoader 从 Dataset 中取出一批batch 的原始数据后，
就会调用 collate_fn 来对这个 batch 进行统一处理（如填充、转换为张量等）。
"""

def collate_fn(batch):
    """
    Collate a batch of (text, label) pairs into model-ready tensors.

    Every text is padded/truncated to conf.pad_size, so all batches share a
    fixed sequence length.

    Args:
        batch: sequence of (text, label) tuples produced by TextDataset.

    Returns:
        tuple: (input_ids, attention_mask, labels) as torch tensors.
    """
    texts, labels = zip(*batch)
    # Encode the whole batch in one tokenizer call; fixed-length padding keeps
    # tensor shapes constant across batches.
    encoded = conf.tokenizer.batch_encode_plus(
        texts,
        add_special_tokens=True,
        padding='max_length',
        max_length=conf.pad_size,
        truncation=True,
        return_attention_mask=True
    )
    input_ids = torch.tensor(encoded['input_ids'])
    attention_mask = torch.tensor(encoded['attention_mask'])
    return input_ids, attention_mask, torch.tensor(labels)


# todo 4.构建dataloader

def build_dataloader(max_rows=None, shuffle_train=False):
    """
    Build the train/dev/test DataLoaders from the configured data paths.

    Fix/generalization: the original always built the training loader with
    shuffle=False, so the training data was never shuffled between epochs.
    A new keyword `shuffle_train` exposes this; it defaults to False to keep
    the historical deterministic order (backward compatible), but passing
    True is recommended for real training runs. The dev/test loaders stay
    unshuffled so evaluation order is stable.

    Args:
        max_rows: optional per-split cap on rows read (memory control).
        shuffle_train: shuffle the training split each epoch when True.

    Returns:
        tuple: (train_dataloader, dev_dataloader, test_dataloader).
    """
    # Load raw (text, label) pairs for each split.
    train_data_list = load_raw_data(conf.train_datapath, max_rows=max_rows)
    dev_data_list = load_raw_data(conf.dev_datapath, max_rows=max_rows)
    test_data_list = load_raw_data(conf.test_datapath, max_rows=max_rows)

    # Wrap each split in a Dataset.
    train_dataset = TextDataset(train_data_list)
    dev_dataset = TextDataset(dev_data_list)
    test_dataset = TextDataset(test_data_list)

    # Only the training split may be shuffled; eval splits keep a fixed order.
    train_dataloader = DataLoader(train_dataset, batch_size=conf.batch_size,
                                  shuffle=shuffle_train, collate_fn=collate_fn)
    dev_dataloader = DataLoader(dev_dataset, batch_size=conf.batch_size,
                                shuffle=False, collate_fn=collate_fn)
    test_dataloader = DataLoader(test_dataset, batch_size=conf.batch_size,
                                 shuffle=False, collate_fn=collate_fn)

    return train_dataloader, dev_dataloader, test_dataloader


if __name__ == '__main__':
    # Smoke-test build_dataloader with a capped row count so the large data
    # files are not read fully into memory.
    train_dataloader, dev_dataloader, test_dataloader = build_dataloader(max_rows=2000)
    for loader in (train_dataloader, dev_dataloader, test_dataloader):
        print(len(loader))

    # Pull a single batch, report the tensor shapes, then stop.
    for input_ids, attention_mask, labels in train_dataloader:
        print("input_ids形状: ", input_ids.shape)
        print("attention_mask形状: ", attention_mask.shape)
        print("labels形状: ", labels.shape)
        break
