# your_dataset_loader.py (fixed version)

from datasets import load_dataset
from transformers import AutoTokenizer


def load_squad_style_dataset(tokenizer, train_flat_file, eval_flat_file,
                             max_length=384, doc_stride=128):
    """
    Load and preprocess a flattened SQuAD-format dataset for extractive QA.

    Args:
        tokenizer: A transformers tokenizer instance. Must be a "fast"
            tokenizer, since ``sequence_ids`` and offset mappings are used
            to align character-level answers with token positions.
        train_flat_file (str): Path to the flattened training .jsonl file.
        eval_flat_file (str): Path to the flattened evaluation .jsonl file.
        max_length (int): Maximum token length of each tokenized feature.
            Defaults to 384 (the value previously hard-coded here).
        doc_stride (int): Token overlap between consecutive context windows
            when a long context is split into several features. Defaults
            to 128 (the value previously hard-coded here).

    Returns:
        tuple: ``(train_dataset, eval_dataset)`` of tokenized datasets with
        ``start_positions`` / ``end_positions`` labels added and the
        original raw columns removed.
    """
    # 1. Load the flattened .jsonl files directly.
    # After loading, the dataset contains 'context', 'question',
    # 'answers', etc. as top-level columns.
    raw_datasets = load_dataset('json', data_files={'train': train_flat_file, 'eval': eval_flat_file})

    def preprocess_function(examples):
        # Leading whitespace in questions can shift offsets; strip it.
        questions = [q.strip() for q in examples["question"]]

        tokenized_examples = tokenizer(
            questions,
            examples["context"],
            truncation="only_second",        # only ever truncate the context
            max_length=max_length,
            stride=doc_stride,
            return_overflowing_tokens=True,  # long contexts -> several features
            return_offsets_mapping=True,     # char offsets for answer alignment
            padding="max_length",
        )

        # Maps each produced feature back to the example it came from
        # (one example may yield several overflowing features).
        sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
        offset_mapping = tokenized_examples.pop("offset_mapping")

        tokenized_examples["start_positions"] = []
        tokenized_examples["end_positions"] = []

        for i, offsets in enumerate(offset_mapping):
            input_ids = tokenized_examples["input_ids"][i]
            cls_index = input_ids.index(tokenizer.cls_token_id)
            # sequence_ids: 0 for question tokens, 1 for context tokens,
            # None for special tokens.
            sequence_ids = tokenized_examples.sequence_ids(i)

            sample_index = sample_mapping[i]
            # 'answers' is a list such as [{'text': ..., 'answer_start': ...}]
            # or [] for unanswerable questions.
            answers = examples["answers"][sample_index]

            # Unanswerable (is_impossible) -> label both positions as [CLS].
            if not answers:
                tokenized_examples["start_positions"].append(cls_index)
                tokenized_examples["end_positions"].append(cls_index)
            else:
                # Take the single answer dict out of the list, then read keys.
                answer = answers[0]
                start_char = answer["answer_start"]
                end_char = start_char + len(answer["text"])

                # Locate the first and last token of the context segment
                # within this feature.
                token_start_index = 0
                while sequence_ids[token_start_index] != 1:
                    token_start_index += 1

                token_end_index = len(input_ids) - 1
                while sequence_ids[token_end_index] != 1:
                    token_end_index -= 1

                # If the answer span falls outside this context window
                # (possible with overflowing features), label it [CLS].
                if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):
                    tokenized_examples["start_positions"].append(cls_index)
                    tokenized_examples["end_positions"].append(cls_index)
                else:
                    # Advance to the first token starting after start_char,
                    # then step back one to land on the answer's first token.
                    while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
                        token_start_index += 1
                    tokenized_examples["start_positions"].append(token_start_index - 1)

                    # Symmetrically walk back to the answer's last token.
                    while offsets[token_end_index][1] >= end_char:
                        token_end_index -= 1
                    tokenized_examples["end_positions"].append(token_end_index + 1)

        return tokenized_examples

    tokenized_datasets = raw_datasets.map(
        preprocess_function,
        batched=True,
        remove_columns=raw_datasets["train"].column_names,
    )

    return tokenized_datasets['train'], tokenized_datasets['eval']