from torch.utils.data import IterableDataset, DataLoader
import json
from transformers import AutoTokenizer
import torch


class IterableAFQMC(IterableDataset):
    """Stream AFQMC samples from a JSON-lines file.

    Each non-empty line of ``data_file`` is parsed as one JSON object
    (a dict with keys like ``sentence1``/``sentence2``/``label``) and
    yielded lazily, so the whole dataset never has to fit in memory.
    """

    def __init__(self, data_file):
        # Path to the JSON-lines file; it is (re)opened on every iteration,
        # so a DataLoader can traverse the dataset multiple times.
        self.data_file = data_file

    def __iter__(self):
        with open(self.data_file, 'rt', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                # Skip blank lines (e.g. a trailing newline at EOF) —
                # json.loads('') would raise JSONDecodeError otherwise.
                if not line:
                    continue
                yield json.loads(line)


# Pre-trained Chinese BERT checkpoint used to tokenize sentence pairs.
checkpoint = "bert-base-chinese"
# Loaded once at module import (downloads/reads the model files) so that
# collate_fn can reuse the same tokenizer instance for every batch.
tokenizer = AutoTokenizer.from_pretrained(checkpoint)


def collate_fn(batch_samples):
    """Collate raw AFQMC samples into model-ready tensors.

    Tokenizes the sentence pairs of ``batch_samples`` (each a dict with
    ``sentence1``, ``sentence2`` and ``label`` keys) into one padded,
    truncated batch encoding, and stacks the labels into a tensor.

    Returns:
        A tuple ``(X, y)`` where ``X`` is the tokenizer's batch encoding
        (PyTorch tensors) and ``y`` is a 1-D tensor of integer labels.
    """
    sentences_a = [sample['sentence1'] for sample in batch_samples]
    sentences_b = [sample['sentence2'] for sample in batch_samples]
    labels = [int(sample['label']) for sample in batch_samples]

    X = tokenizer(
        sentences_a,
        sentences_b,
        padding=True,
        truncation=True,
        return_tensors="pt"
    )
    y = torch.tensor(labels)
    return X, y


# Path to the AFQMC training split (one JSON object per line).
data_path = 'data/afqmc_public/train.json'

# Build the dataset and dataloader, then inspect one batch.
try:
    train_data = IterableAFQMC(data_path)
    # shuffle must be False: DataLoader raises ValueError when asked to
    # shuffle an IterableDataset. Shuffling streamed data would have to
    # happen inside the dataset itself (e.g. with a shuffle buffer).
    train_dataloader = DataLoader(train_data, batch_size=4, shuffle=False, collate_fn=collate_fn)

    batch_X, batch_y = next(iter(train_dataloader))
    print('batch_X shape:', {k: v.shape for k, v in batch_X.items()})
    print('batch_y shape:', batch_y.shape)
    print(batch_X)
    print(batch_y)
except FileNotFoundError as e:
    # Expected failure mode: the data file is missing.
    print(f"文件不存在: {e}")
except Exception as e:
    # Unexpected failure: report it, but re-raise instead of silently
    # swallowing the error so real bugs surface with a traceback.
    print(f"其他错误: {e}")
    raise
