import torch
import torchtext
from torch.utils.data import DataLoader
from torchtext.data.utils import get_tokenizer
from torchtext.datasets import WikiText2
from torchtext.transforms import VocabTransform
from torchtext.vocab import build_vocab_from_iterator

def tokenizer(text):
    """Split *text* into tokens on runs of whitespace.

    Returns an empty list for an empty or all-whitespace string.
    """
    tokens = text.split()
    return tokens


def yield_tokens(data_iter):
    """Lazily yield the token list for each text line in *data_iter*."""
    yield from map(tokenizer, data_iter)


# DataLoader collate function
def collate_batch(batch):
    """Collate a batch of (label, text) pairs into model-ready tensors.

    Each text is tokenized and mapped to vocab indices via the module-level
    ``tokenizer`` / ``text_transform``; labels "pos"/"neg" map to 1.0/0.0.

    Returns (labels_tensor, texts_tensor), both moved to the module-level
    ``device``. ``texts_tensor`` has shape (batch, max_len), right-padded
    with the ``<pad>`` index.

    NOTE(review): the original code called ``torch.tensor(texts, ...)`` on
    the raw lists, which raises for variable-length sequences (a ragged
    tensor cannot be built); padding with ``pad_sequence`` fixes that while
    preserving the output for same-length batches.
    """
    labels, texts = zip(*batch)
    token_ids = [torch.tensor(text_transform(tokenizer(text)), dtype=torch.int64)
                 for text in texts]
    # Right-pad every sequence to the longest one in the batch.
    padded = torch.nn.utils.rnn.pad_sequence(
        token_ids, batch_first=True, padding_value=vocab["<pad>"])
    label_tensor = torch.tensor(
        [1 if label == "pos" else 0 for label in labels], dtype=torch.float32)
    return label_tensor.to(device), padded.to(device)


# The automatic download may fail; a mirror of the dataset is available at:
# https://aistudio.baidu.com/datasetdetail/230431
# The default root is C:\Users\Administrator\.cache\torch\text, with files
# expected under datasets/[dataset name]/.
train_data, val_data, test_data = WikiText2(root='./')

# Device used by collate_batch when moving tensors to GPU/CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
MAX_VOCAB_SIZE = 25000

# Build the vocabulary from the training split, capped at MAX_VOCAB_SIZE.
vocab = build_vocab_from_iterator(yield_tokens(train_data),
                                  specials=["<unk>", "<pad>", "<bos>", "<eos>"],
                                  max_tokens=MAX_VOCAB_SIZE)
# Map out-of-vocabulary tokens to <unk>.
vocab.set_default_index(vocab["<unk>"])
# Transform: list of token strings -> list of vocabulary indices.
text_transform = VocabTransform(vocab)

# Print the vocabulary size.
print(f"Vocabulary size: {len(vocab)}")

# Create the DataLoaders.
# (The original code also built a shuffled batch-size-32 train_loader before
# the vocab existed and immediately overwrote it here; that dead assignment
# has been removed — it also needlessly consumed the train_data iterator.)
train_loader = DataLoader(list(train_data), batch_size=8)
test_loader = DataLoader(list(test_data), batch_size=8)

# Smoke-test the DataLoader: print the first batch and stop.
# NOTE(review): WikiText2 yields plain text lines (no labels), so each batch
# is a list of str and collate_batch (which expects (label, text) pairs) is
# not wired into these loaders — confirm whether that was intended.
for texts in train_loader:
    print(len(texts))
    print(f"Texts: {texts}")
    break
