# from torchtext.datasets import AG_NEWS
# from torchtext.data.utils import get_tokenizer
# from torchtext.vocab import build_vocab_from_iterator

# # 加载训练集
# train_iter = AG_NEWS(split='train')

# # 定义分词器
# tokenizer = get_tokenizer('basic_english')

# # 定义一个生成器函数，用于生成 token 序列
# def yield_tokens(data_iter):
#     for _, text in data_iter:
#         yield tokenizer(text)

# # 构建词汇表
# vocab = build_vocab_from_iterator(yield_tokens(train_iter), specials=["<unk>"])
# vocab.set_default_index(vocab["<unk>"])

# # 测试词汇表
# test_text = "This is a test sentence."
# tokens = tokenizer(test_text)
# indices = vocab(tokens)
# print("Tokens:", tokens)
# print("Indices:", indices)


from torchtext.vocab import build_vocab_from_iterator

# Simulated iterable where each element is one tokenized text
# (build_vocab_from_iterator expects an iterator of token lists).
texts = [
    ["hello", "world"],
    ["this", "is", "a", "test"],
    ["hello", "this", "is", "a", "sample"],
]

# Build the vocabulary: keep every token seen at least once (min_freq=1)
# and place the special "<unk>" token first, i.e. at index 0
# (special_first=True).
vocab = build_vocab_from_iterator(
    texts,
    min_freq=1,
    specials=["<unk>"],
    special_first=True,
)

# Map out-of-vocabulary tokens to the "<unk>" index instead of raising
# a RuntimeError on lookup.
vocab.set_default_index(vocab["<unk>"])

# Exercise the vocabulary: "unknown_word" never appears in `texts`,
# so it should resolve to the "<unk>" index (0).
tokens = ["hello", "is", "unknown_word", "a"]
indices = vocab(tokens)
print("Tokens:", tokens)
print("Indices:", indices)
print(vocab)
print(len(vocab))



'''
Building a large vocabulary (streaming a big corpus in batches)
'''

# from torchtext.vocab import build_vocab_from_iterator
# from torchtext.data.utils import get_tokenizer

# # 假设 texts 是一个非常大的文件，这里模拟一个大文件的读取过程
# def large_text_generator(file_path, batch_size=100):
#     tokenizer = get_tokenizer('basic_english')
#     with open(file_path, 'r', encoding='utf-8') as file:
#         batch = []
#         for line in file:
#             # 对每行文本进行分词
#             tokens = tokenizer(line.strip())
#             batch.append(tokens)
#             if len(batch) == batch_size:
#                 # 达到 batch_size 时，生成这一批次的 token 序列
#                 yield from batch
#                 batch = []
#         # 处理最后一批不足 batch_size 的数据
#         if batch:
#             yield from batch

# # 假设文件路径
# file_path = 'large_text_file.txt'

# # 使用生成器构建词汇表
# vocab = build_vocab_from_iterator(
#     large_text_generator(file_path),
#     min_freq=1,
#     specials=["<unk>"],
#     special_first=True
# )

# # 获取词汇表的词汇总数
# vocab_size = len(vocab)
# print(f"词汇表中的词汇总数为: {vocab_size}")