import sys
import os
from tqdm import tqdm
import torch

# Resolve the project's src/ directory and prepend it to sys.path so the
# local packages imported below resolve when this file is run directly.
_script_dir = os.path.dirname(__file__)
project_root = os.path.abspath(os.path.join(_script_dir, '../src'))
sys.path.insert(0, project_root)
print(f'项目根目录 方便import测试: {project_root}')

from config import Bert_Config
from transformers import BertTokenizer

# Simulate a large dataset (100,012 copies of the same short sentence).
text = ['今天天气不错'] * 100012
print(f"总样本数量: {len(text)}")

params = Bert_Config()
tokenizer = BertTokenizer.from_pretrained(params.bert_path)

batch_size = 100
all_encoded_inputs = []

# Tokenize in chunks of `batch_size` to bound peak memory, then split each
# batched tensor back into per-sample dicts. NOTE: padding=True pads to the
# longest sequence *within each batch*, so sample tensor lengths can differ
# across batches (harmless here since every sentence is identical).
for start in tqdm(range(0, len(text), batch_size), desc="Tokenizing"):
    chunk = text[start:start + batch_size]
    encoded = tokenizer(chunk, padding=True, truncation=True, return_tensors='pt')

    # Fan the batch out into one {key: 1-D tensor} dict per sample.
    n_rows = encoded['input_ids'].shape[0]
    all_encoded_inputs.extend(
        {key: tensor[row] for key, tensor in encoded.items()}
        for row in range(n_rows)
    )

print(all_encoded_inputs[0])  # first sample's encoding
print(len(all_encoded_inputs))  # equals len(text), i.e. 100012
