import torchtext
from torchtext.data import Field, TabularDataset
# Print the installed torchtext version (this script relies on the legacy
# ``torchtext.data`` API, which was removed in torchtext >= 0.12).
print(torchtext.__version__)

from torchtext.data.functional import generate_sp_model
# Train a SentencePiece tokenizer model on ``test.csv``; this writes
# ``spm_user.model`` and ``spm_user.vocab`` into the working directory.
generate_sp_model('test.csv', vocab_size=23456, model_prefix='spm_user')






# Prepare the dataset: question/answer Fields tokenized with spaCy,
# lowercased, and wrapped with <sos>/<eos> markers.
QUESTION = Field(tokenize="spacy", lower=True, init_token='<sos>', eos_token='<eos>')
ANSWER = Field(tokenize="spacy", lower=True, init_token='<sos>', eos_token='<eos>')

# Column order must match the CSV layout: (question, answer).
fields = [('question', QUESTION), ('answer', ANSWER)]

# BUG FIX: the original referenced ``TabularDataset`` without importing it
# (only ``Field`` was imported at the top of the file), so this line raised
# NameError. ``TabularDataset`` is now imported alongside ``Field``.
train_data, test_data = TabularDataset.splits(
    path='data',
    train='train_data.csv',
    test='test_data.csv',
    format='csv',
    fields=fields,
    skip_header=True  # skip the CSV header row, consistent with the second loader below
)




if __name__ == "__main__":
    # Script entry point — signal that the module-level setup finished.
    print("over")



import torchtext.data as data

# Second (legacy-API) variant of the pipeline above: spaCy-tokenized Fields
# plus a reload of the train/test splits from the same CSV files.
spacy_tokenizer = data.get_tokenizer('spacy')
lower_func = lambda x: x.lower()  # kept for backward compatibility; no longer used (see below)
init_token = '<sos>'
eos_token = '<eos>'

# BUG FIX: the original passed ``preprocessing=lower_func``. Field applies
# the ``preprocessing`` callable to the whole token *list* produced by the
# tokenizer, so ``.lower()`` on a list raised AttributeError the first time
# an example was preprocessed. Lowercasing belongs in ``lower=True``, which
# Field applies per token (and which the first pipeline in this file uses).
QUESTION = data.Field(sequential=True, tokenize=spacy_tokenizer, lower=True,
                      init_token=init_token, eos_token=eos_token)
ANSWER = data.Field(sequential=True, tokenize=spacy_tokenizer, lower=True,
                    init_token=init_token, eos_token=eos_token)

fields = [('question', QUESTION), ('answer', ANSWER)]

train_data, test_data = data.TabularDataset.splits(
    path='data',
    train='train_data.csv',
    test='test_data.csv',
    format='csv',
    fields=fields,
    skip_header=True  # skip the CSV header row (if present)
)