import pandas as pd
import numpy as np
from transformers import BertTokenizer
import torch

# Load the cleaned training dataset.
cleaned_data_file = 'E:/nlp-getting-started/cleanedtrain.csv'
df_train = pd.read_csv(cleaned_data_file)

# Load the pretrained BERT tokenizer (downloads on first use).
model_name = 'bert-base-uncased'
tokenizer = BertTokenizer.from_pretrained(model_name)

# Concatenate the text and keyword columns into one sentence per row.
# BUG FIX: 'keyword' (and potentially 'text') may contain NaN; concatenating a
# string Series with NaN yields float NaN entries, which crash the tokenizer.
# Fill missing values with '' before joining.
sentences = (df_train['text'].fillna('') + ' ' + df_train['keyword'].fillna('')).tolist()

# Tokenize the whole corpus as one padded batch and get PyTorch tensors
# directly (replaces the previous list -> numpy -> torch round-trip).
sentences_tokenizer = tokenizer(sentences,
                                truncation=True,
                                padding=True,
                                max_length=128,
                                add_special_tokens=True,
                                return_tensors='pt')

# Bundle model inputs and labels for the training loop.
data = {
    'input_ids': sentences_tokenizer['input_ids'],
    'attention_mask': sentences_tokenizer['attention_mask'],
    # .values hands torch a plain ndarray instead of a pandas Series,
    # avoiding torch.tensor's Series-construction warning.
    'labels': torch.tensor(df_train['target'].values),
}

# Save the dict as a pickled object inside a .npy file.
# NOTE: reload it with np.load(path, allow_pickle=True).item()
np.save('E:/nlp-getting-started/sentences_tokenizer.npy', data)

print("数据已保存为 sentences_tokenizer.npy")

