import torch
from transformers import BertForSequenceClassification, BertTokenizer
import pandas as pd
from torch.utils.data import TensorDataset, DataLoader

# Pretrained artifact locations.
model_name = 'bert-base-uncased'
model_path = 'E:/bert-base'  # directory holding the fine-tuned model weights

# Prefer the GPU when one is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# NOTE(review): the tokenizer is fetched by hub name while the model comes
# from a local path — presumably both are bert-base-uncased; confirm they match.
tokenizer = BertTokenizer.from_pretrained(model_name)
model = BertForSequenceClassification.from_pretrained(model_path)
model.to(device)

# Load the test split; it is expected to contain 'id' and 'text' columns.
test_df = pd.read_csv('E:/nlp-getting-started/cleanedtest.csv')
ids = test_df['id'].tolist()  # kept aside for the submission file

# Tokenize all texts in one call, padding to the longest example and
# truncating anything beyond the model's maximum length.
encoding = tokenizer(
    test_df['text'].tolist(),
    padding=True,
    truncation=True,
    return_tensors='pt',
)

# Bundle token ids and attention masks into a dataset and batch it
# for inference (batch size 32, original order preserved).
dataset = TensorDataset(
    encoding['input_ids'].to(device),
    encoding['attention_mask'].to(device),
)
dataloader = DataLoader(dataset, batch_size=32)

# Run inference: the predicted class for each example is the argmax
# over the classifier logits.
model.eval()  # disable dropout etc. for deterministic inference
predictions = []

with torch.no_grad():  # hoisted out of the loop: no gradients needed anywhere here
    for input_ids_batch, attention_mask_batch in dataloader:
        # .to(device) is a no-op when the tensors are already on the right
        # device; kept so this loop is correct on its own.
        outputs = model(
            input_ids=input_ids_batch.to(device),
            attention_mask=attention_mask_batch.to(device),
        )
        # dim=1 is the class dimension of the (batch, num_labels) logits;
        # .tolist() also brings the result back to the CPU.
        predictions.extend(torch.argmax(outputs.logits, dim=1).tolist())

# Build the submission file: one row per input id with its predicted target,
# keeping the ids read from cleanedtest.csv.
submission = pd.DataFrame()
submission['id'] = ids
submission['target'] = predictions
submission.to_csv('E:/nlp-getting-started/predictions.csv', index=False)

print("预测结果已保存为 predictions.csv")