import torch
import pandas as pd
from transformers import BertTokenizer, BertForSequenceClassification
from torch.utils.data import DataLoader, TensorDataset
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from tqdm import tqdm

# Select the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the held-out test data. Expects a 'comment' text column and a
# 'label' column already encoded as integer class ids
# (e.g. positive=0, negative=1, neutral=2 — confirm against training script).
df_test = pd.read_csv('test_data.csv')

# Checkpoint to evaluate — swap this path to score a different fold.
MODEL_DIR = "./sentiment_best_model_accuracy/sentiment_best_model_f1"

# Load the fine-tuned model together with its matching tokenizer so the
# vocabulary/ids line up with what the model was trained on.
model = BertForSequenceClassification.from_pretrained(MODEL_DIR)
tokenizer = BertTokenizer.from_pretrained(MODEL_DIR)

model.to(device)
model.eval()  # disable dropout etc. for deterministic evaluation

# Tokenize every test comment, padding/truncating to 128 tokens.
comments = df_test['comment'].tolist()
test_encodings = tokenizer(comments, truncation=True, padding=True, max_length=128)

# Bundle token ids, attention masks and gold labels into one dataset.
input_id_tensor = torch.tensor(test_encodings['input_ids'])
attention_tensor = torch.tensor(test_encodings['attention_mask'])
label_tensor = torch.tensor(df_test['label'].tolist())
test_dataset = TensorDataset(input_id_tensor, attention_tensor, label_tensor)

# Keep evaluation order stable: no shuffling.
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)

# Run inference over the test set, recording per-batch metrics as well as
# the pooled true/predicted labels for overall scoring afterwards.
test_losses = []
test_accuracies = []
test_precisions = []
test_recalls = []
test_f1_scores = []
all_y_true = []
all_y_pred = []

with torch.no_grad():
    for input_ids, attention_mask, labels in tqdm(test_loader, desc="Evaluating"):
        input_ids = input_ids.to(device)
        attention_mask = attention_mask.to(device)
        labels = labels.to(device)

        # Supplying labels makes the model return the cross-entropy loss too.
        outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
        predicted = torch.argmax(outputs.logits, dim=1)

        batch_true = labels.cpu().numpy()
        batch_pred = predicted.cpu().numpy()

        all_y_true.extend(batch_true)
        all_y_pred.extend(batch_pred)

        # Per-batch scores (macro-averaged over classes); zero_division=0
        # guards against classes absent from a small batch.
        test_losses.append(outputs.loss.item())
        test_accuracies.append(accuracy_score(batch_true, batch_pred))
        test_precisions.append(precision_score(batch_true, batch_pred, average='macro', zero_division=0))
        test_recalls.append(recall_score(batch_true, batch_pred, average='macro', zero_division=0))
        test_f1_scores.append(f1_score(batch_true, batch_pred, average='macro', zero_division=0))

# Pooled metrics over the entire test set (macro-averaged across classes).
# zero_division=0 keeps these calls consistent with the per-batch metrics
# and silences sklearn's UndefinedMetricWarning when a class never appears
# in the predictions (the returned value, 0.0, is unchanged).
overall_accuracy = accuracy_score(all_y_true, all_y_pred)
overall_precision = precision_score(all_y_true, all_y_pred, average='macro', zero_division=0)
overall_recall = recall_score(all_y_true, all_y_pred, average='macro', zero_division=0)
overall_f1 = f1_score(all_y_true, all_y_pred, average='macro', zero_division=0)
overall_loss = np.mean(test_losses)

print(f"Test Loss: {overall_loss:.4f}")
print(f"Test Accuracy: {overall_accuracy:.4f}")
print(f"Test Precision: {overall_precision:.4f}")
print(f"Test Recall: {overall_recall:.4f}")
print(f"Test F1-score: {overall_f1:.4f}")

# Confusion-matrix heat-map for the pooled predictions.
cm = confusion_matrix(all_y_true, all_y_pred)
plt.figure(figsize=(10, 8))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
plt.title('Confusion Matrix')
plt.ylabel('True Label')
plt.xlabel('Predicted Label')
plt.savefig('confusion_matrix.png')
plt.close()

# All per-batch metric curves on a single chart; the list order fixes the
# legend order, matching the original plot.
metric_series = [
    (test_losses, 'Loss'),
    (test_accuracies, 'Accuracy'),
    (test_precisions, 'Precision'),
    (test_recalls, 'Recall'),
    (test_f1_scores, 'F1-score'),
]
plt.figure(figsize=(12, 8))
for series, series_label in metric_series:
    plt.plot(series, label=series_label)
plt.title('Test Set Evaluation Metrics')
plt.xlabel('Batch')
plt.ylabel('Score')
plt.legend()
plt.grid(True)
plt.savefig('test_evaluation_metrics.png')
plt.close()

# The loss curve again on its own chart for easier reading.
plt.figure(figsize=(10, 6))
plt.plot(test_losses)
plt.title('Test Loss')
plt.xlabel('Batch')
plt.ylabel('Loss')
plt.grid(True)
plt.savefig('test_loss.png')
plt.close()