import torch
from transformers import BertTokenizer, BertForSequenceClassification
from torch.utils.data import DataLoader, Dataset
import pandas as pd

device = 'cuda' if torch.cuda.is_available() else 'cpu'
# The fine-tuned tokenizer and model are expected to be saved under the 'saved_model' directory.
model_path = 'saved_model'


def eval_data():
    """Evaluate the fine-tuned BERT classifier on the validation set.

    Loads the tokenizer/model from ``model_path``, runs inference over the
    data returned by ``parse_data()``, and prints the overall accuracy.
    """
    # Load tokenizer and model from the saved checkpoint directory.
    tokenizer = BertTokenizer.from_pretrained(model_path)
    model = BertForSequenceClassification.from_pretrained(model_path)
    model.to(device)
    model.eval()

    class CustomDataset(Dataset):
        """Wraps parallel text/label lists and tokenizes lazily per item."""

        def __init__(self, texts, labels):
            self.texts = texts
            self.labels = labels

        def __len__(self):
            return len(self.texts)

        def __getitem__(self, idx):
            text = self.texts[idx]
            label = self.labels[idx]
            encoding = tokenizer.encode_plus(
                text,
                max_length=128,  # adjust to the real input length if needed
                padding='max_length',
                truncation=True,
                return_attention_mask=True,
                return_tensors='pt'
            )
            return {
                'input_ids': encoding['input_ids'].flatten(),
                'attention_mask': encoding['attention_mask'].flatten(),
                'labels': torch.tensor(label, dtype=torch.long)
            }

    texts, labels = parse_data()

    dataset = CustomDataset(texts, labels)
    # Shuffling is pointless for evaluation and makes runs non-reproducible.
    dataloader = DataLoader(dataset, batch_size=64, shuffle=False)

    correct_predictions = 0
    total_predictions = 0

    # One no_grad scope around the whole loop: no autograd bookkeeping at all.
    with torch.no_grad():
        for batch in dataloader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            batch_labels = batch['labels'].to(device)

            # Do not pass labels=: we only need logits, so skip the
            # (discarded) loss computation inside the model.
            outputs = model(input_ids, attention_mask=attention_mask)
            predictions = torch.argmax(outputs.logits, dim=1)
            correct_predictions += (predictions == batch_labels).sum().item()
            total_predictions += batch_labels.size(0)

    # Guard against an empty validation set (ZeroDivisionError otherwise).
    accuracy = correct_predictions / total_predictions if total_predictions else 0.0
    print(f"Accuracy: {accuracy * 100:.2f}%")


def parse_data():
    """Load validation texts and their integer label ids.

    Returns:
        (texts, label_ids): parallel lists read from the tab-separated
        validation file, with string labels mapped via label.txt.
    """
    data_path = 'data-bak2/validation.csv'
    label_path = 'data-bak2/label.txt'
    _, label2id = parse_labels(label_path)  # id2label is not needed here
    frame = pd.read_csv(data_path, header=None, sep='\t')
    texts = frame[0].tolist()
    label_ids = [label2id[raw_label] for raw_label in frame[1].tolist()]
    return texts, label_ids


def parse_labels(label_path):
    """Build the id<->label lookup tables from a one-label-per-line file.

    Args:
        label_path: Path to a headerless, tab-separated file whose first
            column holds the label names, one per line.

    Returns:
        (id2label, label2id): ``id2label`` maps the *stringified* row index
        to the label name; ``label2id`` maps the label name back to its
        integer index (last occurrence wins on duplicates, as before).
    """
    df = pd.read_csv(label_path, header=None, sep='\t')
    # Extract the column once instead of chained df.iloc[i][0] per row,
    # which is slower and a pandas chained-indexing smell.
    labels = df[0].tolist()
    id2label = {str(i): label for i, label in enumerate(labels)}
    label2id = {label: i for i, label in enumerate(labels)}
    return id2label, label2id


if __name__ == '__main__':
    # Script entry point: run the validation-set evaluation.
    eval_data()