import torch
from transformers import AutoTokenizer
from sklearn.metrics import accuracy_score, f1_score

def evaluate_bert(model, dataloader, device):
    """Evaluate a masked-language model and report loss, perplexity, accuracy and F1.

    Args:
        model: A ``transformers`` MLM-style model; called as
            ``model(input_ids, attention_mask=..., labels=...)`` and expected to
            return an object with ``.loss`` and ``.logits``.
        dataloader: Iterable of batches; each batch is a dict of tensors with
            keys ``"input_ids"``, ``"attention_mask"`` and ``"labels"``.
        device: ``torch.device`` to run evaluation on.

    Returns:
        dict with keys ``"loss"``, ``"perplexity"``, ``"accuracy"`` and ``"f1"``.
        (The metrics are also printed, preserving the original behavior.)

    Raises:
        ValueError: If ``dataloader`` contains no batches.
    """
    model.eval()
    total_loss = 0.0
    all_preds = []
    all_labels = []

    with torch.no_grad():
        for batch in dataloader:
            # Move every tensor in the batch to the target device.
            batch = {k: v.to(device) for k, v in batch.items()}

            labels = batch["labels"]

            # Forward pass; supplying labels makes the model compute the MLM loss.
            outputs = model(
                batch["input_ids"],
                attention_mask=batch["attention_mask"],
                labels=labels,
            )

            total_loss += outputs.loss.item()

            # Greedy token prediction at every position.
            preds = torch.argmax(outputs.logits, dim=-1)

            # Keep only the supervised positions: label -100 marks tokens that
            # are ignored by the loss (non-masked positions).
            valid_mask = labels != -100
            all_preds.extend(preds[valid_mask].cpu().tolist())
            all_labels.extend(labels[valid_mask].cpu().tolist())

    # Guard the averaging step instead of letting it die with ZeroDivisionError.
    num_batches = len(dataloader)
    if num_batches == 0:
        raise ValueError("dataloader yielded no batches; cannot evaluate")

    avg_loss = total_loss / num_batches
    print(f"Evaluation Loss: {avg_loss:.4f}")
    perplexity = torch.exp(torch.tensor(avg_loss))
    print(f"Perplexity: {perplexity:.2f}")

    accuracy = accuracy_score(all_labels, all_preds)
    f1 = f1_score(all_labels, all_preds, average='weighted')
    print(f"Accuracy: {accuracy:.4f}")
    print(f"F1 Score: {f1:.4f}")

    return {
        "loss": avg_loss,
        "perplexity": perplexity.item(),
        "accuracy": accuracy,
        "f1": f1,
    }

if __name__ == "__main__":
    from load_dataset import load_tokenized_dataset
    from transformers import BertConfig, BertForMaskedLM
    
    model_path = "my-bert-model"
    
    # 加载数据集
    tokenizer, tokenized_datasets = load_tokenized_dataset(dataset_part="validation")

    # 加载模型
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    config = BertConfig.from_pretrained(model_path)
    model = BertForMaskedLM.from_pretrained(model_path, config=config)
    model.to(device)
    
    # 评估
    evaluate_bert(model, tokenized_datasets, device)