import torch
from sklearn.metrics import accuracy_score, f1_score
from tqdm import tqdm

from load_dataset import create_masked_inputs, load_tokenized_dataset
from model import BertForPermutationAndMaskedLM

def evaluate_bert_with_permutation(model, dataloader, tokenizer, device):
    """Evaluate a BERT model trained jointly on MLM and word-order recovery.

    For each batch, the permuted input ids are re-masked with the same masking
    probability used during training, a single forward pass produces both task
    outputs, and per-task losses plus token-level accuracy/F1 are accumulated.

    Args:
        model: A ``BertForPermutationAndMaskedLM`` (or compatible) module whose
            forward accepts ``labels`` and ``permutation_labels`` and returns a
            combined loss plus per-task logits.
        dataloader: Yields dicts with ``permuted_input_ids``,
            ``attention_mask`` and ``permutation_labels`` tensors.
        tokenizer: Tokenizer passed through to ``create_masked_inputs``.
        device: Target ``torch.device`` for the batch tensors.

    Returns:
        dict with keys ``total_loss``, ``mlm_loss``, ``perm_loss``,
        ``perplexity``, ``mlm_accuracy``, ``mlm_f1``, ``perm_accuracy``,
        ``perm_f1`` (all plain Python floats).
    """
    model.eval()
    total_mlm_loss = 0.0
    total_perm_loss = 0.0
    mlm_probability = 0.15  # must match the masking rate used during training

    # Predictions/labels for the MLM task (masked positions only).
    mlm_all_preds = []
    mlm_all_labels = []
    # Predictions/labels for the word-order recovery task.
    perm_all_preds = []
    perm_all_labels = []

    with torch.no_grad():
        for batch in tqdm(dataloader, desc="Evaluating"):
            # Move the batch to the target device.
            batch = {k: v.to(device) for k, v in batch.items()}

            # Apply the same masking strategy as during training.
            mlm_inputs = batch["permuted_input_ids"]
            mlm_inputs, mlm_labels = create_masked_inputs(mlm_inputs, tokenizer, device, mlm_probability)

            # Gold labels for the word-order recovery task.
            perm_labels = batch["permutation_labels"]

            # Forward pass (single pass produces both task heads' outputs).
            outputs = model(
                mlm_inputs,
                attention_mask=batch["attention_mask"],
                labels=mlm_labels,
                permutation_labels=perm_labels
            )

            # ---- Loss decomposition ----
            # BUG FIX: the original read ``outputs.loss`` for BOTH the total
            # and the MLM loss, so ``perm_loss`` was always exactly zero and
            # the reported total collapsed to the MLM loss. Prefer per-task
            # losses when the model output exposes them, and derive the
            # missing one from the combined loss otherwise.
            total_loss = outputs.loss if hasattr(outputs, "loss") else outputs[0]
            mlm_loss = getattr(outputs, "mlm_loss", None)
            perm_loss = getattr(outputs, "permutation_loss", None)
            # NOTE(review): the attribute names ``mlm_loss`` /
            # ``permutation_loss`` are assumed from the model class name —
            # confirm against BertForPermutationAndMaskedLM's output object.
            if mlm_loss is None and perm_loss is not None:
                mlm_loss = total_loss - perm_loss
            elif perm_loss is None and mlm_loss is not None:
                perm_loss = total_loss - mlm_loss
            elif mlm_loss is None and perm_loss is None:
                # Per-task losses unavailable: attribute everything to MLM so
                # the reported sums stay consistent (perm contribution is 0).
                mlm_loss = total_loss
                perm_loss = total_loss - mlm_loss

            # ``float()`` handles both scalar tensors and plain floats.
            total_mlm_loss += float(mlm_loss)
            total_perm_loss += float(perm_loss)

            # ------------------ MLM task evaluation ------------------
            if mlm_labels is not None:
                mlm_logits = outputs.logits if hasattr(outputs, "logits") else outputs[1]
                mlm_preds = torch.argmax(mlm_logits, dim=-1)

                # Only score the masked positions (-100 marks ignored tokens).
                valid_mask = mlm_labels != -100
                valid_mlm_preds = mlm_preds[valid_mask]
                valid_mlm_labels = mlm_labels[valid_mask]

                mlm_all_preds.extend(valid_mlm_preds.cpu().tolist())
                mlm_all_labels.extend(valid_mlm_labels.cpu().tolist())

            # ------------------ Word-order recovery evaluation ------------------
            if perm_labels is not None:
                # Fetch permutation logits (object- and tuple-style outputs).
                perm_logits = outputs.permutation_logits if hasattr(outputs, "permutation_logits") else outputs[2]
                perm_preds = torch.argmax(perm_logits, dim=-1)

                valid_mask = perm_labels != -100
                valid_perm_preds = perm_preds[valid_mask]
                valid_perm_labels = perm_labels[valid_mask]

                perm_all_preds.extend(valid_perm_preds.cpu().tolist())
                perm_all_labels.extend(valid_perm_labels.cpu().tolist())

    # Average the losses over all batches.
    num_batches = len(dataloader)
    avg_mlm_loss = total_mlm_loss / num_batches
    avg_perm_loss = total_perm_loss / num_batches
    avg_total_loss = (total_mlm_loss + total_perm_loss) / num_batches

    # Perplexity is only meaningful for the MLM task.
    perplexity = torch.exp(torch.tensor(avg_mlm_loss))

    # MLM accuracy / weighted F1 (guard against empty label lists).
    mlm_accuracy = accuracy_score(mlm_all_labels, mlm_all_preds) if mlm_all_labels else 0
    mlm_f1 = f1_score(mlm_all_labels, mlm_all_preds, average='weighted') if mlm_all_labels else 0

    # Word-order recovery accuracy / weighted F1.
    perm_accuracy = accuracy_score(perm_all_labels, perm_all_preds) if perm_all_labels else 0
    perm_f1 = f1_score(perm_all_labels, perm_all_preds, average='weighted') if perm_all_labels else 0

    # Report the evaluation results.
    print("\n===== Evaluation Results =====")
    print(f"Total Loss: {avg_total_loss:.4f}")
    print(f"MLM Loss: {avg_mlm_loss:.4f} | Perplexity: {perplexity:.2f}")
    print(f"Permutation Loss: {avg_perm_loss:.4f}")
    print(f"MLM Accuracy: {mlm_accuracy:.4f} | MLM F1: {mlm_f1:.4f}")
    print(f"Permutation Accuracy: {perm_accuracy:.4f} | Permutation F1: {perm_f1:.4f}")

    return {
        "total_loss": avg_total_loss,
        "mlm_loss": avg_mlm_loss,
        "perm_loss": avg_perm_loss,
        "perplexity": perplexity.item(),
        "mlm_accuracy": mlm_accuracy,
        "mlm_f1": mlm_f1,
        "perm_accuracy": perm_accuracy,
        "perm_f1": perm_f1
    }
    
if __name__ == "__main__":
    # Pick the compute device before loading anything heavy.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Validation split with permutation labels attached.
    tokenizer, val_dataloader = load_tokenized_dataset(
        dataset_part="validation", add_permutation=True
    )

    # Load the fine-tuned checkpoint with the modified dual-head class
    # (``.to`` returns the module itself, so chaining is equivalent).
    model = BertForPermutationAndMaskedLM.from_pretrained("my-bert-model").to(device)

    # Run the joint MLM + word-order evaluation.
    eval_results = evaluate_bert_with_permutation(
        model, val_dataloader, tokenizer, device
    )