from imdb_reward_model import OPTTokenLevelClassifier
import torch
from sklearn.metrics import accuracy_score, f1_score
from transformers import OPTForSequenceClassification
import torch.nn as nn
from datasets import load_dataset
from transformers import AutoModel, AutoTokenizer
from torch.utils.data import DataLoader
from tqdm import tqdm

def evaluate(model, val_loader, device):
    """Run one full evaluation pass over ``val_loader``.

    Puts the model in eval mode, accumulates the per-batch loss from
    ``model.forward_value_for_evaluation``, and collects argmax predictions
    against the gold labels.

    Args:
        model: model exposing ``forward_value_for_evaluation(input_ids,
            attention_mask, labels)`` that returns a dict with "loss" and
            "logits".
        val_loader: iterable of batches with "input_ids", "attention_mask"
            and "labels" tensors.
        device: torch device the batches are moved to.

    Returns:
        dict with "val_loss" (mean batch loss), "accuracy" and "f1_score"
        (weighted F1, suitable for multi-class labels).
    """
    model.eval()
    total_loss = 0.0
    predictions = []
    references = []
    print("start evaluation")
    with torch.no_grad():  # inference only — no gradients needed
        for batch in tqdm(val_loader):
            ids = batch["input_ids"].to(device)
            mask = batch["attention_mask"].to(device)
            gold = batch["labels"].to(device)

            # Forward pass through the evaluation head.
            outputs = model.forward_value_for_evaluation(
                input_ids=ids, attention_mask=mask, labels=gold
            )
            total_loss += outputs["loss"].item()

            # Class prediction = argmax over the logits' last dimension.
            predictions.extend(torch.argmax(outputs["logits"], dim=-1).cpu().numpy())
            references.extend(gold.cpu().numpy())

    return {
        "val_loss": total_loss / len(val_loader),
        "accuracy": accuracy_score(references, predictions),
        "f1_score": f1_score(references, predictions, average="weighted"),
    }


if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # (1) Load the dataset.
    # NOTE(review): the file mapping looks swapped ("train" -> IMDB_test.csv,
    # "test" -> IMDB_train.csv). Only dataset["test"] is used below, so
    # evaluation currently runs on IMDB_train.csv — confirm this is intended.
    data_files = {"train": "IMDB_test.csv",
                  "test": "IMDB_train.csv"}
    dataset = load_dataset(
        "csv",
        data_dir="/root/autodl-tmp/fcrlhf/DeepSpeed-Chat/datasets/IMDb_movie_reviews",
        data_files=data_files
    )


    # (2) Preprocessing function — currently an identity pass-through;
    # the HTML-tag cleanup is left commented out.
    def preprocess_function(examples):
        # Assumes the CSV columns are named "text" and "label";
        # adjust here if the column names differ.
        return {
            # "text": [text.replace("<br />", " ") for text in examples["text"]],
            "text": examples["text"],
            "label": examples["label"]
        }


    # Apply the preprocessing over the whole dataset in batches.
    dataset = dataset.map(preprocess_function, batched=True)


    # (3) Wrap the HF dataset in a PyTorch Dataset that tokenizes on access.
    class IMDBDataset(torch.utils.data.Dataset):
        def __init__(self, dataset, tokenizer, max_length=512):
            self.texts = dataset["text"]
            self.labels = dataset["label"]
            self.tokenizer = tokenizer
            self.max_length = max_length

        def __len__(self):
            return len(self.texts)

        def __getitem__(self, idx):
            text = self.texts[idx]
            label = self.labels[idx]

            # Pad/truncate every example to a fixed length so the default
            # collate can stack them into batches.
            encoding = self.tokenizer(
                text,
                max_length=self.max_length,
                padding="max_length",
                truncation=True,
                return_tensors="pt"
            )

            return {
                "input_ids": encoding["input_ids"].squeeze(),  # [seq_len]
                "attention_mask": encoding["attention_mask"].squeeze(),
                "labels": torch.tensor(label, dtype=torch.long)
            }


    # Build the validation set with the fine-tuned tokenizer.
    save_path = "/root/autodl-tmp/fcrlhf/DeepSpeed-Chat/tests/output"
    tokenizer = AutoTokenizer.from_pretrained(save_path)
    val_dataset = IMDBDataset(dataset["test"], tokenizer)

    model = OPTTokenLevelClassifier(opt_model_name = "/root/autodl-tmp/DeepSpeedExamples/applications/fcrlhf/DeepSpeed-Chat/models/opt-350m").to(device)

    # Load the fine-tuned weights. map_location keeps this working on a
    # CPU-only host even if the checkpoint was saved from a GPU process
    # (without it, torch.load tries to restore CUDA tensors and crashes).
    model.load_state_dict(torch.load(f"{save_path}/pytorch_model.bin", map_location=device))
    print("加载成功")
    # Put the model in evaluation mode (evaluate() also does this; harmless).
    model.eval()

    # Build the validation loader.
    val_loader = DataLoader(val_dataset, batch_size=8)

    # Evaluate the model.
    eval_results = evaluate(model, val_loader, device)

    # Report the metrics.
    print(f"Validation Loss: {eval_results['val_loss']:.4f}")
    print(f"Accuracy: {eval_results['accuracy']:.4f}")
    print(f"F1 Score: {eval_results['f1_score']:.4f}")