import pytorch_lightning as pl
import torch
from model import build_model_and_tokenizer
from evaluation_utils import compute_ELn, compute_MA
import pandas as pd
import os

class UnlearnLitModule(pl.LightningModule):
    """LightningModule for gradient-ascent unlearning of a causal LM.

    Training returns the *negated* LM loss, so optimizing it maximizes the
    original loss on the forget set. Validation runs the model frozen-style
    (no grad bookkeeping beyond Lightning's defaults) and accumulates
    per-sample EL10 / MA / token-accuracy records, which are averaged per
    validation dataset in ``on_validation_epoch_end``.
    """

    def __init__(self, model, tokenizer, cfg):
        """
        Args:
            model: HuggingFace-style causal LM whose forward returns an
                object with ``.loss`` and ``.logits``.
            tokenizer: matching tokenizer (kept for downstream eval use).
            cfg: dict-like config. Required keys: ``"learning_rate"``,
                ``"valid_sets"`` (list; first entry is the main val set).
                Optional flags: ``"compute_metrics"`` (default True),
                ``"compute_lm_acc"`` (default True).
        """
        super().__init__()
        self.save_hyperparameters(cfg)
        self.model = model
        self.tokenizer = tokenizer
        self.cfg = cfg
        self.learning_rate = cfg["learning_rate"]
        # Per-dataset metric means, populated in on_validation_epoch_end.
        self.val_metrics = {}
        # Per-sample records accumulated across validation_step calls.
        self.validation_step_outputs = []

    def clear_val_metrics(self):
        """Reset the per-dataset metric summary."""
        self.val_metrics = {}

    def forward(self, input_ids, attention_mask, labels):
        """Delegate to the wrapped model's forward pass."""
        return self.model(input_ids, attention_mask=attention_mask, labels=labels)

    def training_step(self, batch, batch_idx):
        """One unlearning step: maximize the LM loss on the batch."""
        # Copy so we never mutate the dataloader's batch, then drop the
        # bookkeeping field the model's forward does not accept.
        batch = dict(batch)
        batch.pop("dataset_name", None)
        out = self(**batch)
        # self.print is rank-zero aware, so multi-GPU runs log this once
        # per step instead of once per process.
        self.print("[INFO] =========unlearn training==========")
        # Gradient ascent: negate the loss so minimizing it *increases*
        # the model's loss on the forget data.
        loss = -out.loss
        self.log("unlearn_loss", loss, prog_bar=True)
        return loss

    def validation_step(self, batch, batch_idx):
        """Forward pass plus per-sample metric collection.

        Appends one record per sample to ``self.validation_step_outputs``;
        aggregation happens in ``on_validation_epoch_end``.
        """
        input_ids = batch["input_ids"]
        attention_mask = batch.get("attention_mask", None)
        labels = batch.get("labels", None)

        # dataset_name may arrive as a plain str, a list/tuple of str
        # (default collation), or a tensor — normalize to a scalar.
        dataset_name = batch.get("dataset_name", "val")
        if isinstance(dataset_name, (list, tuple)) and len(dataset_name) > 0:
            dataset_name = dataset_name[0]
        if hasattr(dataset_name, "item"):
            dataset_name = dataset_name.item()

        outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
        loss = outputs.loss
        logits = outputs.logits

        # Metrics need token-level labels; skip cleanly when the batch
        # carries none (previously this crashed on labels[i]).
        if self.cfg.get("compute_metrics", True) and labels is not None:
            for i in range(input_ids.size(0)):
                label_seq = labels[i].tolist()
                logit_seq = logits[i]
                pred_ids = torch.argmax(logit_seq, dim=-1).tolist()

                try:
                    el10 = compute_ELn(pred_ids, label_seq, n=10)
                    ma = compute_MA(logit_seq, torch.tensor(label_seq).to(logit_seq.device))
                except Exception as e:
                    # Best-effort: a failed metric must not abort validation.
                    self.print(f"[WARN] Metric compute failed: {e}")
                    el10, ma = 0.0, 0.0
                if self.cfg.get("compute_lm_acc", True):
                    # NOTE(review): this compares logits position-for-position
                    # with labels, ignoring the causal one-token shift and any
                    # -100 ignore-index padding — confirm intended.
                    try:
                        correct = sum(p == l for p, l in zip(pred_ids, label_seq))
                        acc = correct / max(len(label_seq), 1)
                    except Exception:
                        acc = 0.0
                else:
                    acc = None
                self.validation_step_outputs.append({
                    "dataset": dataset_name,
                    "val_el10": torch.tensor(el10),
                    "val_ma": torch.tensor(ma),
                    "val_acc": torch.tensor(acc) if acc is not None else None,
                    # Batch-level loss, duplicated per sample on purpose.
                    "val_loss": loss.detach()
                })
        return {"val_loss": loss}

    def config_optimizer(self):
        """Legacy helper — NOT a Lightning hook and never called by the
        Trainer (``configure_optimizers`` below is the active one). Kept
        only for backward compatibility with external callers."""
        return torch.optim.AdamW(self.parameters(), lr=5e-5)

    def configure_optimizers(self):
        """AdamW with a per-epoch exponential (StepLR, gamma=0.95) decay."""
        optimizer = torch.optim.AdamW(self.parameters(), lr=self.learning_rate)

        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.95)

        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler,
                "interval": "epoch",
                "frequency": 1,
            }
        }

    def on_validation_epoch_end(self):
        """Average the accumulated per-sample metrics per dataset and log."""
        from collections import defaultdict
        grouped_metrics = defaultdict(list)
        grouped_losses = defaultdict(list)
        grouped_acc = defaultdict(list)
        for out in self.validation_step_outputs:
            dataset = out["dataset"]
            grouped_metrics[dataset].append((out["val_el10"], out["val_ma"]))
            if "val_loss" in out:
                grouped_losses[dataset].append(out["val_loss"])
            if "val_acc" in out and out["val_acc"] is not None:
                grouped_acc[dataset].append(out["val_acc"])

        for name, values in grouped_metrics.items():
            el10_mean = torch.stack([v[0] for v in values]).mean().item()
            ma_mean = torch.stack([v[1] for v in values]).mean().item()
            if name in grouped_losses and grouped_losses[name]:
                val_loss_mean = torch.stack(grouped_losses[name]).mean()
                self.log(f"{name}/val_loss", val_loss_mean, prog_bar=True, on_epoch=True)
            else:
                val_loss_mean = None

            # Accuracy is optional (only present when compute_lm_acc is on).
            if name in grouped_acc and grouped_acc[name]:
                acc_mean = torch.stack(grouped_acc[name]).float().mean().item()
                self.log(f"{name}/val_acc", acc_mean, prog_bar=False, on_epoch=True)
            else:
                acc_mean = 0.0

            self.val_metrics[name] = {
                "val_el10": el10_mean,
                "val_ma": ma_mean,
                "val_acc": acc_mean
            }

        # Log the first configured valid set under the plain "val_loss" key
        # (used for checkpointing / early stopping). Guarded so a missing
        # or empty "valid_sets" entry skips logging instead of raising.
        valid_sets = self.cfg.get("valid_sets") or []
        if valid_sets:
            main_set = valid_sets[0]
            if main_set in grouped_losses and grouped_losses[main_set]:
                val_loss_main = torch.stack(grouped_losses[main_set]).mean()
                self.log("val_loss", val_loss_main, prog_bar=True, on_epoch=True)

        self.validation_step_outputs.clear()
