import token
from transformers import AutoModel
from torch.nn import Module, Linear, CrossEntropyLoss, GELU, LayerNorm
import torch
import copy
from torch.nn import functional as F
import pytorch_lightning as pl
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import WhisperProcessor, WhisperForConditionalGeneration


class CustomModel(pl.LightningModule):
    """LightningModule that fine-tunes a pretrained Whisper model.

    The wrapped model is created lazily in ``configure_model`` so Lightning
    can defer instantiation (useful with sharded/strategy-managed setups).
    """

    def __init__(self, conf):
        """Store the config and set up lazy model creation.

        Args:
            conf: configuration object; must expose ``ckpt`` (experiment
                name used for the log directory) and ``lr`` (learning rate).
        """
        super().__init__()
        self.conf = conf
        self.model = None  # created lazily in configure_model()
        # NOTE(review): this logger is constructed but never used in this
        # class; Lightning normally gets its logger via the Trainer —
        # confirm whether self.writer is consumed elsewhere.
        self.writer = pl.loggers.TensorBoardLogger("./explogs/", name=self.conf.ckpt)

    def configure_model(self):
        """Instantiate the Whisper model exactly once (idempotent)."""
        if self.model is not None:
            return
        self.model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")

    def forward(self, batch):
        """Run the wrapped model on an unpacked batch dict."""
        return self.model(**batch)

    def _step_loss(self, batch):
        """Compute the model loss for one batch (shared by train/val steps).

        Uses the batch's ``input_ids`` as labels; ``clone()`` prevents any
        in-place label shifting inside the model from mutating the batch.
        """
        # NOTE(review): assumes batch does NOT already contain a "labels"
        # key — otherwise ``**batch`` plus labels= raises a duplicate-kwarg
        # TypeError. Verify against the DataLoader/collator.
        outputs = self.model(**batch, labels=batch["input_ids"].clone())
        return outputs.loss

    def training_step(self, batch, batch_idx):
        """One optimization step; logs and returns the training loss."""
        train_loss = self._step_loss(batch)
        self.log("train_loss", train_loss.item(), on_step=True, on_epoch=True, prog_bar=True)
        return train_loss

    def validation_step(self, batch, batch_idx):
        """One validation step; logs and returns the validation loss."""
        val_loss = self._step_loss(batch)
        self.log("valid_loss", val_loss.item(), on_step=True, on_epoch=True, prog_bar=True)
        return val_loss

    def configure_optimizers(self):
        """AdamW over the wrapped model's parameters at the configured LR."""
        return torch.optim.AdamW(self.model.parameters(), lr=self.conf.lr)


class CustomModelCheckpoint(pl.Callback):
    """Checkpoint callback combining periodic and best-validation saves.

    Two independent behaviors run after every training batch:
      * periodic saves every ``save_interval`` steps while the global step
        lies inside ``save_step_range`` (disabled if the range contains -1);
      * a "best" save whenever the logged ``valid_loss`` improves.

    Args:
        save_step_range: two-element [start, end] global-step window for
            periodic checkpoints; include -1 to disable periodic saving.
        save_interval: save every this many steps inside the window.
        model_name: filename prefix for checkpoints written under ./ckpt/.
    """

    def __init__(self, save_step_range, save_interval, model_name):
        super().__init__()
        self.best_val_loss = float("inf")  # best validation loss seen so far
        self.save_step_range = save_step_range  # [start, end] step window
        self.save_interval = save_interval  # steps between periodic saves
        self.model_name = model_name

    def on_train_batch_end(self, trainer, *args):
        """Lightning hook: maybe save periodic and/or best checkpoints."""
        current_step = trainer.global_step + 1
        # Periodic save inside the configured window; -1 anywhere in the
        # range acts as an "off" switch.
        if -1 not in self.save_step_range:
            in_window = self.save_step_range[0] <= current_step <= self.save_step_range[1]
            if in_window and current_step % self.save_interval == 0:
                trainer.save_checkpoint(f"./ckpt/{self.model_name}_step{current_step}.pth")
        # "valid_loss" only appears in callback_metrics after a validation
        # run has been logged; until then we skip the best-model check.
        current_val_loss = trainer.callback_metrics.get("valid_loss")
        if current_val_loss is not None:
            # float() unwraps a possible tensor so we never retain framework
            # state and the comparison is always float-vs-float.
            current_val_loss = float(current_val_loss)
            if current_val_loss < self.best_val_loss:
                self.best_val_loss = current_val_loss
                trainer.save_checkpoint(f"./ckpt/{self.model_name}best.pth")
