import json
import importlib
import os
from torch.utils.data.dataloader import DataLoader
import torchmetrics
import random
import logging
logging.getLogger().setLevel(logging.DEBUG)
import warnings

warnings.filterwarnings(
    "ignore", ".*Trying to infer the `batch_size` from an ambiguous collection.*"
)
import torch.nn as nn
import torch.nn.utils
import pytorch_lightning as pl
import plugin
from pytorch_lightning import loggers as pl_loggers
from datetime import datetime
from argparse import ArgumentParser
from transformers import BertTokenizer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from mymetrics import InstanceAccuracy, RankingAccuracy
from dataset import POIDataset, MyCollator

## Not called explicitly, but importing these modules registers them with the plugin system.
import model
import myloss


class EarlyStoppingWithWarmup(EarlyStopping):
    """EarlyStopping that ignores the first ``warmup`` epochs.

    NOTE(review): the original overrode ``on_epoch_end``, but
    ``EarlyStopping`` performs its stopping check inside
    ``on_validation_end`` / ``on_train_epoch_end`` (PyTorch Lightning 1.x),
    which it does not run from ``on_epoch_end`` — so the warmup gate never
    took effect and early stopping was active from epoch 0.  Guarding the
    two real check hooks fixes that.

    Args:
        warmup: number of initial epochs during which early stopping is
            disabled (default 6).
        **kwargs: forwarded to ``EarlyStopping`` (monitor, patience,
            min_delta, mode, ...).
    """

    def __init__(self, warmup=6, **kwargs):
        super().__init__(**kwargs)
        self.warmup = warmup

    def on_validation_end(self, trainer, pl_module):
        # Skip the early-stopping check while still warming up.
        if trainer.current_epoch >= self.warmup:
            super().on_validation_end(trainer, pl_module)

    def on_train_epoch_end(self, trainer, pl_module):
        # Same guard for the train-epoch variant (used when the callback is
        # configured to check on train epoch end instead of validation end).
        if trainer.current_epoch >= self.warmup:
            super().on_train_epoch_end(trainer, pl_module)


class OCRFilterNet(pl.LightningModule):
    """LightningModule for training.

    The concrete network, loss function, optimizer and LR scheduler are all
    resolved from a config dict through the plugin registry, so different
    architectures can be trained from JSON config files alone.
    """

    def __init__(self, config: dict[str, object], *args, **kwargs) -> None:
        """Build model, loss and metrics from ``config``.

        Config keys read here and in the other hooks: "model", "loss",
        "loss_param", "warmup_epoch", "optimizer", "lr", "bert_lr",
        "scheduler", "scheduler_params"; optional: "backbone_lr",
        "optimizer_params".
        """
        super().__init__(*args, **kwargs)
        self.config = config
        for k, v in self.config.items():
            print(k, v)
        self.save_hyperparameters()
        Model = plugin.get_plugin("model", config["model"])
        logging.debug(plugin.DefaultPluginManager.plugin_container)
        logging.debug(f"plugin: {Model}")
        self.model = Model(**config)
        # Loss function, resolved via the plugin registry
        # (default: BCE-with-logits style; exact semantics live in myloss).
        self.loss_fn = plugin.get_plugin("loss", config["loss"])(**config["loss_param"])
        # Standard metrics — separate train/valid instances so their
        # accumulated state never mixes between loops.
        self.train_accuracy = torchmetrics.Accuracy()
        self.valid_accuracy = torchmetrics.Accuracy()
        self.train_recall = torchmetrics.Recall()
        self.valid_recall = torchmetrics.Recall()
        self.train_auc = torchmetrics.AUROC()
        self.valid_auc = torchmetrics.AUROC()
        # Custom metrics: per-instance accuracy (was the whole example
        # predicted correctly) and ranking accuracy.
        self.train_instance_accuracy = InstanceAccuracy()
        self.valid_instance_accuracy = InstanceAccuracy()
        self.train_rank_accuracy = RankingAccuracy()
        self.valid_rank_accuracy = RankingAccuracy()

    def forward(self, x):
        """Delegate inference to the plugin-built model."""
        return self.model(x)

    def training_step(self, batch, batch_idx):
        """One training step: compute loss, update train metrics, log.

        Returns the combined loss (BCE part + ranking part) for backprop.
        """
        # model returns per-example padded logits plus the true lengths.
        logits, lengths = self.model(batch)
        if torch.any(torch.isnan(logits)) or torch.any(torch.isinf(logits)):
            # Debug aid: dump logits and embeddings when numbers blow up.
            logging.error("logits")
            logging.error(logits)
            emb = self.model.get_embedding(batch)
            logging.error("embedding")
            logging.error(emb)
            logging.error(torch.any(torch.isnan(emb)))
            logging.error(torch.any(torch.isinf(emb)))
        # Flatten the padded per-example logits into one 1-D tensor.
        # batch[3]: flat rank-order tensor of shape [ocr_total_len];
        # negative entries mark negative samples (label 0 below).
        # NOTE(review): exact batch layout comes from MyCollator — confirm there.
        rankorder = batch[3]   # [ocr_total_len]
        labels = (rankorder >= 0).int()
        poi_logits = torch.cat([logits[i, :lengths[i], 0].squeeze(-1) for i in range(len(lengths))])
        # During warmup the loss fn is told to behave differently (flag=True);
        # the semantics of the flag live in the loss plugin.
        flag = self.current_epoch < self.config["warmup_epoch"]
        loss, bce_loss, rankloss = self.loss_fn(logits, rankorder, lengths, flag)
        pred_prob = torch.sigmoid(poi_logits)
        batch_size = len(batch[2])
        self.train_auc.update(pred_prob, labels)
        self.train_accuracy.update(pred_prob, labels)
        self.train_recall.update(pred_prob, labels)
        self.train_instance_accuracy.update(poi_logits, labels, lengths)
        self.train_rank_accuracy.update(logits, rankorder, lengths)
        self.log("train_loss_step", loss.item(), batch_size=batch_size)
        self.log("train_bceloss_step", bce_loss.item(), batch_size=batch_size)
        self.log("train_rankloss_step", rankloss.item(), batch_size=batch_size)
        return loss

    def validation_step(self, batch, batch_idx):
        """One validation step — mirrors training_step without the NaN dump."""
        logits, lengths = self.model(batch)
        # Flatten the padded per-example logits into one 1-D tensor.
        rankorder = batch[3]   # [ocr_total_len]
        labels = (rankorder >= 0).int()
        poi_logits = torch.cat([logits[i, :lengths[i], 0].squeeze(-1) for i in range(len(lengths))])
        flag = self.current_epoch < self.config["warmup_epoch"]
        loss, bce_loss, rankloss = self.loss_fn(logits, rankorder, lengths, flag)
        pred_prob = torch.sigmoid(poi_logits)
        batch_size = len(batch[2])
        self.valid_auc.update(pred_prob, labels)
        self.valid_accuracy.update(pred_prob, labels)
        self.valid_recall.update(pred_prob, labels)
        self.valid_instance_accuracy.update(poi_logits, labels, lengths)
        self.valid_rank_accuracy.update(logits, rankorder, lengths)
        self.log("valid_bceloss_step", bce_loss.item(), batch_size=batch_size)
        self.log("valid_rankloss_step", rankloss.item(), batch_size=batch_size)
        self.log("valid_loss_step", loss.item(), batch_size=batch_size)
        return loss

    def configure_optimizers(self):
        """Build optimizer (with per-module LRs) and scheduler from config.

        The BERT submodule gets its own "bert_lr"; an optional backbone gets
        "backbone_lr"; every other parameter group uses the global "lr".
        """
        Optimizer = plugin.get_plugin("optimizer", self.config["optimizer"])
        other_module_parameters = self.model.get_other_part_parameters()
        param_options = [
            {"params": x} for x in other_module_parameters
        ]
        param_options.append({"params": self.model.bert.parameters(), "lr": self.config["bert_lr"]})
        if hasattr(self.model, "backbone"):
            param_options.append({"params": self.model.backbone.parameters(), "lr": self.config["backbone_lr"]})
        kwargs = {} if "optimizer_params" not in self.config else self.config["optimizer_params"]
        optimizer = Optimizer(
            param_options, lr=self.config["lr"], **kwargs
        )
        Scheduler = plugin.get_plugin("scheduler", self.config["scheduler"])
        scheduler = Scheduler(optimizer, **self.config["scheduler_params"])
        return [optimizer], [scheduler]

    def training_epoch_end(self, outputs) -> None:
        """Aggregate, log and reset the train metrics at epoch end.

        ``outputs`` is the list of per-step dicts ({"loss": tensor}).
        """
        total_loss = torch.mean(torch.Tensor([x["loss"] for x in outputs]))
        total_loss = total_loss.to(self.device)
        train_accu = self.train_accuracy.compute()
        train_recall = self.train_recall.compute()
        instance_accuracy = self.train_instance_accuracy.compute()
        total_accuracy = self.train_rank_accuracy.compute()
        try:
            # AUROC raises ValueError when only one class was seen this epoch.
            train_auc = self.train_auc.compute()
        except ValueError:
            train_auc = 0
        self.train_rank_accuracy.reset()
        self.train_accuracy.reset()
        self.train_auc.reset()
        self.train_recall.reset()
        self.train_instance_accuracy.reset()
        self.log("train_accuracy", train_accu)
        self.log("train_rank_perfect_accuracy", total_accuracy)
        self.log("train_auroc", train_auc)
        self.log("train_instance_accuracy", instance_accuracy)
        self.log("train_loss", total_loss)
        self.log("train_recall", train_recall)

    def validation_epoch_end(self, outputs) -> None:
        """Aggregate, log and reset the validation metrics at epoch end.

        ``outputs`` is the list of per-step loss tensors.  "valid_score"
        (rank accuracy * instance accuracy) is the composite quantity the
        --monitor=valid_score CLI option watches.
        """
        valid_accu = self.valid_accuracy.compute()
        total_loss = torch.mean(torch.Tensor(outputs))
        total_loss = total_loss.to(self.device)
        valid_recall = self.valid_recall.compute()
        total_accuracy = self.valid_rank_accuracy.compute()
        try:
            # AUROC raises ValueError when only one class was seen this epoch.
            valid_auc = self.valid_auc.compute()
        except ValueError:
            valid_auc = 0
        instance_accuracy = self.valid_instance_accuracy.compute()
        self.valid_rank_accuracy.reset()
        self.valid_accuracy.reset()
        self.valid_auc.reset()
        self.valid_recall.reset()
        self.valid_instance_accuracy.reset()
        self.log("valid_loss", total_loss)
        self.log("valid_rank_perfect_accuracy", total_accuracy)
        self.log("valid_accuracy", valid_accu)
        self.log("valid_auroc", valid_auc)
        self.log("valid_score", total_accuracy * instance_accuracy)
        self.log("valid_instance_accuracy", instance_accuracy)
        self.log("valid_recall", valid_recall)


def parse_args(argv=None):
    """Parse command-line options for the training script.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``
            (passing an explicit list makes the function unit-testable).

    Returns:
        argparse.Namespace with: config (JSON path), gpus, num_workers,
        seed, monitor (early-stopping quantity), do_eval (False when
        --no_eval is given), num_epochs (epoch override for no-eval runs).
    """
    parser = ArgumentParser()
    parser.add_argument("--config", default="configs/OCRFilterRanker_alpha1_3_relu_512_4_e200.json")
    parser.add_argument("--gpus", type=int, default=1)
    parser.add_argument("--num_workers", type=int, default=8)
    parser.add_argument("--seed", type=int, default=12345)
    parser.add_argument("--monitor", type=str, choices=["valid_score", "valid_loss"], default="valid_loss")
    # --no_eval disables the train/validation split (train on all data).
    parser.add_argument("--no_eval", dest="do_eval", action="store_false")
    parser.add_argument("--num_epochs", type=int, default=-1)
    parser.set_defaults(do_eval=True)

    return parser.parse_args(argv)


def main():
    """Train OCRFilterNet from a JSON config.

    Loads ./preprocess_data/train.json, optionally splits off a validation
    set, builds dataloaders, then fits with PyTorch Lightning (DDP).
    """
    args = parse_args()
    pl.seed_everything(args.seed)

    # Load training data.
    train_file = "./preprocess_data/train.json"
    with open(train_file, "r", encoding="utf-8") as f:
        train_data = json.load(f)
    # Shuffle before slicing into train / validation sets.
    random.shuffle(train_data)
    # Load the config file.
    with open(args.config, "r", encoding="utf-8") as f:
        config = json.load(f)
    logging.info("Config Info:")
    for key in config:
        logging.info(f"{key}: {config[key]}")

    val_size = int(config["val_size"] * len(train_data)) if args.do_eval else 0
    # Build datasets and dataloaders.
    dataset_args = config.get("dataset_args", {})
    trainset = POIDataset(train_data[val_size:], **dataset_args)
    valset = POIDataset(train_data[:val_size], **dataset_args)
    tokenizer = BertTokenizer.from_pretrained(config["pretrained_path"], local_files_only=True)
    collator = MyCollator(tokenizer)
    train_loader = DataLoader(trainset, config["batch_size"], shuffle=True, num_workers=args.num_workers, collate_fn=collator, pin_memory=True)
    val_loader = DataLoader(valset, config["batch_size"], num_workers=args.num_workers, collate_fn=collator, pin_memory=True)
    # Warmup schedule: total optimizer steps = ceil(batches per epoch) * epochs.
    # (Fix: the original used plain float division, handing a fractional
    # num_training_steps to the scheduler.)
    steps_per_epoch = -(-len(trainset) // config["batch_size"])
    num_steps = steps_per_epoch * config["epochs"]
    config["scheduler_params"]["num_training_steps"] = num_steps
    config["scheduler_params"]["num_warmup_steps"] = int(num_steps * config["warmup_ratio"])
    # Build the model.
    trainnet = OCRFilterNet(config)
    logging.info("Model Initialize Done")
    logging.info(trainnet)
    # Training setup: derive a run name from the config path.
    modelname = args.config.replace(".json", "").replace(".", "").replace("/", "").replace("configs", "")
    tb_logger = pl_loggers.TensorBoardLogger(
        os.path.join("./lightning_logs", modelname, f"seed-{args.seed}-{datetime.now()}")
    )
    if args.do_eval:
        # Checkpoint and early stopping both track the quantity selected by
        # --monitor.  (Fix: the original hard-coded monitor="valid_loss",
        # mode="min" for the checkpoint even when --monitor valid_score was
        # requested, contradicting the dirpath naming and the early stopper.)
        monitor_mode = "max" if args.monitor == "valid_score" else "min"
        checkpoint_callback = ModelCheckpoint(
            dirpath=f"./models/{modelname}-seed{args.seed}-{args.monitor}",
            monitor=args.monitor,
            mode=monitor_mode,
            save_last=True,
            filename="model-{epoch:02d}-{valid_loss:.3f}-{valid_instance_accuracy:.3f}-{valid_rank_perfect_accuracy:.3f}"
        )

        early_stop_monitor = EarlyStoppingWithWarmup(
            monitor=args.monitor,
            min_delta=0,
            patience=config["patience"],
            mode=monitor_mode,
            warmup=config["warmup_epoch"]
        )
        trainer = pl.Trainer(
            callbacks=[early_stop_monitor, checkpoint_callback],
            gpus=args.gpus,
            logger=tb_logger,
            gradient_clip_val=config["clip"],
            max_epochs=config["epochs"],
            strategy="ddp",
            auto_select_gpus=True
        )
        trainer.fit(trainnet, train_loader, val_loader)
    else:
        # No validation split: train on everything for exactly num_epochs
        # and save a single final checkpoint.
        trainer = pl.Trainer(
            gpus=args.gpus,
            logger=tb_logger,
            strategy="ddp",
            auto_select_gpus=True,
            min_epochs=args.num_epochs,
            max_epochs=args.num_epochs,
            default_root_dir=f"./models/{modelname}-seed{args.seed}-full"
        )
        trainer.fit(trainnet, train_loader)
        trainer.save_checkpoint(f"./models/{modelname}-seed{args.seed}-full.ckpt")



# Script entry point: run training when executed directly.
if __name__ == "__main__":
    main()
    

    
    
