# -*- coding: UTF-8 -*-
# *******************************************************************
# File Name: train_vgg
# > Author: 04000387
# > Created Time: 2024/12/26 11:27
# *******************************************************************
import torch
from torch.utils.data import DataLoader
from torch.nn import Transformer
from torch import optim
from pathlib import Path


class TrainTransformer:
    """Train/evaluate loop for an image-to-sequence transformer.

    The model is expected to consume ``(img, input_ids, attention_mask)`` and
    produce per-token logits whose last 4 channels are box-regression outputs
    (see ``_one_step``).  Batches yielded by the DataLoader must be
    ``(img, content, boxes)`` where ``content`` is a dict with ``input_ids``
    and ``attention_mask`` tensors — TODO confirm against the dataset class.
    """

    def __init__(self, model, loss_fn, optimizer, warmup_step=1500,
                 device="cpu", batch_size=16, test_batch_size=256, show_size=4, src_pad_idx=151646, tgt_pad_idx=151646,
                 save_model_iter=4, collect_fn=None):
        """Store training configuration and attach a warmup LR schedule.

        :param model: the seq2seq model to train (moved to ``device``).
        :param loss_fn: dict with keys ``"output"`` (token loss) and ``"box"``
            (box-regression loss).
        :param optimizer: a torch optimizer already bound to ``model``'s params.
        :param warmup_step: linear-warmup iterations for the LR schedule.
        :param collect_fn: optional ``collate_fn`` for the DataLoaders.
        """
        self.device = torch.device(device)
        self.loss_fn = loss_fn
        self.optimizer = optimizer

        self.save_model_iter = save_model_iter
        self.batch_size = batch_size
        self.test_batch_size = test_batch_size

        self.model = model.to(self.device)

        self.show_size = show_size

        self.src_pad_idx = src_pad_idx
        self.tgt_pad_idx = tgt_pad_idx

        self.init_lr = self.optimizer.param_groups[0]["lr"]
        # NOTE(review): LambdaLR multiplies the lambda's return value by the
        # initial lr, but WarmupPolicy returns an absolute rate — the effective
        # lr is therefore init_lr * policy(iter).  Verify this is intended.
        self.lr_schedule = optim.lr_scheduler.LambdaLR(self.optimizer,
                                                       [WarmupPolicy(self.init_lr, warm_step=warmup_step, min_lr=1e-4)])

        # name -> metric function, filled via set_metrix().
        self.metric = dict()

        self.collect_fn = collect_fn

    def run(self, train_dataset, test_dataset=None, epoch=100):
        """Run the train (and optional test) loop for ``epoch`` epochs.

        Saves ``../checkpoints/best_model.pth`` whenever the accumulated loss
        over the last ``save_model_iter`` steps improves on the best seen.
        """
        # Build one loader per split; the custom collate_fn applies to both.
        # (Fix: previously a separate train loader was iterated for every
        # mode, so "test" evaluation actually ran on the training data, and
        # the train loader in the dict lost the collate_fn.)
        loaders = {"train": DataLoader(train_dataset, batch_size=self.batch_size,
                                       shuffle=True, collate_fn=self.collect_fn)}
        set_item_nums = {"train": len(train_dataset) // self.batch_size}
        if test_dataset is not None:
            loaders["test"] = DataLoader(test_dataset, batch_size=self.test_batch_size,
                                         collate_fn=self.collect_fn)
            set_item_nums["test"] = len(test_dataset) // self.test_batch_size

        best_loss = 1e+9

        for i in range(epoch):
            for mode, data_loader in loaders.items():
                if mode == "train":
                    self.model.train()
                else:
                    self.model.eval()

                # Reset accumulators per mode so test loss does not inherit
                # leftover train loss from the same epoch.
                loss_val = 0.0
                save_loss_val = 0.0

                for k, (img, content, boxes) in enumerate(data_loader, start=1):
                    if mode == "train":
                        self.optimizer.zero_grad()
                    input_ids = content["input_ids"].to(self.device)
                    attention_mask = content["attention_mask"].to(self.device)
                    img = img.to(self.device)

                    boxes = boxes.to(self.device)
                    # Teacher forcing: feed tokens[:-1], predict tokens[1:].
                    loss_step_val, outputs = self._one_step(img,
                                                            input_ids[:, :-1],
                                                            attention_mask[:, :-1],
                                                            input_ids[:, 1:],
                                                            boxes[:, :-1, :],
                                                            mode=mode)

                    loss_val += loss_step_val

                    save_loss_val += loss_step_val

                    if k % self.save_model_iter == 0 and save_loss_val < best_loss:
                        self._check_and_mkdir()
                        torch.save(self.model.state_dict(), "../checkpoints/best_model.pth")

                        print("the best save loss:[{:8.4f}]".format(save_loss_val / self.save_model_iter))
                        # Record the best loss and reset the window accumulator.
                        best_loss = save_loss_val
                        save_loss_val = 0.0

                    metric_string = ""
                    if mode != "train":
                        metric_string = self.cal_metrix(outputs, input_ids[:, 1:])
                    if self.show_size is not None and k % self.show_size == 0:
                        # Fix: report progress against the current split's
                        # batch count, not the train split's.
                        print("mode:{}\t[{:4d}|{:4d} - {:4d}|{:4d}]: loss_val:{:8.4f} - metric score:{}"
                              .format(mode, i + 1, epoch, k // self.show_size, set_item_nums[mode] // self.show_size,
                                      loss_val / self.show_size, metric_string))
                        loss_val = 0.0

    def _one_step(self, *args, **kwargs):
        """Run one forward (and, in train mode, backward) step.

        ``args`` is ``(img, src_ids, src_mask, target_ids, target_boxes)``;
        gradients are enabled only when ``kwargs["mode"] == "train"``.
        Returns ``(loss_value, outputs)``.
        """
        with torch.set_grad_enabled(kwargs["mode"] == "train"):
            outputs = self.model(*args[:-2])
            # All channels except the last 4 are token logits; the last 4 are
            # box-regression outputs.
            loss_output = self.loss_fn["output"](outputs[:, :, :-4].reshape(-1, outputs.shape[-1] - 4), args[-2].reshape(-1))
            box_loss = self.loss_fn["box"](outputs[:, :, -4:], args[-1])

            loss = loss_output + box_loss
        mode = kwargs["mode"]
        loss_step_loss = loss.item()

        # Outside training, return the loss and outputs without updating.
        if mode != "train":
            return loss_step_loss, outputs

        loss.backward()
        self.optimizer.step()

        if self.lr_schedule is not None:
            self.lr_schedule.step()

        return loss_step_loss, outputs

    @staticmethod
    def _check_and_mkdir():
        """Create the checkpoint directory if it does not exist yet."""
        path = Path("../checkpoints")
        if not path.exists():
            path.mkdir(parents=True)

    def generate_key_padding_mask(self, _src: torch.Tensor, padding_idx):
        """Return ``(key_padding_mask, causal_mask)`` for ``_src``.

        Padding positions get ``-inf`` (additive mask); the causal mask is the
        standard square subsequent mask for sequence length ``_src.size(1)``.
        """
        _res = torch.zeros_like(_src, dtype=torch.float32, device=self.device)
        _res[_src == padding_idx] = 1

        _res = _res.masked_fill(_res == 1, -torch.inf)

        mask = Transformer.generate_square_subsequent_mask(_src.size(1), device=self.device)
        return _res, mask

    def cal_metrix(self, outputs, targets):
        """Format all registered metrics for ``outputs`` vs ``targets``.

        Returns e.g. ``"[acc :  1.0000\\tf1  :  0.9000]"``.  (Fix: the old
        ``s + "\\t"`` was a no-op expression, so ``s[:-1]`` truncated the last
        digit of the final metric instead of a trailing tab.)
        """
        template = "{:4s}:{:8.4f}"

        # argmax over the vocab dimension -> predicted token ids.
        _, preds = torch.max(outputs.reshape(-1, outputs.shape[-1]).detach().cpu(), -1)
        parts = []
        for name, fn in self.metric.items():
            val = fn(preds.numpy().flatten(),
                     targets.reshape(-1).detach().cpu().numpy())
            parts.append(template.format(name, val))

        return "[" + "\t".join(parts) + "]"

    def set_metrix(self, **kwargs):
        """Register metric functions by name, e.g. ``set_metrix(acc=fn)``."""
        self.metric = kwargs


class WarmupPolicy:
    """Linear-warmup learning-rate policy for use with ``LambdaLR``.

    For the first ``warm_step`` iterations the rate ramps linearly from 0 up
    to ``lr``; after warmup, each call decays the previously issued rate by
    raising it to the power 1.0001 (rates below 1 shrink slightly each step).
    The returned rate never drops below ``min_lr``.
    """

    def __init__(self, lr, min_lr=1e-5, warm_step=1000):
        self.lr = lr
        self.min_lr = min_lr
        self.warm_step = warm_step
        # Most recently issued rate; seeds the post-warmup decay.
        self.buffer = lr

    def __call__(self, cur_iter):
        """Return the learning rate for iteration ``cur_iter``."""
        if cur_iter < self.warm_step:
            # Linear ramp: fraction of warmup completed times the base rate.
            rate = self.lr * (cur_iter / self.warm_step)
        else:
            # Gentle decay from the last rate we handed out.
            rate = self.buffer ** 1.0001

        rate = max(rate, self.min_lr)
        self.buffer = rate
        return rate
