from tqdm import tqdm
from lib.utils import (
    Logger,
    focal_loss,
    get_train_process,
    set_seed
)
import os
from typing import List, Union
from lib.models_star import Net
from torch.optim import AdamW
import torch
from datasets import datasets_pe as ds
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torch.optim.lr_scheduler import StepLR

class TrainConfig:
    """Flatten a nested configuration dict into typed attributes.

    Expects the top-level kwargs plus a nested ``kwargs['train']`` sub-dict
    holding the optimization hyperparameters. All values are eagerly coerced
    so that type errors surface at construction time, not mid-training.
    """

    def __init__(self, **kwargs):
        train_conf = kwargs['train']
        self.dataset: str = kwargs["dataset"]
        # "tt" = fixed train/test split; any other value triggers the
        # fold-rotation branch in Trainer.train().
        self.run_mode: Union[str, int] = kwargs["run_mode"]
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.epochs: int = int(train_conf["epochs"])
        self.batch_size: int = int(train_conf["batch_size"])
        self.lr: float = float(train_conf["lr"])
        self.weight_decay: float = float(train_conf["weight_decay"])
        self.with_cls: bool = kwargs["with_cls"]
        self.cls_num: int = int(kwargs["cls_num"])
        self.aug = train_conf["aug"]  # augmentation preset passed to get_train_process
        self.re_gen: bool = train_conf["re_gen"]
        self.load_from_memory: bool = train_conf["load_from_memory"]
        self.seed: int = int(train_conf["seed"])
        self.tqdm = train_conf["tqdm"]  # truthy → show progress bars
        self.data_dir: str = kwargs["data_dir"]
        self.save_path: str = kwargs["save_path"]
        self.model_name: str = kwargs["model_name"]
        # Loss-term weights. Fix: parsed as float (previously int(), which
        # truncated fractional weights such as 0.5 to 0 and raised ValueError
        # on strings like "0.5"). Integer-valued configs are unaffected.
        self.ce_weight: float = float(train_conf["ce_weight"])
        self.mse_weight: float = float(train_conf["mse_weight"])
        self.l1_weight: float = float(train_conf["l1_weight"])
        self.n_rays: int = int(kwargs["n_rays"])
        self.with_pe: bool = bool(kwargs["with_pe"])
        self.with_sff: bool = bool(kwargs["with_sff"])
        self.ratio: int = int(kwargs["ratio"])
        self.load_model: bool = bool(train_conf["load_model"])
        self.load_model_path: str = str(train_conf["load_model_path"])

class Trainer:
    """Train Net on a nuclei-segmentation dataset.

    Supports two run modes:
      * ``run_mode == "tt"``: a single fixed train/test split.
      * anything else: 3-fold rotation (PanNuke / Lizard style) — train on
        fold ``t`` and validate on folds ``v1`` and ``v2`` separately.
    """

    def __init__(self, config: TrainConfig):
        set_seed(config.seed)
        self.config = config
        os.makedirs(self.config.save_path, exist_ok=True)
        # NOTE(review): log path is hard-coded under "checkpoints/" rather than
        # derived from config.save_path — confirm this is intentional.
        self.logger = Logger(f"checkpoints/{self.config.dataset}/log.txt")
        self.model = self._build_model()
        self.optimizer = self._build_optimizer()
        # Currently unused (scheduler.step() is never called). The attribute
        # keeps its original (misspelled) name for backward compatibility.
        self.shcheduler = StepLR(self.optimizer, step_size=50, gamma=0.998)
        self.run_mode = self.config.run_mode
        self.train_transform, self.val_transform = get_train_process(self.config.aug)

    def _build_model(self):
        """Create a fresh Net on the configured device."""
        return Net(
            n_rays=self.config.n_rays,
            with_pe=self.config.with_pe,
            with_sff=self.config.with_sff,
            ratio=self.config.ratio,
        ).to(self.config.device)

    def _build_optimizer(self):
        """Create an AdamW optimizer over the current model's parameters."""
        return AdamW(
            self.model.parameters(),
            lr=self.config.lr,
            weight_decay=self.config.weight_decay,
        )

    def _load_pretrained(self, path: str) -> None:
        """Load pretrained weights from ``path`` non-strictly.

        Drops the shape-dependent heads first: any "out_ray" entry (depends on
        n_rays) and the final out_layer weights, which must be re-initialized
        for the current dataset.
        """
        state = torch.load(path)
        for key in [k for k in state if "out_ray" in k]:
            del state[key]
        del state["out_layer.1.bias"], state["out_layer.1.weight"]
        self.model.load_state_dict(state, strict=False)
        # NOTE: prints the configured path, which in fold mode is not the full
        # joined path actually loaded (preserves original logging behavior).
        print(f"Load model from {self.config.load_model_path}")

    def get_dataloader(self, f1=-1, f2=-1, f3=-1):
        """Build dataloaders for the configured dataset.

        With default fold ids (all -1): returns ``(train_loader, val_loader)``
        for the plain train/test split. With explicit fold ids: returns
        ``(train_loader, [val_loader_f2, val_loader_f3])`` for fold rotation.
        """
        dataset_cls = getattr(ds, self.config.dataset)
        train_data = dataset_cls(
            base_dir=self.config.data_dir,
            re_gen=self.config.re_gen,
            mode="train",
            load_from_memory=self.config.load_from_memory,
            transform=self.train_transform,
            n_rays=self.config.n_rays,
            with_pe=self.config.with_pe,
            fold_id=f1,
        )
        train_loader = DataLoader(
            train_data,
            batch_size=self.config.batch_size,
            shuffle=True,
            num_workers=2,
            persistent_workers=True,
        )
        val_data = dataset_cls(
            base_dir=self.config.data_dir,
            re_gen=self.config.re_gen,
            mode="test",
            # In fold mode the validation fold is always re-read from disk.
            load_from_memory=self.config.load_from_memory if f1 == -1 else False,
            transform=self.val_transform,
            n_rays=self.config.n_rays,
            with_pe=False,
            fold_id=f2,
        )
        val_loader = DataLoader(
            val_data,
            batch_size=1,
            shuffle=False,
            num_workers=1,
            persistent_workers=True,
        )

        if f1 == -1:
            return train_loader, val_loader

        # Fold mode (PanNuke / Lizard): second validation fold.
        val_data2 = dataset_cls(
            base_dir=self.config.data_dir,
            re_gen=self.config.re_gen,
            mode="test",
            load_from_memory=False,
            transform=self.val_transform,
            n_rays=self.config.n_rays,
            with_pe=False,
            fold_id=f3,
        )
        val_loader2 = DataLoader(val_data2, batch_size=1, shuffle=False)
        return train_loader, [val_loader, val_loader2]

    def train(self):
        """Run the full training loop for the configured run mode."""

        def train_val_once(loader, is_train: bool = True):
            """One pass over ``loader``; returns (mean loss, mean binary IoU)."""
            self.model.train(is_train)
            losses, ious = [], []
            for images, *labels in tqdm(loader, disable=not self.config.tqdm):
                self.optimizer.zero_grad()
                images = images.to(self.config.device)
                inst_masks = labels[0].to(self.config.device)
                pred_mask, *temp = self.model(images)
                # temp[0]: channel 0 is the distance map, remaining channels
                # are the per-ray distances.
                pred_dist = temp[0][:, 0, :, :]
                pred_distance = temp[0][:, 1:, :, :]
                if is_train:
                    dist_target = labels[1].to(self.config.device)
                    distance = labels[2].to(self.config.device)
                    loss = (
                        self.config.ce_weight * F.cross_entropy(pred_mask, inst_masks)
                        + focal_loss(pred_mask, inst_masks)
                        + self.config.mse_weight * F.mse_loss(pred_dist, dist_target)
                        + self.config.l1_weight * F.smooth_l1_loss(pred_distance, distance)
                    )
                    if self.config.with_pe:
                        # Auxiliary positional-encoding regression on two heads.
                        pe = labels[3].to(self.config.device)
                        pe1, pe2 = temp[1].squeeze(1), temp[2].squeeze(1)
                        loss += F.mse_loss(pe1, pe) + F.mse_loss(pe2, pe)
                    loss.backward()
                    self.optimizer.step()
                else:
                    # Validation scores only the binarized mask prediction.
                    inst_masks = inst_masks.bool().long()
                    loss = (
                        self.config.ce_weight * F.cross_entropy(pred_mask, inst_masks)
                        + focal_loss(pred_mask, inst_masks)
                    )
                losses.append(loss.item())
                pred_mask_bool = pred_mask.argmax(1).bool()
                targets_bool = inst_masks.bool()
                iou = (pred_mask_bool & targets_bool).sum() / (pred_mask_bool | targets_bool).sum()
                if not torch.isnan(iou):  # skip batches with empty union
                    ious.append(iou.item())
            return sum(losses) / len(losses), sum(ious) / len(ious)

        name = os.path.join(
            self.config.save_path,
            self.config.model_name.replace(".", f"_{self.config.n_rays}rays."),
        )
        if self.run_mode == "tt":
            if self.config.load_model:
                self._load_pretrained(self.config.load_model_path)
            train_loader, val_loader = self.get_dataloader()
            interval = 10
            max_iou = 0
            for epoch in range(1, self.config.epochs + 1):
                # Validate more often late in training: shrink the interval
                # every 500 epochs and validate every epoch in the last 700.
                interval = interval - 2 if epoch % 500 == 0 else interval
                interval = 0 if epoch + 700 >= self.config.epochs else interval
                torch.cuda.empty_cache()
                loss, iou = train_val_once(train_loader)
                self.logger.log(f'Epoch {epoch} - Train: loss = {loss:.4f}, iou = {iou:.4f}')
                if interval <= 0 or epoch % interval == 0:
                    torch.cuda.empty_cache()
                    loss, iou = train_val_once(val_loader, False)
                    if iou > max_iou:
                        max_iou = iou
                        torch.save(self.model.state_dict(), name)
                    self.logger.log(f'Epoch {epoch} - Val: loss = {loss:.4f}, iou = {iou:.4f}, best = {max_iou:.4f}')
        else:
            # 3-fold rotation: train on fold t, validate on folds v1 and v2.
            for t, v1, v2 in [(1, 2, 3), (2, 1, 3), (3, 1, 2)]:
                interval = 10
                max_iou = [0, 0]
                save_path = [
                    os.path.join(self.config.save_path, f"{t}{v1}{v2}"),
                    os.path.join(self.config.save_path, f"{t}{v2}{v1}"),
                ]
                if self.config.load_model:
                    self._load_pretrained(
                        os.path.join(save_path[0], self.config.load_model_path)
                    )
                train_loader, val_loader = self.get_dataloader(t, v1, v2)
                for path in save_path:
                    os.makedirs(path, exist_ok=True)
                for epoch in range(1, self.config.epochs + 1):
                    interval = interval - 2 if epoch % 500 == 0 else interval
                    interval = 0 if epoch + 700 >= self.config.epochs else interval
                    torch.cuda.empty_cache()
                    loss, iou = train_val_once(train_loader)
                    self.logger.log(f'Epoch {epoch} - Train: loss = {loss:.4f}, iou = {iou:.4f}')
                    if interval <= 0 or epoch % interval == 0:
                        for i in range(2):
                            torch.cuda.empty_cache()
                            loss, iou = train_val_once(val_loader[i], False)
                            torch.cuda.empty_cache()
                            if iou > max_iou[i]:
                                max_iou[i] = iou
                                torch.save(
                                    self.model.state_dict(),
                                    os.path.join(
                                        save_path[i],
                                        self.config.model_name.replace(".", f"_{self.config.n_rays}rays."),
                                    ),
                                )
                            self.logger.log(f'Epoch {epoch} - Val: loss = {loss:.4f}, iou = {iou:.4f}, best = {max_iou[i]:.4f}')
                            if iou + 0.015 < max_iou[i]:
                                # Skip the second fold if IoU regressed
                                # noticeably from this fold's best.
                                break
                # Re-initialize model and optimizer before the next fold.
                # Fix: original used .cuda() here (crashes on CPU-only runs);
                # use the configured device for consistency with __init__.
                self.model = self._build_model()
                self.optimizer = self._build_optimizer()
