import torch
import json
from tqdm import tqdm

from utils.my_logging import Logger
from data import build_dataloader
from model import build_model
from utils.metrics import runningScore
from argparse import ArgumentParser
import numpy as np
import random


def setup_seed(seed):
    """Seed every RNG source (stdlib, NumPy, torch CPU/CUDA) for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Force deterministic cuDNN kernels so repeated runs match exactly.
    torch.backends.cudnn.deterministic = True


# Fix the global random seed at import time.
setup_seed(666)


def parse_args():
    parser = ArgumentParser()
    parser.add_argument("--num_workers", default=4, type=int)
    parser.add_argument("--root_dir", default=None, type=str, required=True)
    parser.add_argument("--batch_size", default=8, type=int)
    parser.add_argument("--lr", default=1e-4, type=float)
    parser.add_argument(
        "--net_G", default="base_transformer_pos_s4_dd8_dedim8", type=str
    )
    parser.add_argument("--last_epoch", default=-1, type=int)
    parser.add_argument("--best_f1", default=0, type=int)
    parser.add_argument("--in_channels", default=3, type=int)
    parser.add_argument("--pretrained", default=None, type=str)
    parser.add_argument("--device", default="cuda:0", type=str)
    parser.add_argument("--restore", default=False, type=bool)
    parser.add_argument("--restore_path", default=None, type=str)
    parser.add_argument("--last_step", default=0, type=int)
    parser.add_argument("--total_steps", default=80000, type=int)
    parser.add_argument("--eval_steps", default=1000, type=int)
    parser.add_argument("--n_classes", default=2, type=int)
    parser.add_argument("--eval", default=False, type=bool)

    args = parser.parse_args()
    return args


class Trainer(object):
    """Step-based trainer with periodic flip-TTA validation and early stopping.

    Trains for ``total_steps`` optimizer steps; every ``eval_steps`` steps
    (when ``--eval`` is set) the validation set is scored and the best-F1
    checkpoint is saved through the logger.
    """

    def __init__(self, args):
        self.args = args
        self.logger = Logger(args.net_G)
        self.model = build_model(args)
        self._unpack_train_config(args)
        self.loss_func = torch.nn.CrossEntropyLoss(reduction="mean")
        self.train_loader = build_dataloader(args, split="train")
        self.logger.info(f"Train Dataset num: {len(self.train_loader)}")
        if self.do_eval:
            self.val_loader = build_dataloader(args, split="val")
            self.logger.info(f"Eval Dataset num: {len(self.val_loader)}")
        # Honor a resume-time starting score instead of silently resetting to 0
        # (args.best_f1 was previously parsed but never used).
        self.best_f1 = args.best_f1
        # Initialized here so log_finish() cannot raise AttributeError when
        # evaluation never runs or never improves.
        self.best_iou = 0
        self.early_stopping = 0
        self.step_list, self.mf1_list, self.mIoU_list = [], [], []

    def train(self):
        """Run the optimization loop until total_steps or early stopping."""
        self.logger.info(f"\n{self.args}")
        optimizer = torch.optim.Adam(
            [{"params": self.model.parameters(), "initial_lr": self.lr}],
            lr=self.lr,
            weight_decay=1e-5,
        )
        # One scheduler "epoch" == one evaluation interval; LR halves every 10
        # evaluations. last_epoch lets a restored run resume the schedule.
        lr_scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer,
            step_size=10,
            gamma=0.5,
            last_epoch=self.last_step // self.eval_steps,
        )
        self.model.to(self.device)
        train_iter = iter(self.train_loader)
        report_loss = 0  # accumulated but currently unreported
        for step in tqdm(
            range(self.last_step, self.total_steps), desc="Train", ncols=150
        ):
            try:
                batch = next(train_iter)
            except StopIteration:
                # Dataloader exhausted: start a new pass over the training set.
                train_iter = iter(self.train_loader)
                batch = next(train_iter)
            x1, x2, mask = (
                batch["A"].to(self.device),
                batch["B"].to(self.device),
                batch["L"].to(self.device),
            )
            optimizer.zero_grad()
            pred_img = self.model(x1, x2)
            loss = self.loss_func(pred_img, mask)
            report_loss += loss.item()
            loss.backward()
            optimizer.step()
            # Periodic validation. The flag is `do_eval`: the original stored
            # it as `self.eval`, which shadowed this class's eval() method and
            # made `self.eval()` raise TypeError ('bool' is not callable).
            if (step + 1) % self.eval_steps == 0 and self.do_eval:
                self.eval()
                lr_scheduler.step()
                self.step_list.append(step + 1)
                self.logger.plot_acc(self.step_list, self.mIoU_list, self.mf1_list)
                if self.early_stopping > 10:
                    break

        self.logger.save_model(self.model)
        self.logger.log_finish(self.best_iou)

    def eval(self):
        """Score the validation set with 4-way flip TTA.

        Saves a checkpoint and resets the early-stopping counter whenever the
        mean F1 improves.

        Returns:
            (score_dict, mean_f1, mIoU) from the running metric.
        """
        self.model.eval()

        running_metrics_val = runningScore(self.n_classes)
        with torch.no_grad():
            for batch in tqdm(self.val_loader, desc="Valid", ncols=150):
                x1, x2, mask = (
                    batch["A"].to(self.device),
                    batch["B"].to(self.device),
                    batch["L"],  # labels stay on CPU for the numpy metrics
                )

                # Test-time augmentation: sum logits over identity, horizontal
                # flip, vertical flip, and both flips (180° rotation), undoing
                # each flip on the prediction before accumulating.
                pred_img_1 = self.model(x1, x2)

                pred_img_2 = self.model(torch.flip(x1, [-1]), torch.flip(x2, [-1]))
                pred_img_2 = torch.flip(pred_img_2, [-1])

                pred_img_3 = self.model(torch.flip(x1, [-2]), torch.flip(x2, [-2]))
                pred_img_3 = torch.flip(pred_img_3, [-2])

                pred_img_4 = self.model(
                    torch.flip(x1, [-1, -2]), torch.flip(x2, [-1, -2])
                )
                pred_img_4 = torch.flip(pred_img_4, [-1, -2])

                pred_sum = pred_img_1 + pred_img_2 + pred_img_3 + pred_img_4
                pred_labels = torch.argmax(pred_sum.cpu(), 1).byte().numpy()
                gt = mask.data.numpy()
                running_metrics_val.update(gt, pred_labels)
        score, mean_f1, mIoU = running_metrics_val.get_scores()
        self.mf1_list.append(mean_f1)
        self.mIoU_list.append(mIoU)
        self.logger.info(f"---------------------------------------")
        for k, v in score.items():
            self.logger.info(f"{k}{v}")
        if mean_f1 > self.best_f1:
            # New best: checkpoint and reset the early-stopping counter.
            self.logger.save_model(self.model)
            self.best_f1 = mean_f1
            self.early_stopping = 0
            self.best_iou = mIoU
        else:
            self.early_stopping += 1
        self.model.train()
        return score, mean_f1, mIoU

    def _unpack_train_config(self, args):
        """Copy training hyper-parameters from args onto the trainer."""
        self.device = torch.device(args.device)
        self.lr = args.lr
        self.total_steps = args.total_steps
        self.eval_steps = args.eval_steps
        self.last_step = 0
        self.restore = args.restore
        self.restore_path = args.restore_path
        self.n_classes = args.n_classes
        # Stored as `do_eval`: assigning to `self.eval` (as the original did)
        # shadows the eval() method and breaks the training loop.
        self.do_eval = args.eval
        if self.restore:
            self.last_step = args.last_step
            self._restore()

    def _restore(self):
        """Load a checkpoint, keeping only tensors whose name AND shape match.

        Keys missing from the checkpoint retain the model's current weights,
        so a strict load_state_dict cannot fail on missing keys.
        """
        state = torch.load(self.restore_path, map_location="cpu")
        model_state = self.model.state_dict()
        # Seed with the model's own weights: every model key is guaranteed
        # present (the original built new_state only from checkpoint keys,
        # which made strict loading fail on any model key absent there).
        new_state = dict(model_state)
        excepted = []
        loaded = []
        for k, v in state.items():
            # A key is either loaded or excepted, never both (the original
            # appended every key to `excepted`, duplicating mismatches and
            # mislabeling successfully loaded keys).
            if k in model_state and v.shape == model_state[k].shape:
                new_state[k] = v
                loaded.append(k)
            else:
                excepted.append(k)
        self.model.load_state_dict(new_state)
        print(f"Restored from {self.last_step} step, loaded: {loaded} \nexcepted:{excepted}")
        self.logger.info(f"Restored from {self.last_step} step, loaded: {loaded} \nexcepted:{excepted}")


if __name__ == "__main__":
    # Script entry point: parse CLI options and run training.
    cli_args = parse_args()
    Trainer(cli_args).train()
