import torch
from argparse import ArgumentParser
from model.networks import define_G
from data import build_dataloader
from tqdm import tqdm

from utils.metrics import runningScore
from utils.my_logging import Logger
from segformer.cd_transformer import Segformer


# Compute device for all tensors/models below: CUDA when available, else CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"


def parse_args(argv=None):
    """Parse command-line options for training/evaluating the model.

    Args:
        argv: optional list of argument strings; ``None`` (the default)
            reads ``sys.argv[1:]`` as before, so existing callers are
            unaffected.

    Returns:
        argparse.Namespace with the options defined below.
    """

    def str2bool(value):
        # ``type=bool`` is a well-known argparse trap: bool("False") is True
        # because any non-empty string is truthy. Parse common spellings
        # explicitly; argparse converts the ValueError into a usage error.
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ("yes", "true", "t", "y", "1"):
            return True
        if lowered in ("no", "false", "f", "n", "0"):
            return False
        raise ValueError(f"boolean value expected, got {value!r}")

    parser = ArgumentParser()
    parser.add_argument("--num_workers", default=4, type=int)
    parser.add_argument("--root_dir", default=None, type=str)
    parser.add_argument("--batch_size", default=8, type=int)
    parser.add_argument("--lr", default=1e-4, type=float)
    parser.add_argument("--epochs", default=100, type=int)
    parser.add_argument(
        "--net_G", default="base_transformer_pos_s4_dd8_dedim8", type=str
    )
    parser.add_argument("--last_epoch", default=-1, type=int)
    # F1 is a fraction in [0, 1]; ``type=int`` would truncate a resumed value.
    parser.add_argument("--best_f1", default=0, type=float)
    parser.add_argument("--checkpoint", default=None, type=str)
    parser.add_argument("--do", default="train", type=str)
    parser.add_argument("--in_channels", default=3, type=int)
    parser.add_argument("--pretrained", default=False, type=str2bool)

    args = parser.parse_args(argv)
    return args


def build_model(args):
    """Instantiate the network selected by ``args.net_G``.

    Args:
        args: parsed command-line namespace; ``net_G`` picks the
            architecture, ``pretrained`` is forwarded to the SegFormer
            encoder config.

    Returns:
        The constructed model (not yet moved to a device).

    Raises:
        ValueError: if ``args.net_G`` names an unknown architecture.
    """
    if args.net_G == "base_transformer_pos_s4_dd8_dedim8":
        return define_G(args)
    if args.net_G == "segformer":
        cfg_segformer = dict(
            encode_config=dict(type="mit_b3", pretrained=args.pretrained),
            decoder_config=dict(
                in_channels=[64, 128, 320, 512],
                in_index=[0, 1, 2, 3],
                feature_strides=[4, 8, 16, 32],
                embed_dim=768,
                num_classes=2,  # binary change detection: changed / unchanged
                dropout_ratio=0.1,
            ),
        )
        return Segformer(**cfg_segformer)
    # Previously an unrecognized name fell through and returned None, which
    # only crashed later at ``.to(device)``. Fail fast with a clear message.
    raise ValueError(f"Unknown net_G: {args.net_G!r}")


def train(model, args):
    """Train ``model`` on the change-detection data described by ``args``.

    Runs a cross-entropy training loop with Adam + StepLR, validates after
    every epoch, and saves the model whenever the mean F1 on the validation
    split improves on the best seen so far.

    Args:
        model: network taking two image tensors and returning class logits.
        args: parsed command-line namespace (see ``parse_args``).
    """
    # Honor --best_f1 so a resumed run (see also --last_epoch, forwarded to
    # the scheduler below) cannot overwrite a better checkpoint with a worse
    # one. Previously this was hard-coded to 0 and the flag was ignored.
    best_f1 = args.best_f1
    trainloader = build_dataloader(args, split="train")
    valloader = build_dataloader(args, split="val")
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-5)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer, step_size=10, gamma=0.5, last_epoch=args.last_epoch
    )
    logger = Logger(args.net_G)
    logger.info(args)
    loss_func = torch.nn.CrossEntropyLoss(reduction="mean")
    epoch = 0
    step = 0  # batches seen this epoch; reset after each epoch's report
    report_loss = 0.0  # summed batch losses this epoch
    loss_list = []
    epoch_list = []
    # Ensure training mode explicitly; eval() below flips it back after
    # each validation pass.
    model.train()
    while epoch < args.epochs:
        epoch += 1
        logger.info(f"Epoch:{epoch}/{args.epochs}, lr={lr_scheduler.get_last_lr()}")
        for batch in tqdm(trainloader, desc=f"Train:{epoch}/{args.epochs}", ncols=100):
            optimizer.zero_grad()
            x1, x2, mask = (
                batch["A"].to(device),
                batch["B"].to(device),
                batch["L"].to(device),
            )
            pred = model(x1, x2)
            loss = loss_func(pred, mask)
            report_loss += loss.item()
            loss.backward()
            optimizer.step()
            step += 1
        # Mean loss over the epoch's batches.
        logger.info(f"Train loss = {report_loss / step}")
        loss_list.append(report_loss / step)
        epoch_list.append(epoch)
        step = 0
        report_loss = 0.0
        lr_scheduler.step()
        logger.plot_loss(epoch_list, loss_list)
        score, mean_f1 = eval(model, valloader=valloader)
        if mean_f1 > best_f1:
            best_f1 = mean_f1
            logger.save_model(model)
            for k, v in score.items():
                logger.info(f"{k}{v}")
    logger.log_finish()


def eval(model, args=None, valloader=None):
    """Evaluate ``model`` on the validation split with flip-based TTA.

    Logits from the original image pair and from the pair flipped along the
    last dim, the second-to-last dim, and both (presumably horizontal /
    vertical flips of NCHW images — confirm against the dataloader) are
    summed before argmax, then accumulated into ``runningScore``.

    Args:
        model: network mapping two image tensors to per-class logits.
        args: when given, a "val" dataloader is built from it (overrides
            ``valloader``).
        valloader: iterable of batches with keys "A", "B" (images) and "L"
            (ground-truth mask, kept on CPU).

    Returns:
        Tuple ``(score, mean_f1)`` from ``runningScore.get_scores()``.
    """
    model.eval()
    if args:
        valloader = build_dataloader(args, split="val")
    running_metrics_val = runningScore(2)
    with torch.no_grad():
        for batch in tqdm(valloader, desc="Valid", ncols=100):
            x1, x2, mask = (
                batch["A"].to(device),
                batch["B"].to(device),
                batch["L"],  # stays on CPU; only consumed as numpy below
            )
            pred_img_1 = model(x1, x2)

            # Flip along the last dim, predict, and undo the flip so the
            # prediction aligns with the unflipped ground truth.
            pred_img_2 = model(torch.flip(x1, [-1]), torch.flip(x2, [-1]))
            pred_img_2 = torch.flip(pred_img_2, [-1])

            # Same for the second-to-last dim...
            pred_img_3 = model(torch.flip(x1, [-2]), torch.flip(x2, [-2]))
            pred_img_3 = torch.flip(pred_img_3, [-2])

            # ...and for both dims together.
            pred_img_4 = model(torch.flip(x1, [-1, -2]), torch.flip(x2, [-1, -2]))
            pred_img_4 = torch.flip(pred_img_4, [-1, -2])

            # Summing logits is an unnormalized ensemble average; argmax is
            # unaffected by the missing division by 4.
            pred_sum = pred_img_1 + pred_img_2 + pred_img_3 + pred_img_4
            pred = torch.argmax(pred_sum.cpu(), 1).byte().numpy()
            gt = mask.numpy()  # was mask.data.numpy(); ``.data`` is deprecated
            running_metrics_val.update(gt, pred)
    score, mean_f1, _ = running_metrics_val.get_scores()
    model.train()
    for k, v in score.items():
        print(f"{k}{v}")
    return score, mean_f1


def main():
    """Script entry point: build the selected model, then train or evaluate."""
    args = parse_args()
    model = build_model(args).to(device)
    if args.do == "train":
        train(model, args)
    else:
        # Evaluation-only runs restore weights from --checkpoint first.
        model.load_state_dict(torch.load(args.checkpoint, map_location="cpu"))
        eval(model, args)


if __name__ == "__main__":
    main()
