import argparse
import numpy as np
import torch
import nni

import nni.retiarii.strategy as strategy
from nni.retiarii.experiment import RetiariiExperiment, RetiariiExeConfig
from nni.retiarii.evaluator import FunctionalEvaluator

from unet_with_nas import UNet
from tqdm import tqdm
from train import data_loaders, dsc_per_volume, parse_args
from loss import DiceLoss

# Command-line interface: reuse the shared training options from train.py,
# then extend them with the NAS-specific search options below.
parser = argparse.ArgumentParser(description="Train and search best UNet arch")
parse_args(parser)

# (flag, add_argument kwargs) pairs for the search-specific options.
_search_options = (
    (
        "--search-strategy",
        dict(
            type=str,
            default="PolicyBasedRL",
            choices=["PolicyBasedRL", "RegularizedEvolution"],
            help="Search Strategy",
        ),
    ),
    (
        "--trial-concurrency",
        dict(type=int, default=1, help="trial concurrency"),
    ),
    (
        "--max-trial-number",
        dict(type=int, default=20, help="max trial number"),
    ),
)
for _flag, _kwargs in _search_options:
    parser.add_argument(_flag, **_kwargs)

args = parser.parse_args()

# Epochs without validation improvement before training stops early.
# NOTE(review): name kept as-is ("stoping"/"patient" are existing typos)
# because other code in this file references it by this exact name.
early_stoping_patient = 5


def evaluate_model(model_class):
    """Train one candidate UNet architecture and report its validation DSC to NNI.

    Used as the body of the NNI ``FunctionalEvaluator``: ``model_class`` is a
    callable (the sampled model class) that returns a fresh model instance.
    Reports the per-epoch mean Dice similarity coefficient as the intermediate
    metric and the best value observed as the final metric.
    """
    # Fall back to CPU when CUDA is unavailable, regardless of --device.
    device = torch.device("cpu" if not torch.cuda.is_available() else args.device)

    loader_train, loader_valid = data_loaders(args)
    loaders = {"train": loader_train, "valid": loader_valid}

    # Instantiate the sampled architecture and move it to the target device.
    unet = model_class()
    unet.to(device)

    dsc_loss = DiceLoss()
    best_validation_dsc = 0.0

    optimizer = torch.optim.Adam(unet.parameters(), lr=args.lr)

    # Running loss buffers. NOTE(review): these are appended to and
    # periodically cleared but never read back or logged — presumably kept
    # for parity with train.py.
    loss_train = []
    loss_valid = []

    # Global training-step counter (incremented in the train phase only).
    step = 0

    # Epochs elapsed since the last validation-DSC improvement.
    cur_patient = 0

    for epoch in tqdm(range(args.epochs), total=args.epochs):
        # Early stopping: abort once validation DSC has not improved for
        # `early_stoping_patient` consecutive epochs.
        if cur_patient >= early_stoping_patient:
            print(f"Early Stoping...")
            break
        print(f"Training epoch {epoch}...")
        for phase in ["train", "valid"]:
            if phase == "train":
                unet.train()
            else:
                unet.eval()

            # Per-slice predictions/targets collected during the valid phase
            # so DSC can be computed per patient volume afterwards.
            validation_pred = []
            validation_true = []

            for _, data in enumerate(loaders[phase]):
                if phase == "train":
                    step += 1

                x, y_true = data
                x, y_true = x.to(device), y_true.to(device)

                optimizer.zero_grad()

                # Track gradients only during the train phase.
                with torch.set_grad_enabled(phase == "train"):
                    y_pred = unet(x)

                    loss = dsc_loss(y_pred, y_true)

                    if phase == "valid":
                        loss_valid.append(loss.item())
                        y_pred_np = y_pred.detach().cpu().numpy()
                        # Split the batch into individual slices (axis 0).
                        validation_pred.extend(
                            [y_pred_np[s] for s in range(y_pred_np.shape[0])]
                        )
                        y_true_np = y_true.detach().cpu().numpy()
                        validation_true.extend(
                            [y_true_np[s] for s in range(y_true_np.shape[0])]
                        )

                    if phase == "train":
                        loss_train.append(loss.item())
                        loss.backward()
                        optimizer.step()

                # Drop the accumulated training losses every 10 steps; the
                # buffer is never inspected before being cleared (see NOTE).
                if phase == "train" and (step + 1) % 10 == 0:
                    loss_train = []

            if phase == "valid":
                # Mean Dice across patient volumes; the slice-to-volume
                # grouping comes from the validation dataset's index.
                mean_dsc = np.mean(
                    dsc_per_volume(
                        validation_pred,
                        validation_true,
                        loader_valid.dataset.patient_slice_index,
                    )
                )
                if mean_dsc > best_validation_dsc:
                    best_validation_dsc = mean_dsc
                    cur_patient = 0
                else:
                    cur_patient += 1
                # NOTE(review): "{:4f}" sets a minimum width of 4, not 4
                # decimal places — "{:.4f}" was probably intended.
                print("Best validation mean DSC: {:4f}".format(best_validation_dsc))
                # Intermediate metric for NNI's per-trial progress view.
                nni.report_intermediate_result(mean_dsc)
                loss_valid = []

    # Final metric: the best validation DSC seen during this trial.
    nni.report_final_result(best_validation_dsc)


def main():
    """Configure and launch the Retiarii NAS experiment on the local machine."""
    # Pick the search strategy requested on the command line:
    # reinforcement learning (PolicyBasedRL) or regularized evolution.
    strategy_factory = (
        strategy.PolicyBasedRL
        if args.search_strategy == "PolicyBasedRL"
        else strategy.RegularizedEvolution
    )
    search_strategy = strategy_factory()

    # Each trial trains one sampled architecture via evaluate_model.
    evaluator = FunctionalEvaluator(evaluate_model)
    model_space = UNet()

    experiment = RetiariiExperiment(model_space, evaluator, [], search_strategy)
    config = RetiariiExeConfig("local")
    config.experiment_name = "unet"
    config.trial_concurrency = args.trial_concurrency
    config.max_trial_number = args.max_trial_number
    config.training_service.use_active_gpu = True
    # Serve the NNI web UI / REST API on port 18081.
    experiment.run(config, 18081)


if __name__ == "__main__":
    # Entry point: only launch the experiment when run as a script.
    main()
