import math

from hhutil.io import fmt_path

import numpy as np

import paddle.fluid as fluid

from helm.datasets import CIFAR10, CIFAR100

from helm.nn.loss import CrossEntropyLoss

import helm.static.models.cifar
import helm.static.lr_scheduler
from helm.static import set_device, set_seed
from helm.static.engine.callback import resume
from helm.static.engine.cls import create_supervised_trainer, create_supervised_evaluator
from helm.static.engine.cls.metrics import Accuracy
from helm.static.engine.metrics import Loss

from helm.config.config import get_config, override
from helm.static.helper import get_optimizer, get_lr_scheduler, get_model
from helm.static.models.layers import DEFAULTS, update_defaults


# --- Experiment configuration -------------------------------------------------
# Load the experiment YAML and fix the checkpoint location / epoch to convert.
cfg = get_config("/Users/hrvvi/Downloads/17.yaml")
save_path = fmt_path("/Users/hrvvi/Downloads")
epoch = 300  # checkpoint epoch to resume and export (see `resume` below)

dataset = cfg.Dataset.type
# Explicit raise instead of `assert`: asserts are stripped under `python -O`.
if dataset not in ("CIFAR10", "CIFAR100"):
    raise ValueError(
        f"Unsupported dataset: {dataset!r}; expected 'CIFAR10' or 'CIFAR100'")

fp16 = cfg.get("fp16", False)
if fp16:
    if cfg.device != 'gpu':
        raise ValueError("fp16 requires cfg.device == 'gpu'")
    DEFAULTS['fp16'] = True

# Merge user-supplied global layer settings into the model-layer defaults table.
update_defaults(cfg.get("Global", {}), DEFAULTS)

set_seed(cfg.seed)
place = set_device(cfg.device)

# Pick the dataset class and class count for the chosen CIFAR variant.
if dataset == "CIFAR10":
    num_classes, CIFAR = 10, CIFAR10
else:
    num_classes, CIFAR = 100, CIFAR100

# Hyper-parameter-optimization hook: pull the next trial's parameters from NNI
# and override the matching dotted keys in `cfg`.
# NOTE(review): this runs after `fp16`, `update_defaults`, `set_seed` and
# `set_device` have already read their cfg values, so NNI overrides of those
# particular keys would have no effect — confirm the ordering is intentional.
if cfg.get("hpo"):
    import nni
    RCV_CONFIG = nni.get_next_parameter()
    for k, v in RCV_CONFIG.items():
        override(cfg, k.split("."), v)

# Mixing augmentation (cfg.Mix) produces soft labels and needs the class count.
use_mix = cfg.get("Mix") is not None
if use_mix:
    cfg.Mix.num_classes = num_classes

def model_fn(is_train):
    """Build the CIFAR classification model described by ``cfg.Model``.

    Injects the dataset's class count into ``cfg.Model`` before construction.
    """
    cfg.Model.num_classes = num_classes
    return get_model(cfg.Model, helm.static.models.cifar, is_train)

# Label smoothing is optional; `cfg.get("label_smooth")` is None when unset.
criterion = CrossEntropyLoss(cfg.get("label_smooth"))

batch_size = cfg.Dataset.Train.batch_size
# 50000 is the training-set size of both CIFAR-10 and CIFAR-100, so this is
# the number of optimizer steps per epoch (last batch may be partial).
step_per_epoch = math.ceil(50000 / batch_size)
epochs = cfg.epochs

def optimizer_fn():
    """Create the LR scheduler and the (optionally fp16-decorated) optimizer.

    Returns:
        ``(learning_rate, optimizer)`` pair consumed by the trainer factory.
    """
    lr_sched = get_lr_scheduler(cfg.LRScheduler, step_per_epoch, epochs)
    opt = get_optimizer(cfg.Optimizer, lr_sched)
    if not fp16:
        return lr_sched, opt
    # Wrap the optimizer for mixed-precision training with dynamic loss scaling.
    opt = fluid.contrib.mixed_precision.decorate(
        opt, init_loss_scaling=128.0, use_dynamic_loss_scaling=True)
    return lr_sched, opt

input_shape = cfg.input_shape

# With mixing enabled the training labels are soft, so per-batch accuracy is
# only tracked when mixing is off; the test set always reports accuracy.
metrics = [Loss()] if use_mix else [Loss(), Accuracy()]

test_metrics = [Loss(), Accuracy()]

# Logging / evaluation / checkpointing cadence (epochs unless stated otherwise).
log_freq = cfg.log_freq
eval_freq = cfg.get("eval_freq", 1)
save_freq = cfg.get("save_freq")
n_saved = cfg.get("n_saved", 1)

# Static-graph executor on the device selected by set_device().
exe = fluid.Executor(place)

# Build the supervised training program; `soft_label` switches the loss to
# soft targets when mixing is enabled, and `auxiliary_weight` (if set) weights
# an auxiliary head's loss.
train_engine = create_supervised_trainer(
    exe, model_fn, criterion, optimizer_fn, metrics, input_shape, log_freq,
    soft_label=use_mix, auxiliary_weight=cfg.get("auxiliary_weight", None))
# Initialize all variables before loading checkpoint weights over them.
exe.run(fluid.default_startup_program())

# Restore the trainer's state for `epoch` from `save_path`.
resume(train_engine, save_path, epoch)

def fetch(t):
    """Read variable ``t`` out of the static-graph global scope as a numpy array."""
    var = fluid.global_scope().find_var(t.name)
    return np.array(var.get_tensor())

# Snapshot every parameter of the static-graph model into a numpy array,
# keyed by parameter name, for transfer into the dygraph model below.
d = {name: fetch(var) for name, var in train_engine.model.state_dict().items()}

# Switch to imperative (dygraph) mode so the model can be rebuilt dynamically.
fluid.enable_imperative()

# NOTE(review): this shadows the `get_model` imported from helm.static.helper
# at the top of the file.
from helm.dynamic.helper import get_model
# NOTE(review): the dynamic builder is handed the *static* models package
# (helm.static.models.cifar) — confirm this is intentional rather than a
# helm.dynamic models module.
model = get_model(cfg.Model, helm.static.models.cifar)
model.set_dict(d)
# Export the converted weights in dygraph checkpoint format.
fluid.save_dygraph(model.state_dict(), str(save_path / f"epoch_{epoch}"))