from typing import Union, Callable, List, Optional

import paddle
from helm.dynamic.models.nas.darts.model_search import Network
from paddle.fluid.dygraph import Layer
from paddle.fluid.optimizer import Optimizer

from helm.dynamic.engine.engine import Engine
from helm.dynamic.engine.callback import DefaultTrainLogger, DefaultEvalLogger, ModelCheckpoint
from helm.dynamic.engine.metrics import Metric


def requires_grad(network: Network, arch: bool, model: bool):
    """Toggle trainability of the two DARTS parameter groups.

    Sets ``trainable`` to *arch* on every architecture parameter and to
    *model* on every weight parameter, so the alternating bi-level
    optimization steps only touch their own group.
    """
    for arch_param in network.arch_parameters():
        arch_param.trainable = arch
    for weight_param in network.model_parameters():
        weight_param.trainable = model


def create_trainer(
        model: Layer,
        criterion: Union[Callable, Layer],
        optimizer_arch: Optimizer,
        optimizer_model: Optimizer,
        metrics: List[Metric],
        log_freq: int = 10,
        save_freq: Optional[int] = None,
        # FIX: was Optional[int]; this is a directory path forwarded to
        # ModelCheckpoint, so the correct annotation is Optional[str].
        save_dir: Optional[str] = None,
):
    """Build an Engine that runs one bi-level DARTS update per batch.

    Each batch alternates two phases: first the architecture parameters
    are updated via ``optimizer_arch``, then the model weights via
    ``optimizer_model``. ``requires_grad`` gates which parameter group
    is trainable in each phase.

    Args:
        model: The searchable network (a dygraph Layer).
        criterion: Loss callable applied to (logits, target).
        optimizer_arch: Optimizer for the architecture parameters.
        optimizer_model: Optimizer for the model weights.
        metrics: Metric callbacks attached to the engine.
        log_freq: Batch interval for the default train logger.
        save_freq: If truthy, checkpoint every ``save_freq`` units.
        save_dir: Directory for checkpoints (used only with ``save_freq``).

    Returns:
        A configured training ``Engine``.
    """
    def train_batch(engine, batch):
        engine.model.train()

        # The batch carries two (input, target) pairs; presumably the
        # "_search" pair comes from the search/validation split.
        # NOTE(review): canonical DARTS optimizes the architecture on the
        # validation pair and the weights on the training pair — here the
        # arch phase consumes (input, target) and the weight phase
        # (input_search, target_search); confirm this is intentional.
        input, target, input_search, target_search = batch

        # Phase 1: architecture step — only arch parameters trainable.
        requires_grad(model, arch=True, model=False)
        logits = engine.model(input)
        loss = engine.criterion(logits, target)
        loss.backward()
        engine.optimizer_arch.minimize(loss)
        engine.optimizer_arch.clear_gradients()

        # Phase 2: weight step — only model weights trainable.
        requires_grad(model, arch=False, model=True)
        logits_search = engine.model(input_search)
        loss_search = engine.criterion(logits_search, target_search)
        loss_search.backward()
        engine.optimizer_model.minimize(loss_search)
        engine.optimizer_model.clear_gradients()

        # NOTE(review): "loss" is the phase-1 (arch) loss, while
        # y_true/y_pred come from phase 2 — downstream metrics therefore
        # mix the two phases; confirm this is the intended reporting.
        output = {
            "loss": float(loss.numpy()),
            "y_true": target_search,
            "y_pred": logits_search.detach(),
            "batch_size": input.shape[0],
            "lr": engine.optimizer_model.current_step_lr(),
        }

        return output

    callbacks = [*metrics, DefaultTrainLogger(log_freq)]
    if save_freq:
        callbacks.append(ModelCheckpoint(save_freq, save_dir))
    engine = Engine(
        train_batch, callbacks, model=model, criterion=criterion,
        optimizer_arch=optimizer_arch, optimizer_model=optimizer_model)
    return engine


def create_evaluator(
        model: Layer,
        criterion: Union[Callable, Layer],
        metrics: List[Metric],
        log_freq: int = 10,
        stage: str = "Valid",
):
    """Build an Engine that evaluates ``model`` without gradients.

    Each batch is a (input, target) pair; the forward pass and loss are
    computed under ``no_grad`` and the raw predictions are handed to the
    attached metric callbacks.

    Args:
        model: The network to evaluate (a dygraph Layer).
        criterion: Loss callable applied to (predictions, target).
        metrics: Metric callbacks attached to the engine.
        log_freq: Batch interval for the default eval logger.
        stage: Label for this evaluation phase (e.g. "Valid").

    Returns:
        A configured evaluation ``Engine``.
    """

    def test_batch(engine, batch):
        engine.model.eval()

        inputs, targets = batch

        # No gradients are needed during evaluation.
        with paddle.fluid.dygraph.no_grad():
            predictions = engine.model(inputs)
            batch_loss = engine.criterion(predictions, targets)

        return {
            "loss": float(batch_loss.numpy()),
            "y_true": targets,
            "y_pred": predictions,
            "batch_size": inputs.shape[0],
        }

    callbacks = list(metrics)
    callbacks.append(DefaultEvalLogger(log_freq))
    return Engine(test_batch, callbacks,
                  model=model, criterion=criterion, stage=stage)


