from typing import Callable, Sequence, Tuple, Optional

from hhutil.io import time_now

import paddle.fluid as fluid

from helm.static import compile_program
from helm.static.engine.callback import Callback
from helm.static.engine.engine import Engine
from helm.static.engine.metrics import Metric


def create_supervised_trainer(
        executor: fluid.Executor,
        model_fn: Callable[[bool], fluid.Layer],
        criterion: Callable,
        optimizer_fn: Callable[[], Tuple[fluid.Variable, fluid.optimizer.Optimizer]],
        metrics: Sequence[Metric],
        input_shape: Tuple[int, int, int],
        log_freq: int,
        soft_label=False,
        auxiliary_weight: Optional[float] = None,
):
    """Build a training ``Engine`` around a compiled fluid static graph.

    Args:
        executor: fluid executor used to run the compiled program.
        model_fn: called with ``True`` (training mode) to build the model.
        criterion: loss function ``criterion(logits, label) -> loss``.
        optimizer_fn: returns ``(learning_rate_variable, optimizer)``.
        metrics: metrics attached both as fetch targets and as callbacks.
        input_shape: per-sample input shape; a ``None`` batch dim is prepended.
        log_freq: batch interval for the default train logger.
        soft_label: if True, the label placeholder is a float32 vector with
            the same width as the logits instead of an int64 class index.
        auxiliary_weight: weight for an auxiliary head's loss; only applied
            when the model returns a ``(logits, logits_aux)`` pair.

    Returns:
        An ``Engine`` wired with the step function, metrics and train logger.
    """
    fetches = {}
    with fluid.program_guard(fluid.default_main_program(), fluid.default_startup_program()):
        with fluid.unique_name.guard():
            model = model_fn(True)

            output = {}

            x = fluid.data(
                name='image', shape=[None, *input_shape], dtype='float32')

            learning_rate, optimizer = optimizer_fn()
            logits = model(x)

            # Unpack an auxiliary head *before* creating the label variable.
            # The previous code read ``logits.shape`` first, which raised
            # AttributeError when soft_label was combined with a model that
            # returns a (logits, logits_aux) pair.
            logits_aux = None
            if auxiliary_weight and isinstance(logits, Sequence) and len(logits) == 2:
                logits, logits_aux = logits

            if soft_label:
                y = fluid.data(name='label', shape=[None, logits.shape[1]], dtype='float32')
            else:
                y = fluid.data(name='label', shape=[None, 1], dtype='int64')

            loss = criterion(logits, y)
            if logits_aux is not None:
                loss = loss + auxiliary_weight * criterion(logits_aux, y)
            optimizer.minimize(loss)

            output.update({
                "loss": loss,
                "y_pred": logits,
                "y_true": y,
            })
            # Each metric registers its own fetch variables on the graph.
            for m in metrics:
                fetches[m.name] = m.append_op(output)
            fetches["learning_rate"] = learning_rate

    program = compile_program(fluid.default_main_program(), loss.name)

    def step_fn(exe, batch):
        """Run one training step; returns fetched metrics plus batch_size."""
        # Some data loaders wrap the (image, label) pair in a 1-element list.
        if len(batch) == 1:
            batch = batch[0]
        x, y = batch
        # NOTE(review): assumes x is a LoDTensor, where ``shape`` is a
        # method rather than an attribute — confirm against the feed pipeline.
        n = x.shape()[0]

        output = exe.run(
            program,
            feed={"image": x, "label": y},
            fetch_list=list(fetches.values()))

        output = dict(zip(fetches.keys(), output))
        output['batch_size'] = n
        return output

    callbacks = [*metrics, DefaultTrainLogger(log_freq)]
    engine = Engine(executor, step_fn, callbacks, model=model, optimizer=optimizer)
    return engine


def join_metric_logs(metrics, delim=" - "):
    """Format a ``{name: value}`` mapping as 'name: v.vvvv' entries joined by *delim*."""
    return delim.join("%s: %.4f" % (name, value) for name, value in metrics.items())


class DefaultTrainLogger(Callback):
    """Prints an epoch header and periodic per-batch training metric lines."""

    def __init__(self, log_freq):
        super().__init__()
        # Emit a log line every `log_freq` batches (and on the final batch).
        self.log_freq = log_freq

    def on_epoch_begin(self, engine):
        state = engine.state
        has_lr = bool(state.output) and 'learning_rate' in state.output
        if has_lr:
            print('Epoch %d/%d, lr: %f' % (state.epoch + 1, state.max_epochs, state.output['learning_rate']))
        else:
            print('Epoch %d/%d' % (state.epoch + 1, state.max_epochs))

    def on_batch_end(self, engine):
        state = engine.state
        step = state.iteration + 1
        is_last_batch = state.iteration == state.epoch_length - 1
        if step % self.log_freq == 0 or is_last_batch:
            print("%s Train %d/%d - %s" % (
                time_now(), step, state.epoch_length, join_metric_logs(state.metrics, delim=" - ")))


def create_supervised_evaluator(
        executor: fluid.Executor,
        model_fn: Callable[[bool], fluid.Layer],
        criterion: Callable,
        metrics: Sequence[Metric],
        input_shape: Tuple[int, int, int],
        log_freq: int,
):
    """Build an evaluation ``Engine`` over an inference-mode fluid program.

    The model is constructed with ``model_fn(False)`` in a fresh program,
    which is then cloned with ``for_test=True`` so no training ops run.
    """
    eval_program = fluid.Program()

    fetches = {}
    with fluid.program_guard(eval_program, fluid.default_startup_program()):
        with fluid.unique_name.guard():
            model = model_fn(False)
            x = fluid.data(
                name='image', shape=[None, *input_shape], dtype='float32')
            y = fluid.data(name='label', shape=[None, 1], dtype='int64')
            pred = model(x)
            loss = criterion(pred, y)

            graph_outputs = {
                "loss": loss,
                "y_true": y,
                "y_pred": pred,
            }
            # Each metric registers its own fetch variables on the graph.
            for metric in metrics:
                fetches[metric.name] = metric.append_op(graph_outputs)

    eval_program = eval_program.clone(for_test=True)

    def step_fn(exe, batch):
        """Run one evaluation step; returns fetched metrics plus batch_size."""
        # Some data loaders wrap the (image, label) pair in a 1-element list.
        if len(batch) == 1:
            batch = batch[0]
        images, labels = batch
        batch_size = images.shape()[0]

        results = exe.run(
            eval_program,
            feed={"image": images, "label": labels},
            fetch_list=list(fetches.values()))
        results = dict(zip(fetches.keys(), results))
        results['batch_size'] = batch_size
        return results

    callbacks = [*metrics, DefaultEvalLogger(log_freq)]
    return Engine(executor, step_fn, callbacks, model=model)


class DefaultEvalLogger(Callback):
    """Prints periodic per-batch validation metric lines."""

    def __init__(self, log_freq):
        super().__init__()
        # Emit a log line every `log_freq` batches (and on the final batch).
        self.log_freq = log_freq

    def on_batch_end(self, engine):
        state = engine.state
        step = state.iteration + 1
        is_last_batch = state.iteration == state.epoch_length - 1
        if step % self.log_freq == 0 or is_last_batch:
            print("%s Valid %d/%d - %s" % (
                time_now(), step, state.epoch_length, join_metric_logs(state.metrics, delim=" - ")))


class NNIReporter(Callback):
    """Reports one tracked metric to NNI at the end of every epoch."""

    def __init__(self, metric):
        super().__init__()
        # Name of the metric (a key into engine.state.metrics) to report.
        self.metric = metric

    def on_epoch_end(self, engine):
        # Imported lazily so nni is only required when this callback is used.
        import nni
        value = engine.state.metrics[self.metric]
        nni.report_intermediate_result(value)