# from typing import Callable, Sequence, Tuple
#
# import paddle.fluid as fluid
#
# from helm.dynamic.models.nas.nasbench201.model_search import Network
# from helm.utils import time_now
# from helm.static import compile_program
# from helm.static.engine.callback import Callback
# from helm.static.engine.engine import Engine
# from helm.static.engine.metrics import Metric
#
#
# def requires_grad(network: Network, arch: bool, model: bool):
#     # In Paddle, stop_gradient=True disables gradient flow, so it must be
#     # the negation of trainable — not equal to it.
#     for p in network.arch_parameters():
#         p.trainable = arch
#         p.stop_gradient = not arch
#     for p in network.model_parameters():
#         p.trainable = model
#         p.stop_gradient = not model
#
#
# def create_trainer(
#         executor: fluid.Executor,
#         model_fn: Callable[[], Network],
#         criterion: Callable,
#         optimizer_fn: Callable[[], Tuple[fluid.Variable, fluid.optimizer.Optimizer, fluid.optimizer.Optimizer]],
#         metrics: Sequence[Metric],
#         input_shape: Tuple[int, int, int],
#         log_freq: int,
# ):
#     fetches = {}
#     with fluid.program_guard(fluid.default_main_program(), fluid.default_startup_program()):
#         with fluid.unique_name.guard():
#             model = model_fn()
#
#             output = {}
#
#             input = fluid.data(name='input', shape=[None, *input_shape], dtype='float32')
#             target = fluid.data(name='target', shape=[None, 1], dtype='int64')
#             input_search = fluid.data(name='input_search', shape=[None, *input_shape], dtype='float32')
#             target_search = fluid.data(name='target_search', shape=[None, 1], dtype='int64')
#
#             learning_rate, optimizer_arch, optimizer_model = optimizer_fn()
#
#             logits = model(input)
#             loss = criterion(logits, target)
#             optimizer_arch.minimize(loss, parameter_list=list(model.arch_parameters()))
#
#             logits_search = model(input_search)
#             loss_search = criterion(logits_search, target_search)
#             optimizer_model.minimize(loss_search, parameter_list=list(model.model_parameters()))
#
#             output.update({
#                 "loss": loss,
#                 "y_pred": logits_search,
#                 "y_true": target_search,
#             })
#             for m in metrics:
#                 fetches[m.name] = m.append_op(output)
#             fetches["learning_rate"] = learning_rate
#
#     program = compile_program(fluid.default_main_program(), loss.name)
#
#     def step_fn(exe, batch):
#         if len(batch) == 1:
#             batch = batch[0]
#         input, target, input_search, target_search = batch
#         n = input.shape[0]
#
#         output = exe.run(
#             program,
#             feed={"input": input, "target": target,
#                   "input_search": input_search,
#                   "target_search": target_search},
#             fetch_list=list(fetches.values()))
#
#         output = dict(zip(fetches.keys(), output))
#         output['batch_size'] = n
#         return output
#
#     callbacks = [*metrics, DefaultTrainLogger(log_freq)]
#     engine = Engine(executor, step_fn, callbacks,
#                     model=model, optimizer_arch=optimizer_arch, optimizer_model=optimizer_model)
#     return engine
#
#
# def join_metric_logs(metrics, delim=" - "):
#     logs = []
#     for k, v in metrics.items():
#         logs.append("%s: %.4f" % (k, v))
#     return delim.join(logs)
#
#
# class DefaultTrainLogger(Callback):
#
#     def __init__(self, log_freq):
#         super().__init__()
#         self.log_freq = log_freq
#
#     def on_epoch_begin(self, engine):
#         state = engine.state
#         if state.output and 'learning_rate' in state.output:
#             print('Epoch %d/%d, lr: %f' % (state.epoch + 1, state.max_epochs, state.output['learning_rate']))
#         else:
#             print('Epoch %d/%d' % (state.epoch + 1, state.max_epochs))
#
#     def on_batch_end(self, engine):
#         state = engine.state
#         i = state.iteration
#         if (i + 1) % self.log_freq == 0 or i == state.epoch_length - 1:
#             print("%s Train %d/%d - %s" % (
#                 time_now(), i + 1, state.epoch_length, join_metric_logs(state.metrics, delim=" - ")))
#
#
# def create_evaluator(
#         executor: fluid.Executor,
#         model_fn: Callable[[], fluid.Layer],
#         criterion: Callable,
#         metrics: Sequence[Metric],
#         input_shape: Tuple[int, int, int],
#         log_freq: int,
# ):
#     program = fluid.Program()
#
#     fetches = {}
#     with fluid.program_guard(program, fluid.default_startup_program()):
#         with fluid.unique_name.guard():
#             model = model_fn()
#             x = fluid.data(
#                 name='image', shape=[None, *input_shape], dtype='float32')
#             y = fluid.data(name='label', shape=[None, 1], dtype='int64')
#             out = model(x)
#             loss = criterion(out, y)
#
#             output = {
#                 "loss": loss,
#                 "y_true": y,
#                 "y_pred": out,
#             }
#             for m in metrics:
#                 fetches[m.name] = m.append_op(output)
#
#     program = program.clone(for_test=True)
#
#     def step_fn(exe, batch):
#         if len(batch) == 1:
#             batch = batch[0]
#         x, y = batch
#         n = x.shape[0]
#
#         output = exe.run(
#             program,
#             feed={"image": x, "label": y},
#             fetch_list=list(fetches.values()))
#         output = dict(zip(fetches.keys(), output))
#         output['batch_size'] = n
#         return output
#
#     callbacks = [*metrics, DefaultEvalLogger(log_freq)]
#     engine = Engine(executor, step_fn, callbacks, model=model)
#     return engine
#
#
# class DefaultEvalLogger(Callback):
#
#     def __init__(self, log_freq):
#         super().__init__()
#         self.log_freq = log_freq
#
#     def on_batch_end(self, engine):
#         state = engine.state
#         i = state.iteration
#         if (i + 1) % self.log_freq == 0 or i == state.epoch_length - 1:
#             print("%s Valid %d/%d - %s" % (
#                 time_now(), i + 1, state.epoch_length, join_metric_logs(state.metrics, delim=" - ")))
#
#
