from mindspore import context
from mindspore.communication.management import init
from mindspore.communication.management import release
from mindspore.train.callback import CheckpointConfig
from mindspore.train.callback import ModelCheckpoint
from mindspore.train.serialization import load_checkpoint
from mindspore.nn import Cell
from mindspore.nn import Momentum
from mindspore.nn import SoftmaxCrossEntropyWithLogits
from mindspore.train import Model
from mindspore.context import ParallelMode
from .utils import allclose_nparray
from .utils import clean_all_ckpt_files
from .utils import find_newest_ckpt_file
from mindspore.parallel import set_algo_parameters
from mindspore.ops import Primitive
from mindspore.communication.management import get_rank
from mindspore import default_config
import numpy as np
import os
from mindspore import log as logger
import sys
from mindspore.profiler import Profiler
import re
import json
import tensorflow as tf
import torch

# Module-level alias for the process environment (kept for importers of this
# module; not referenced within this file itself).
env_dist = os.environ


class MetaFactory:
    """Base class for test factories.

    Configures the MindSpore context from build-time defaults and ``CONTEXT_*``
    environment variables, and manages the optional distributed-communication
    lifecycle (HCCL on Ascend, NCCL on GPU) driven by ``RANK_SIZE``/``RANK_ID``/
    ``DEVICE_ID``.
    """

    def __init__(self):
        self._default_context()
        self._set_context_from_env()
        self.device_target = context.get_context('device_target')
        # Parallel-run metadata; populated by _set_parallel_env from env vars.
        self.rank_size = None
        self.device_id = None
        self.global_rank_id = None
        self._init_parallel()
        self._set_parallel_env()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        return

    def __del__(self):
        # Release communication resources when the factory is collected.
        self._release_parallel()

    @staticmethod
    def _default_context():
        """Set GRAPH mode and map the build-time default target to a device."""
        target_dict = {
            'ascend or cpu': "Ascend",
            'gpu or cpu': "GPU",
            'cpu': "CPU"
        }
        context.set_context(mode=context.GRAPH_MODE)
        default_target = default_config.__device_target__
        context.set_context(device_target=target_dict[default_target])

    @staticmethod
    def _set_context_from_env():
        """Override context settings from ``CONTEXT_*`` environment variables."""
        # Accept several spellings of the execution mode for convenience.
        mode_dict = {
            'GRAPH': context.GRAPH_MODE,
            'GRAPH_MODE': context.GRAPH_MODE,
            'CONTEXT.GRAPH_MODE': context.GRAPH_MODE,
            'PYNATIVE': context.PYNATIVE_MODE,
            'PYNATIVE_MODE': context.PYNATIVE_MODE,
            'CONTEXT.PYNATIVE_MODE': context.PYNATIVE_MODE
        }
        if 'CONTEXT_MODE' in os.environ:
            mode_key = os.environ['CONTEXT_MODE'].upper()
            context.set_context(mode=mode_dict[mode_key])
        if 'CONTEXT_DEVICE_TARGET' in os.environ:
            context.set_context(device_target=os.environ['CONTEXT_DEVICE_TARGET'])
        # Presence of these variables enables the feature regardless of value.
        if 'CONTEXT_ENABLE_SPARSE' in os.environ:
            context.set_context(enable_sparse=True)
        if 'CONTEXT_ENABLE_GRAPH_KERNEL' in os.environ:
            context.set_context(enable_graph_kernel=True)

    def _set_parallel_env(self):
        """Read rank/device identifiers from the environment, if present."""
        if 'RANK_SIZE' in os.environ:
            self.rank_size = int(os.environ['RANK_SIZE'])
            if 'RANK_ID' in os.environ:
                self.global_rank_id = int(os.environ['RANK_ID'])
            # On GPU the NCCL rank is authoritative and overrides RANK_ID.
            if self.device_target == 'GPU':
                self.global_rank_id = get_rank()
        if 'DEVICE_ID' in os.environ:
            self.device_id = int(os.environ['DEVICE_ID'])

    def _init_parallel(self):
        """Initialize the communication backend when RANK_SIZE is set."""
        self._init_parallel_flag = False
        if 'RANK_SIZE' in os.environ:
            if self.device_target == 'Ascend':
                init(backend_name='hccl')
            if self.device_target == 'GPU':
                init(backend_name='nccl')
            self._init_parallel_flag = True

    def _release_parallel(self):
        """Release the communication backend if it was initialized.

        Uses getattr because __del__ may run even when __init__ failed
        before _init_parallel could set the flag.
        """
        if getattr(self, '_init_parallel_flag', False):
            release()

    @staticmethod
    def _save_graphs(save_graph_flag=False, save_graph_path="."):
        """Toggle IR-graph dumping to *save_graph_path*."""
        context.set_context(save_graphs=save_graph_flag, save_graphs_path=save_graph_path)


class OpsFactory(Cell, MetaFactory):
    """Base factory for single-operator accuracy and performance comparisons.

    Subclasses implement the backend-specific ``forward_*_impl`` /
    ``grad_*_impl`` hooks and the ``*_cmp`` comparators.  The numeric
    tolerance used in comparisons (``self.loss``) is derived from ``dtype``.
    """

    def __init__(self, dtype=np.float16):
        super().__init__()
        MetaFactory.__init__(self)
        self.dtype = dtype
        # Comparison tolerance scales with precision; unknown dtypes compare
        # exactly (loss == 0).
        if self.dtype == np.float16:
            self.loss = 1e-3
        elif self.dtype == np.float32:
            self.loss = 1e-4
        elif self.dtype == np.float64:
            self.loss = 1e-5
        else:
            self.loss = 0

        # Randomize the numpy seed but log it so failures are reproducible.
        seed = np.random.randint(2 ** 32)
        np.random.seed(seed)
        logger.info("set seed: {}".format(seed))

    def forward_mindspore_impl(self, *args, **kwargs):
        raise NotImplementedError

    def forward_pytorch_impl(self, *args, **kwargs):
        raise NotImplementedError

    def forward_tensorflow_impl(self, *args, **kwargs):
        raise NotImplementedError

    def forward_numpy_impl(self, *args, **kwargs):
        raise NotImplementedError

    def grad_mindspore_impl(self, *args, **kwargs):
        raise NotImplementedError

    def grad_pytorch_impl(self, *args, **kwargs):
        raise NotImplementedError

    def grad_tensorflow_impl(self, *args, **kwargs):
        raise NotImplementedError

    def grad_numpy_impl(self, *args, **kwargs):
        raise NotImplementedError

    def forward_cmp(self):
        raise NotImplementedError

    def grad_cmp(self, *args, **kwargs):
        raise NotImplementedError

    def mindspore_profile(self, net, run_time, op_name, *inputs):
        '''
        profiler forward and grad for mindspore
        :param net: the mindspore net
        :param run_time: the times for running
        :param op_name: the primitive op name in API
        :param inputs: the inputs
        :return: the profiler data (average op time in us)
        '''
        profiler_ms = Profiler()
        for _ in range(run_time):
            net(*inputs)
        profiler_ms.analyse()
        # op_analyse returns a JSON string keyed by the op name; query once.
        op_info = profiler_ms.op_analyse(op_name)
        logger.info("mindspore profiler: {}".format(op_info))
        return float(json.loads(op_info)[op_name][0]["op_avg_time(us)"])

    @staticmethod
    def _tf_run_profiler(fetches, run_time):
        '''
        run *fetches* run_time times in a tf.compat.v1 session under the
        tf profiler and return the time-and-memory report as a string
        :param fetches: the graph tensors to run
        :param run_time: the times for running
        :return: the profiler report text
        '''
        inits = tf.compat.v1.global_variables_initializer()
        config = tf.compat.v1.ConfigProto()
        with tf.compat.v1.Session(config=config) as sess:
            profiler = tf.compat.v1.profiler.Profiler(sess.graph)
            sess.run(inits)
            run_meta = tf.compat.v1.RunMetadata()
            option = tf.compat.v1.RunOptions(trace_level=tf.compat.v1.RunOptions.FULL_TRACE)
            for _ in range(run_time):
                sess.run(fetches, options=option, run_metadata=run_meta)
                profiler.add_step(0, run_meta)
            opts = tf.compat.v1.profiler.ProfileOptionBuilder.time_and_memory(
                min_micros=0, min_bytes=0)
            report = str(profiler.profile_operations(options=opts))
        logger.info("tensorflow profiler: {}".format(report))
        return report

    @staticmethod
    def _tf_parse_micros(report, op_name, field, run_time):
        '''
        extract the per-run average of *field* (in micros) for *op_name*
        from a tf profiler report
        '''
        return float(report.split(op_name)[2].split(field)[1].split()[1]) / run_time

    def tensorflow_forward_profile(self, net, run_time, op_name, *inputs, **kwargs):
        '''
        profiler forward for tensorflow
        :param net: the tensorflow net
        :param run_time: the times for running
        :param op_name: the op name in tensorflow node
        :param inputs: the inputs
        :return: the profiler data (average time in us)
        '''
        out = net(*inputs, **kwargs)
        report = self._tf_run_profiler(out, run_time)
        # NOTE(review): the original GPU and CPU branches here were identical,
        # both reading cpu_exec_micros, while the grad path reads
        # accelerator_exec_micros on GPU.  Behavior is preserved; confirm
        # whether the forward GPU path should also use accelerator time.
        return self._tf_parse_micros(report, op_name, 'cpu_exec_micros', run_time)

    def tensorflow_grad_profile(self, net, run_time, op_name, grads, *inputs):
        '''
        profiler grad for tensorflow
        :param net: the tensorflow net
        :param run_time: the times for running
        :param op_name: the op name in tensorflow node
        :param grads: the gradients for inputs
        :param inputs: the inputs
        :return: the profiler data (average time in us)
        '''
        variables = [tf.Variable(arg) for arg in inputs]
        out = net(*variables)
        # Differentiate w.r.t. the graph Variables.  The original passed the
        # raw *inputs* arrays, which are not tensors in the built graph and
        # therefore have no gradient path.
        grad_net = tf.gradients(out, variables, grad_ys=grads)
        report = self._tf_run_profiler(grad_net, run_time)
        if 'CONTEXT_DEVICE_TARGET' in os.environ and \
                os.environ['CONTEXT_DEVICE_TARGET'].upper() == 'GPU':
            field = 'accelerator_exec_micros'
        else:
            field = 'cpu_exec_micros'
        return self._tf_parse_micros(report, op_name, field, run_time)

    @staticmethod
    def _to_cuda(values):
        '''move every torch.Tensor in *values* to the GPU; pass others through'''
        return [v.cuda() if isinstance(v, torch.Tensor) else v for v in values]

    @staticmethod
    def _parse_torch_profile_us(table, op_name):
        '''
        extract the op's average time from a torch profiler table, in us
        cells look like '12.3ms' / '45.6us' / '1.2s'; normalize everything
        to microseconds
        '''
        cell = table.split(op_name)[1].split()[4]
        value = float(re.sub('[A-Za-z]', '', cell))
        if cell[-2] == 'm':
            return value * 1000        # ms -> us
        if cell[-2] == 'u':
            return value               # already us
        return value * 1000000         # plain seconds -> us

    def pytorch_forward_profile(self, net, run_time, op_name, *inputs):
        '''
        profiler forward for pytorch
        :param net: the pytorch net
        :param run_time: the times for running
        :param op_name: the op name in pytorch node
        :param inputs: the inputs
        :return: the profiler data (average time in us)
        '''
        profile = torch.profiler.profile
        record_function = torch.profiler.record_function
        profileractivity = torch.profiler.ProfilerActivity
        on_gpu = 'CONTEXT_DEVICE_TARGET' in os.environ and \
            os.environ['CONTEXT_DEVICE_TARGET'].upper() == 'GPU'
        if on_gpu:
            activities = [profileractivity.CPU, profileractivity.CUDA]
            with profile(activities=activities, record_shapes=True) as prof:
                inputs0 = self._to_cuda(inputs)
                for _ in range(run_time):
                    with record_function("ops_profile"):
                        net(*inputs0)
        else:
            with profile(activities=[profileractivity.CPU], record_shapes=True) as prof:
                for _ in range(run_time):
                    with record_function("ops_profile"):
                        net(*inputs)
        table = prof.key_averages(group_by_input_shape=True).table()
        logger.info("pytorch profiler: {}".format(table))
        return self._parse_torch_profile_us(table, op_name)

    def pytorch_grad_profile(self, net, run_time, op_name, grads, *inputs):
        '''
        profiler grad for pytorch
        :param net: the pytorch net
        :param run_time: the times for running
        :param op_name: the op name in pytorch node
        :param grads: the gradients for inputs
        :param inputs: the inputs
        :return: the profiler data (average time in us)
        '''
        profile = torch.profiler.profile
        record_function = torch.profiler.record_function
        profileractivity = torch.profiler.ProfilerActivity
        on_gpu = 'CONTEXT_DEVICE_TARGET' in os.environ and \
            os.environ['CONTEXT_DEVICE_TARGET'].upper() == 'GPU'
        if on_gpu:
            activities = [profileractivity.CPU, profileractivity.CUDA]
            with profile(activities=activities, record_shapes=True) as prof:
                inputs0 = self._to_cuda(inputs)
                grads0 = self._to_cuda(grads)
                for _ in range(run_time):
                    with record_function("ops_profile"):
                        out = net(*inputs0)
                        out.backward(gradient=grads0)
        else:
            with profile(activities=[profileractivity.CPU], record_shapes=True) as prof:
                for _ in range(run_time):
                    with record_function("ops_profile"):
                        out = net(*inputs)
                        out.backward(gradient=grads)
        table = prof.key_averages(group_by_input_shape=True).table()
        logger.info("pytorch profiler: {}".format(table))
        return self._parse_torch_profile_us(table, op_name)

    def forward_profile_cmp(self):
        raise NotImplementedError

    def grad_profile_cmp(self):
        raise NotImplementedError


class ParallelOpsFactory(MetaFactory):
    """Factory for training/evaluating a net under MindSpore parallel modes.

    Wraps ``Model.train``/``Model.eval`` with the auto-parallel context setup
    for stand-alone, data-, semi-auto-, auto- and hybrid-parallel runs, and
    saves/loads checkpoints so runs can be compared parameter-by-parameter.
    """

    def __init__(self, loss='CrossEntropyLossMean',
                 dataset_sink_mode=False,
                 learning_rate=0.01,
                 momentum=0.9,
                 metrics=None,
                 save_checkpoint_steps=sys.maxsize):
        super().__init__()
        self.dataset_sink_mode = dataset_sink_mode
        self.learning_rate = learning_rate
        self.momentum = momentum
        self.model = None
        self.metrics = metrics
        self.eval_network = None
        # tolerance used by mindspore_eval_result_cmp
        self.accuracy_assert = 1e-5
        self.save_checkpoint_steps = save_checkpoint_steps
        # Map the loss name to a loss cell.  NOTE(review): any value other
        # than the three recognized names (including a loss object passed
        # directly) yields loss_fn=None, i.e. the net must compute its own
        # loss -- this preserves the original behavior.
        if loss == 'CrossEntropyLossMean':
            self.loss_fn = SoftmaxCrossEntropyWithLogits(reduction='mean')
        elif loss == 'CrossEntropyLossSum':
            self.loss_fn = SoftmaxCrossEntropyWithLogits(reduction='sum')
        elif loss == 'CrossEntropyLossNone':
            self.loss_fn = SoftmaxCrossEntropyWithLogits()
        else:
            self.loss_fn = None
        self.opt = None

    def _model_train_and_save_ckpt(self, net, dataset, epoch, eval_network=None):
        """Train *net* for *epoch* epochs and return the newest checkpoint dict.

        A per-rank checkpoint directory is wiped before training so only
        checkpoints from this run are present.
        """
        self.opt = Momentum(learning_rate=self.learning_rate, momentum=self.momentum,
                            params=net.get_parameters())
        self.model = Model(network=net,
                           eval_network=eval_network,
                           loss_fn=self.loss_fn,
                           optimizer=self.opt,
                           metrics=self.metrics)
        ckpt_config = CheckpointConfig(keep_checkpoint_max=1,
                                       save_checkpoint_steps=self.save_checkpoint_steps)
        ckpt_path = './rank_{}_ckpt'.format(self.global_rank_id)
        ckpt_callback = ModelCheckpoint(prefix='parallel', directory=ckpt_path, config=ckpt_config)
        clean_all_ckpt_files(ckpt_path)
        self.model.train(epoch=epoch,
                         train_dataset=dataset,
                         callbacks=[ckpt_callback],
                         dataset_sink_mode=self.dataset_sink_mode)
        newest_ckpt_file = find_newest_ckpt_file(ckpt_path)
        return load_checkpoint(newest_ckpt_file)

    def __mindspore_impl(self, parallel_mode, net,
                         dataset, epoch,
                         device_num=None,
                         fully_use_devices=True,
                         tensor_slice_align_enable=False,
                         tensor_slice_align_size=16,
                         elementwise_op_strategy_follow=False,
                         full_batch=False,
                         strategy_ckpt_save_file="./strategy_stage1.ckpt",
                         strategy_ckpt_load_file="",
                         eval_network=None,
                         auto_parallel_search_mode='dynamic_programming',
                         dataset_strategy='data_parallel'):
        """Configure the auto-parallel context, train, and return the ckpt.

        The context is reset both before and after the run so state never
        leaks between tests.  device_num is only forwarded when given.
        """
        set_algo_parameters(tensor_slice_align_enable=tensor_slice_align_enable,
                            tensor_slice_align_size=tensor_slice_align_size,
                            fully_use_devices=fully_use_devices,
                            elementwise_op_strategy_follow=elementwise_op_strategy_follow)
        context.reset_auto_parallel_context()
        parallel_kwargs = dict(parallel_mode=parallel_mode,
                               full_batch=full_batch,
                               strategy_ckpt_save_file=strategy_ckpt_save_file,
                               strategy_ckpt_load_file=strategy_ckpt_load_file,
                               search_mode=auto_parallel_search_mode,
                               dataset_strategy=dataset_strategy)
        if device_num is not None:
            parallel_kwargs['device_num'] = device_num
        context.set_auto_parallel_context(**parallel_kwargs)
        ckpt_dict = self._model_train_and_save_ckpt(net=net, dataset=dataset, epoch=epoch,
                                                    eval_network=eval_network)
        context.reset_auto_parallel_context()
        return ckpt_dict

    def _mindspore_semi_parallel_impl(self, net, dataset,
                                      epoch, device_num,
                                      full_batch=False,
                                      strategy_ckpt_save_file="./strategy_stage1.ckpt",
                                      strategy_ckpt_load_file="",
                                      eval_network=None,
                                      dataset_strategy='data_parallel'):
        """Train under SEMI_AUTO_PARALLEL and return the checkpoint dict."""
        return self.__mindspore_impl(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL,
                                     net=net, dataset=dataset,
                                     epoch=epoch, device_num=device_num,
                                     full_batch=full_batch,
                                     strategy_ckpt_save_file=strategy_ckpt_save_file,
                                     strategy_ckpt_load_file=strategy_ckpt_load_file,
                                     eval_network=eval_network,
                                     dataset_strategy=dataset_strategy)

    def _mindspore_auto_parallel_impl(self, net, dataset,
                                      epoch, device_num,
                                      fully_use_devices=False,
                                      tensor_slice_align_enable=False,
                                      tensor_slice_align_size=16,
                                      elementwise_op_strategy_follow=False,
                                      full_batch=False,
                                      strategy_ckpt_save_file="./strategy_stage1.ckpt",
                                      strategy_ckpt_load_file="",
                                      eval_network=None,
                                      auto_parallel_search_mode='dynamic_programming',
                                      dataset_strategy='data_parallel'):
        """Train under AUTO_PARALLEL and return the checkpoint dict."""
        return self.__mindspore_impl(parallel_mode=ParallelMode.AUTO_PARALLEL,
                                     net=net, dataset=dataset,
                                     epoch=epoch, device_num=device_num,
                                     tensor_slice_align_enable=tensor_slice_align_enable,
                                     tensor_slice_align_size=tensor_slice_align_size,
                                     fully_use_devices=fully_use_devices,
                                     elementwise_op_strategy_follow=elementwise_op_strategy_follow,
                                     full_batch=full_batch,
                                     strategy_ckpt_save_file=strategy_ckpt_save_file,
                                     strategy_ckpt_load_file=strategy_ckpt_load_file,
                                     eval_network=eval_network,
                                     auto_parallel_search_mode=auto_parallel_search_mode,
                                     dataset_strategy=dataset_strategy)

    def _mindspore_data_parallel_impl(self, net, dataset,
                                      epoch, device_num,
                                      fully_use_devices=False,
                                      tensor_slice_align_enable=False,
                                      tensor_slice_align_size=16,
                                      elementwise_op_strategy_follow=False,
                                      full_batch=False,
                                      strategy_ckpt_save_file="./strategy_stage1.ckpt",
                                      strategy_ckpt_load_file="",
                                      eval_network=None,
                                      dataset_strategy='data_parallel'):
        """Train under DATA_PARALLEL and return the checkpoint dict."""
        return self.__mindspore_impl(parallel_mode=ParallelMode.DATA_PARALLEL,
                                     net=net, dataset=dataset,
                                     epoch=epoch, device_num=device_num,
                                     tensor_slice_align_enable=tensor_slice_align_enable,
                                     tensor_slice_align_size=tensor_slice_align_size,
                                     fully_use_devices=fully_use_devices,
                                     elementwise_op_strategy_follow=elementwise_op_strategy_follow,
                                     full_batch=full_batch,
                                     strategy_ckpt_save_file=strategy_ckpt_save_file,
                                     strategy_ckpt_load_file=strategy_ckpt_load_file,
                                     eval_network=eval_network,
                                     dataset_strategy=dataset_strategy)

    def _mindspore_hybrid_parallel_impl(self, net, dataset,
                                        epoch, device_num,
                                        strategy_ckpt_save_file="./strategy_stage1.ckpt",
                                        strategy_ckpt_load_file=""):
        """Train under HYBRID_PARALLEL and return the checkpoint dict."""
        return self.__mindspore_impl(parallel_mode=ParallelMode.HYBRID_PARALLEL,
                                     strategy_ckpt_save_file=strategy_ckpt_save_file,
                                     strategy_ckpt_load_file=strategy_ckpt_load_file,
                                     net=net, dataset=dataset, epoch=epoch, device_num=device_num)

    def _mindspore_standalone_impl(self, net, dataset,
                                   epoch, full_batch=False,
                                   strategy_ckpt_save_file="",
                                   strategy_ckpt_load_file="",
                                   eval_network=None):
        """Train on a single device (STAND_ALONE) and return the ckpt dict."""
        return self.__mindspore_impl(parallel_mode=ParallelMode.STAND_ALONE,
                                     net=net, dataset=dataset,
                                     epoch=epoch, full_batch=full_batch,
                                     strategy_ckpt_save_file=strategy_ckpt_save_file,
                                     strategy_ckpt_load_file=strategy_ckpt_load_file,
                                     eval_network=eval_network)

    def _model_eval(self, dataset):
        """Evaluate the previously trained model on *dataset*."""
        output = self.model.eval(dataset, dataset_sink_mode=self.dataset_sink_mode)
        return output

    def __mindspore_eval(self, parallel_mode, dataset,
                         strategy_ckpt_load_file="./strategy_stage1.ckpt",
                         strategy_ckpt_save_file="",
                         full_batch=False):
        """Evaluate under *parallel_mode*, resetting context before/after."""
        context.reset_auto_parallel_context()
        context.set_auto_parallel_context(parallel_mode=parallel_mode,
                                          full_batch=full_batch,
                                          strategy_ckpt_load_file=strategy_ckpt_load_file,
                                          strategy_ckpt_save_file=strategy_ckpt_save_file)
        output = self._model_eval(dataset=dataset)
        context.reset_auto_parallel_context()
        return output

    def mindspore_semi_parallel_impl(self, *args, **kwargs):
        raise NotImplementedError

    def mindspore_auto_parallel_impl(self, *args, **kwargs):
        raise NotImplementedError

    def mindspore_data_parallel_impl(self, *args, **kwargs):
        raise NotImplementedError

    def mindspore_standalone_impl(self, *args, **kwargs):
        raise NotImplementedError

    # NOTE: the 'prallel' spelling below is a historical typo kept so that
    # existing callers are not broken.
    def mindspore_semi_prallel_eval(self, dataset,
                                    strategy_ckpt_load_file="./strategy_stage1.ckpt",
                                    strategy_ckpt_save_file="", full_batch=False):
        """Evaluate under SEMI_AUTO_PARALLEL."""
        return self.__mindspore_eval(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL,
                                     dataset=dataset,
                                     full_batch=full_batch,
                                     strategy_ckpt_load_file=strategy_ckpt_load_file,
                                     strategy_ckpt_save_file=strategy_ckpt_save_file)

    def mindspore_hybrid_prallel_eval(self, dataset,
                                      strategy_ckpt_load_file="./strategy_stage1.ckpt",
                                      strategy_ckpt_save_file="", full_batch=False):
        """Evaluate under HYBRID_PARALLEL."""
        return self.__mindspore_eval(parallel_mode=ParallelMode.HYBRID_PARALLEL,
                                     dataset=dataset,
                                     full_batch=full_batch,
                                     strategy_ckpt_load_file=strategy_ckpt_load_file,
                                     strategy_ckpt_save_file=strategy_ckpt_save_file)

    def mindspore_standalone_eval(self, dataset, strategy_ckpt_load_file="", full_batch=False):
        """Evaluate on a single device (STAND_ALONE)."""
        return self.__mindspore_eval(parallel_mode=ParallelMode.STAND_ALONE,
                                     dataset=dataset,
                                     full_batch=full_batch,
                                     strategy_ckpt_load_file=strategy_ckpt_load_file)

    def mindspore_auto_parallel_eval(self, dataset,
                                     strategy_ckpt_load_file="./strategy_stage1.ckpt",
                                     strategy_ckpt_save_file="", full_batch=False):
        """Evaluate under AUTO_PARALLEL."""
        return self.__mindspore_eval(parallel_mode=ParallelMode.AUTO_PARALLEL,
                                     dataset=dataset,
                                     full_batch=full_batch,
                                     strategy_ckpt_load_file=strategy_ckpt_load_file,
                                     strategy_ckpt_save_file=strategy_ckpt_save_file)

    def checkpoint_cmp(self, *args, **kwargs):
        raise NotImplementedError

    def mindspore_eval_result_cmp(self, output1, output2):
        """Assert that every configured metric matches between two eval runs."""
        for metrics_key in self.metrics:
            allclose_nparray(output1[metrics_key], output2[metrics_key], self.accuracy_assert,
                             self.accuracy_assert)


class AnyNetFactory(MetaFactory):
    """Wraps either a ``Cell`` or a ``Primitive`` into a uniform callable net.

    A Cell is used as-is; a Primitive is adapted through the inner ``AnyNet``
    Cell, which appends the fixed constant arguments to every call.
    """

    class AnyNet(Cell):
        """Adapter Cell binding trailing constant args to a primitive op."""

        def __init__(self, op_obj, *primitive_constant_args):
            super().__init__()
            self.net = op_obj
            self.primitive_constant_args = primitive_constant_args

        def construct(self, *inputs):
            # Runtime inputs first, then the bound constants.
            return self.net(*inputs, *self.primitive_constant_args)

    def __init__(self, net, *primitive_constant_args):
        """Accept a Cell or a Primitive; anything else is rejected.

        :param net: the Cell or Primitive to wrap
        :param primitive_constant_args: constants appended to every call
            (only used when *net* is a Primitive)
        :raises ValueError: if *net* is neither a Cell nor a Primitive
        """
        super().__init__()
        if isinstance(net, Cell):
            self.net = net
        elif isinstance(net, Primitive):
            self.net = AnyNetFactory.AnyNet(net, *primitive_constant_args)
        else:
            raise ValueError('unrecognized net type')

    def __call__(self, *args):
        return self.net(*args)
