from copy import deepcopy
from functools import partial
import logging
from pathlib import Path
from subprocess import call
from typing import Dict, NoReturn, Optional, Sequence, Union
import warnings

import numpy as np
import onnx
import torch
import torch.nn as nn
from mmengine.config import Config
from mmdeploy.apis import build_task_processor
from mmdeploy.apis.core import PIPELINE_MANAGER
from mmdeploy.apis.onnx.optimizer import *  # noqa
from mmdeploy.apis.onnx.passes import optimize_onnx
from mmdeploy.core import RewriterContext, patch_model
from mmdeploy.apis.core.pipeline_manager import no_mp
from mmdeploy.utils import Backend, IR, get_backend, get_backend_config, \
                           get_ir_config, get_onnx_config
from mmdet.apis import init_detector

from .onnx_passes import build_pass_executer
from ..preprocess import ImagePreprocessor
from ..utils import create_data_sample
from .revised_codes.config_utils import get_dynamic_axes


class ModelConverter:
    """Convert a detection/segmentation model into ONNX and Ascend OM files.

    Pipeline: load the MMEngine model config, pick a bundled deploy config
    for the task, export the PyTorch model to ONNX inside mmdeploy's
    ``RewriterContext``, run model-type specific ONNX passes, then invoke
    the ``atc`` CLI to produce static-batch or dynamic-batch OM files.
    """

    def __init__(self,
                 config_file: str,
                 task: str = 'det',
                 input_shape: Optional[Sequence[int]] = None
                ) -> None:
        """Initialize the converter.

        Args:
            config_file: Path to the MMEngine model config file.
            task: Either ``'det'`` (object detection) or ``'seg'``
                (instance segmentation).
            input_shape: Model input spatial shape. An int or 1-element
                sequence is expanded to a square; ``None`` falls back to
                the deploy config's ``onnx_config.input_shape``.
        """
        assert task in ['det', 'seg'], \
            'Only Support two task: det(ObjectDetection) and ' \
            f'seg(InstanceSegmentation), but received: {task}'
        self.task = task
        self.config_file = config_file
        self.model_cfg = Config.fromfile(config_file)
        self.model_type = self.model_cfg.model.type

        # Resolve deploy config, input shape and backend (order matters:
        # _set_input_shape reads/updates self.deploy_cfg).
        self.deploy_cfg = None
        self._set_deploy_config()
        self.input_shape = None
        self._set_input_shape(input_shape)
        self.backend = get_backend(self.deploy_cfg).value

        # Context info consumed by RewriterContext during ONNX export.
        self.context_info = None
        self._set_context_info()
        self.onnx_pass_executer = build_pass_executer(self.model_type)

        self.demo_image = './demo/test_images/demo.jpg'
        self.preprocessor = ImagePreprocessor(self.model_cfg, self.input_shape)

    def _set_deploy_config(self) -> None:
        """Load the bundled deploy config for the task and normalize its
        ONNX-export options and backend type."""
        deploy_config_dir = Path(__file__).parent / 'deploy_configs'
        cfg_name = ('object_detection.py' if self.task == 'det'
                    else 'instance_segmentation.py')
        deploy_cfg = Config.fromfile(str(deploy_config_dir / cfg_name))

        # Fill export arguments with their defaults so later code can rely
        # on the keys being present.
        onnx_cfg = get_onnx_config(deploy_cfg)
        opset_version = onnx_cfg.get('opset_version', 11)
        keep_initializers_as_inputs = onnx_cfg.get(
            'keep_initializers_as_inputs', True)
        # verbose export when doc strings are kept or verbosity requested
        verbose = not onnx_cfg.get('strip_doc_string', True) or \
                onnx_cfg.get('verbose', False)
        onnx_cfg.update(
            opset_version=opset_version,
            keep_initializers_as_inputs=keep_initializers_as_inputs,
            verbose=verbose
        )

        # Force the backend type resolved from the deploy config itself.
        backend = get_backend(deploy_cfg).value
        if 'backend_config' in deploy_cfg and \
                isinstance(deploy_cfg['backend_config'], dict):
            deploy_cfg['backend_config']['type'] = backend
        else:
            deploy_cfg['backend_config'] = dict(type=backend)
        self.deploy_cfg = deploy_cfg

    def _set_input_shape(self, input_shape: Optional[Sequence[int]] = None
                        ) -> None:
        """Normalize ``input_shape`` to a positive (h, w) tuple and write
        it into both the ONNX config and the backend config.

        Raises:
            ValueError: If the shape has more than two dimensions.
        """
        if not input_shape:
            input_shape = self.deploy_cfg.onnx_config.input_shape

        if isinstance(input_shape, int):
            assert input_shape > 0
            input_shape = (input_shape, input_shape)
        else:
            assert all(x > 0 for x in input_shape)
            if len(input_shape) == 1:
                input_shape = (input_shape[0], input_shape[0])
            elif len(input_shape) == 2:
                input_shape = tuple(input_shape)
            else:
                raise ValueError(f'Invalid input shape: {input_shape}')

        self.input_shape = input_shape
        self.deploy_cfg.onnx_config.input_shape = input_shape

        # Keep the backend config's input shape in sync, warning when we
        # override a conflicting value.
        bkd_cfg = self.deploy_cfg.backend_config
        bkd_input_shape = bkd_cfg.model_inputs.input_shapes.get('input', None)
        if bkd_input_shape and bkd_input_shape != input_shape:
            warnings.warn(f'the model input shape {bkd_input_shape} in backend '
                          f'config will be overwritten with {input_shape}.',
                          category=RuntimeWarning)
        bkd_cfg.model_inputs.input_shapes.input = input_shape

    def _set_context_info(self) -> None:
        """Build the keyword arguments later passed to ``RewriterContext``."""
        onnx_cfg = get_onnx_config(self.deploy_cfg)
        # Only run mmdeploy's ONNX graph optimization when requested.
        onnx_custom_passes = optimize_onnx if onnx_cfg.optimize else None
        self.context_info = dict(
            ir=IR.get(get_ir_config(self.deploy_cfg)['type']),
            cfg=deepcopy(self.deploy_cfg),
            backend=self.backend,
            opset=onnx_cfg.opset_version,
            onnx_custom_passes=onnx_custom_passes,
        )

    def create_trace_data(self,
                          input_image: Optional[str] = None,
                          batch_size: int = 1
                         ) -> Dict[str, Union[np.ndarray, list, str]]:
        """Preprocess an image into the inputs used to trace the model.

        Args:
            input_image: Image path; defaults to the bundled demo image.
            batch_size: Number of (identical) samples in the traced batch.

        Returns:
            Dict with ``inputs`` (tensor), ``data_samples`` (list) and
            ``mode='predict'`` matching the model's forward signature.
        """
        if not input_image:
            input_image = self.demo_image
        img, img_meta = self.preprocessor.preprocess(input_image)
        model_inputs = torch.from_numpy(img)
        data_samples = [create_data_sample(img_meta)]
        if batch_size > 1:
            # expand() repeats the single image view along the batch dim.
            model_inputs = model_inputs.expand(batch_size, -1, -1, -1)
            data_samples = data_samples * batch_size
        return dict(inputs=model_inputs,
                    data_samples=data_samples,
                    mode='predict')

    def init_model(self, checkpoint_file: str) -> nn.Module:
        """Build the PyTorch model on CPU and load ``checkpoint_file``."""
        task_processor = build_task_processor(self.model_cfg, self.deploy_cfg,
                                              device='cpu')
        return task_processor.build_pytorch_model(checkpoint_file)

    def patch_model(self,
                    pytorch_model: nn.Module,
                    trace_data: Dict[str, Union[np.ndarray, list, str]]
                   ) -> nn.Module:
        """Apply mmdeploy rewrites and pre-bind non-tensor forward kwargs.

        The returned model's ``forward`` only takes the input tensor, as
        required by ``torch.onnx.export``.
        """
        # NOTE: the bare name below resolves to the module-level
        # mmdeploy ``patch_model`` function, not this method.
        patched_model = patch_model(pytorch_model,
                                    cfg=self.deploy_cfg,
                                    backend=self.backend,
                                    ir=self.context_info['ir'])
        # Bind data_samples/mode so the export call passes only the tensor.
        input_metas = dict(
            data_samples=trace_data['data_samples'],
            mode=trace_data['mode']
        )
        patched_model.forward = partial(patched_model.forward, **input_metas)
        return patched_model

    def pytorch2onnx(self,
                     model: nn.Module,
                     trace_data: Dict[str, Union[np.ndarray, list, str]],
                     save_file: str = None,
                     use_dynamic_batch_size: bool = False
                    ) -> None:
        """Trace ``model`` with ``trace_data['inputs']`` and export ONNX.

        Args:
            model: Patched model whose forward takes only the input tensor.
            trace_data: Output of :meth:`create_trace_data`.
            save_file: Destination ONNX path (required).
            use_dynamic_batch_size: Mark axis 0 of every input/output as
                dynamic so a dynamic-batch OM can be built later.
        """
        assert save_file, 'Must provide a path to save ONNX!'
        onnx_cfg = get_onnx_config(self.deploy_cfg)
        model_inputs = deepcopy(trace_data['inputs'])
        dynamic_axes = None
        if use_dynamic_batch_size:
            axis_names = onnx_cfg.input_names + onnx_cfg.output_names
            dynamic_axes = {name: {0: '-1'} for name in axis_names}

        torch.onnx.export(
            model,
            model_inputs,
            save_file,
            export_params=onnx_cfg.export_params,
            input_names=onnx_cfg.input_names,
            output_names=onnx_cfg.output_names,
            opset_version=onnx_cfg.opset_version,
            dynamic_axes=dynamic_axes,
            keep_initializers_as_inputs=onnx_cfg.keep_initializers_as_inputs,
            verbose=onnx_cfg.verbose
        )
        print(f'Successfully exported ONNX model: {save_file}')

    def execute_onnx_passes(self,
                            onnx_file: str,
                            save_file: Optional[str] = None
                           ) -> None:
        """Run model-type specific ONNX passes; modify in place when no
        separate ``save_file`` is given."""
        if not save_file:
            save_file = onnx_file
        res = self.onnx_pass_executer.execute(onnx_file, save_file)
        if res == 0:
            print(f'Successfully modified ONNX model, save in {save_file}')

    @staticmethod
    def _strip_om_suffix(save_file: str) -> str:
        """Drop a trailing '.om' extension for atc's ``--output`` argument.

        ``str.rstrip('.om')`` would strip any run of '.', 'o', 'm'
        characters (e.g. 'demo.om' -> 'de'), so strip the suffix properly.
        """
        suffix = '.om'
        if save_file.endswith(suffix):
            return save_file[:-len(suffix)]
        return save_file

    def _build_atc_command(self,
                           onnx_file: str,
                           save_file: str,
                           batch_dim: Union[int, str],
                           soc_version: Optional[str]
                          ) -> list:
        """Assemble the common part of the Ascend ``atc`` command line.

        Args:
            batch_dim: Concrete batch size, or ``-1`` for a dynamic batch.

        Raises:
            AssertionError: On multi-input models or missing soc_version.
        """
        bkd_cfg = get_backend_config(self.deploy_cfg)
        input_shapes = bkd_cfg.model_inputs.input_shapes
        assert len(input_shapes) == 1, 'At present support only single input.'
        input_name, dims = list(input_shapes.items())[0]
        input_shape_arg = f'{input_name}:{batch_dim},3,{dims[0]},{dims[1]}'

        if soc_version is None:
            soc_version = bkd_cfg.get('soc_version', None)
        assert soc_version, 'Must specify `soc_version` for atc command.'

        # framework=5 selects ONNX as atc's source framework.
        return [
            'atc', f'--model={onnx_file}', '--framework=5',
            f'--output={self._strip_om_suffix(save_file)}',
            '--input_format=NCHW',
            f'--input_shape={input_shape_arg}', f'--soc_version={soc_version}'
        ]

    def generate_static_om(self,
                           onnx_file: str,
                           save_file: str = None,
                           batch_size: int = None,
                           soc_version: Optional[str] = None
                          ) -> None:
        """Convert an ONNX file to a static-batch OM file via ``atc``.

        Raises:
            AssertionError: If the atc subprocess exits non-zero.
        """
        assert save_file, 'Must provide a path to save OM!'
        assert isinstance(batch_size, int) and batch_size > 0

        atc_command = self._build_atc_command(onnx_file, save_file,
                                              batch_size, soc_version)

        print(' '.join(atc_command))
        ret_code = call(atc_command)
        assert ret_code == 0, 'Convert ONNX to OM failed!'
        print(f'Successfully exported OM model: {save_file}')

    def generate_dynamic_batch_om(self,
                                  onnx_file: str,
                                  save_file: str = None,
                                  batch_sizes: Sequence[int] = None,
                                  soc_version: Optional[str] = None
                                 ) -> None:
        """Convert an ONNX file to a dynamic-batch OM file via ``atc``.

        Args:
            batch_sizes: 2..100 distinct batch sizes, each in [1, 2048],
                passed to atc's ``--dynamic_batch_size``.

        Raises:
            ValueError: If the number of batch sizes is out of range.
            AssertionError: If the atc subprocess exits non-zero.
        """
        assert save_file, 'Must provide a path to save OM!'
        # Normalize None so the length checks below raise the intended
        # ValueError instead of TypeError on len(None).
        batch_sizes = list(batch_sizes or [])

        # -1 marks the batch axis as dynamic in the atc input shape.
        atc_command = self._build_atc_command(onnx_file, save_file,
                                              -1, soc_version)

        # check batch sizes and make argument string
        if len(batch_sizes) < 2:
            raise ValueError('Specify at least two batch size if use argument '
                             '`--dynamic_batch_size`.')
        elif 2 <= len(batch_sizes) <= 100:
            assert all(1 <= x <= 2048 for x in batch_sizes), \
                f'Invalid batch size list: {batch_sizes}, valid value range' \
                ' of each batch size is [1, 2048].'
            atc_command.extend([
                f'--dynamic_batch_size={",".join(map(str, batch_sizes))}'])
        else:
            raise ValueError('Argument `--dynamic_batch_size` at most receive '
                             ' 100 batch size value.')

        print(' '.join(atc_command))
        ret_code = call(atc_command)
        assert ret_code == 0, 'Convert ONNX to OM failed!'
        print(f'Successfully exported OM model: {save_file}')

    def convert(self,
                checkpoint_file: str,
                input_image: Optional[str] = None,
                save_prefix: str = None,
                batch_sizes: Sequence[int] = None,
                soc_version: Optional[str] = None,
                use_dynamic_batch_size: bool = False
              ) -> None:
        """Run the full checkpoint -> ONNX -> OM conversion.

        With ``use_dynamic_batch_size`` and >= 2 batch sizes, one
        dynamic-batch OM is produced; otherwise one static OM per batch
        size. Output files are named ``{save_prefix}_dybs.*`` or
        ``{save_prefix}_bs{N}.*``.
        """
        bkd_cfg = get_backend_config(self.deploy_cfg)
        if not batch_sizes:
            batch_sizes = bkd_cfg.model_inputs.batch_sizes
        batch_sizes = sorted(set(batch_sizes))
        assert len(batch_sizes) > 0, 'Specify at least one batch size!'

        # convert ONNX to dynamic batch size OM
        if use_dynamic_batch_size and len(batch_sizes) >= 2:
            trace_data = self.create_trace_data(input_image)

            # initial pytorch model and convert to ONNX
            pytorch_model = self.init_model(checkpoint_file)
            onnx_save_file = save_prefix + '_dybs.onnx'
            with no_mp(), RewriterContext(**self.context_info), torch.no_grad():
                patched_model = self.patch_model(pytorch_model, trace_data)
                self.pytorch2onnx(patched_model, trace_data, onnx_save_file,
                                  use_dynamic_batch_size=True)

            # modify ONNX and convert to OM
            self.execute_onnx_passes(onnx_save_file)
            om_save_file = save_prefix + '_dybs.om'
            self.generate_dynamic_batch_om(onnx_save_file, om_save_file,
                                           batch_sizes, soc_version)
            return

        if use_dynamic_batch_size:
            # Requested dynamic batch but fewer than 2 sizes available;
            # fall back to static OM(s) and say so instead of silently
            # ignoring the flag.
            warnings.warn('`use_dynamic_batch_size` needs at least two batch '
                          'sizes; falling back to static OM export.',
                          category=RuntimeWarning)

        # convert ONNX to static OM, one model per batch size
        for batch_size in batch_sizes:
            trace_data = self.create_trace_data(input_image, batch_size)

            # initial pytorch model and convert to ONNX
            pytorch_model = self.init_model(checkpoint_file)
            onnx_save_file = save_prefix + f'_bs{batch_size}.onnx'
            with no_mp(), RewriterContext(**self.context_info), torch.no_grad():
                patched_model = self.patch_model(pytorch_model, trace_data)
                self.pytorch2onnx(patched_model, trace_data, onnx_save_file)

            # modify ONNX and convert to OM
            self.execute_onnx_passes(onnx_save_file)
            om_save_file = save_prefix + f'_bs{batch_size}.om'
            self.generate_static_om(onnx_save_file, om_save_file, batch_size,
                                    soc_version)

    @classmethod
    def fast_convert(cls,
                     config_file: str,
                     checkpoint_file: str,
                     task: str = 'det',
                     input_shape: Optional[Sequence[int]] = None,
                     input_image: Optional[str] = None,
                     save_prefix: str = None,
                     batch_sizes: Sequence[int] = None,
                     soc_version: Optional[str] = None,
                     use_dynamic_batch_size: bool = False
                    ) -> None:
        """One-shot convenience wrapper: build a converter and convert."""
        converter = cls(config_file, task=task, input_shape=input_shape)
        converter.convert(checkpoint_file,
                          input_image=input_image,
                          save_prefix=save_prefix,
                          batch_sizes=batch_sizes,
                          soc_version=soc_version,
                          use_dynamic_batch_size=use_dynamic_batch_size)
