# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import onnx
import numpy as np
import onnx_graphsurgeon as gs
from onnxsim import simplify
from onnx import helper
from torch.onnx import symbolic_helper
from torch.onnx.symbolic_helper import parse_args
import torch

@gs.Graph.register()
def replace_with_clip(self, inputs, outputs):
    """Splice a PPScatterPlugin layer between *inputs* and *outputs*.

    Disconnects the given tensors from their current neighbours so the new
    plugin node becomes the only producer/consumer, then creates the layer.
    Returns the output tensors of the new layer (gs.Graph.layer contract).
    """
    # Detach downstream consumers of the inputs and upstream producers of
    # the outputs; the plugin node will take their place.
    for tensor in inputs:
        tensor.outputs.clear()
    for tensor in outputs:
        tensor.inputs.clear()

    # NOTE(review): dense_shape is hard-coded to a 496x432 grid — presumably
    # the point-pillar scatter canvas; confirm against the model config.
    attrs = {"dense_shape": np.array([496, 432])}

    return self.layer(name="PPScatter_0", op="PPScatterPlugin",
                      inputs=inputs, outputs=outputs, attrs=attrs)

def loop_node(graph, current_node, loop_time=0):
  """Walk *loop_time* steps forward from *current_node* along the graph.

  At each step the next node is the first node in ``graph.nodes`` whose
  first input tensor is the current node's first output tensor.

  Args:
    graph: object with a ``nodes`` sequence (e.g. a gs.Graph).
    current_node: node to start from.
    loop_time: number of forward steps to take.

  Returns:
    The node reached after ``loop_time`` steps; *current_node* itself when
    ``loop_time`` is 0.  (Bug fix: the original referenced ``next_node``
    before assignment and raised UnboundLocalError for loop_time == 0.)

  Raises:
    IndexError: if no consumer node exists at some step.
  """
  for _ in range(loop_time):
    candidates = [node for node in graph.nodes
                  if node.inputs and current_node.outputs
                  and node.inputs[0] == current_node.outputs[0]]
    current_node = candidates[0]
  return current_node

def simplify_postprocess(onnx_model):
  """Manually annotate shapes/dtypes downstream of DpsCostVolumePlugin_1.

  The custom plugin breaks ONNX shape inference, leaving every tensor after
  it with unknown shape.  This walks the nodes following the plugin and
  fills in shapes/dtypes by hand (Conv/Relu/Add/GridSample/Cast rules).

  Args:
    onnx_model: a loaded ``onnx.ModelProto``.

  Returns:
    ``onnx.ModelProto`` with the shape/dtype annotations applied.

  Raises:
    ValueError: if the target plugin node is not present in the graph.
  """
  print("Use onnx_graphsurgeon to adjust postprocessing part in the onnx...")
  graph = gs.import_onnx(onnx_model)

  target_node_name = "DpsCostVolumePlugin_1"
  target_node = None
  node_idx = None
  for idx, node in enumerate(graph.nodes):
    if node.name == target_node_name:
      target_node = node
      node_idx = idx
      break

  # Bug fix: the original fell through with node_idx = None and then crashed
  # with "TypeError: unsupported operand ... NoneType" on range(node_idx+1).
  if target_node is None:
    raise ValueError(f"node '{target_node_name}' not found in graph")

  # NOTE(review): plugin output shape is hard-coded for one input
  # resolution — confirm against the deployed model.
  target_node.outputs[0].dtype = onnx.TensorProto.FLOAT
  target_node.outputs[0].shape = [1, 64, 72, 120, 232]

  # Propagate shape/dtype through every node after the plugin.
  for idx in range(node_idx + 1, len(graph.nodes)):
    current_node = graph.nodes[idx]
    if current_node.inputs[0].shape is None:
      # Inherit from the immediately preceding node's first output.
      previous_node = graph.nodes[idx - 1]
      current_node.inputs[0].shape = previous_node.outputs[0].shape
      current_node.inputs[0].dtype = previous_node.outputs[0].dtype
    if current_node.outputs[0].shape is None:
      if "Conv" in current_node.name:
        in_shape = current_node.inputs[0].shape
        # inputs[1] is the weight tensor; dim 0 is the output channel count.
        channels = current_node.inputs[1].shape[0]
        current_node.outputs[0].shape = [in_shape[0], channels,
                                         in_shape[2], in_shape[3], in_shape[4]]
      elif "Relu" in current_node.name:
        # Element-wise op: output shape equals input shape.
        current_node.outputs[0].shape = current_node.inputs[0].shape
      current_node.outputs[0].dtype = current_node.inputs[0].dtype
    if "Add_5" in current_node.name:
      current_node.outputs[0].shape = current_node.inputs[0].shape
    if "GridSamplePluginDynamic" in current_node.name:
      # NOTE(review): hard-coded plugin output shape — confirm.
      current_node.outputs[0].shape = [1, 32, 20, 304, 288]
    if "Cast_7" in current_node.name:
      current_node.outputs[0].shape = current_node.inputs[0].shape

  return gs.export_onnx(graph)

def convert_namespace(onnx_model, initializers):
  """Adjust the op namespace of a deform-conv node in the graph.

  NOTE(review): unfinished stub — it imports the graph and names a target
  node but performs no modification and implicitly returns None.
  *initializers* is accepted but unused.
  """
  print("Use onnx_graphsurgeon to adjust namespace part in the onnx...")
  graph = gs.import_onnx(onnx_model)
  target_node_name = 'MMCVDeformConv2d_1823'  # node this stub was meant to edit
  
def convert_dtype(onnx_model, initializers):
  """Cast every int64 constant-initializer input in the graph to int32.

  TensorRT-oriented cleanup: int64 weights/attributes coming from PyTorch
  exports are narrowed to int32.

  Args:
    onnx_model: a loaded ``onnx.ModelProto``.
    initializers: iterable of initializer protos (``model.graph.initializer``);
      only inputs whose name matches an initializer are touched.

  Returns:
    ``onnx.ModelProto`` with the converted constants.
  """
  print("Use onnx_graphsurgeon to convert dtype part in the onnx...")

  graph = gs.import_onnx(onnx_model)
  # Hoist the name lookup into a set: the original re-scanned the whole
  # initializer list for every input (O(nodes * initializers)).
  initializer_names = {init.name for init in initializers}
  for node in graph.nodes:
    for tensor in node.inputs:
      # Only gs.Constant tensors (graph initializers) carry .values.
      if tensor.name in initializer_names and tensor.values.dtype == np.int64:
        tensor.values = tensor.values.astype(np.int32)
  return gs.export_onnx(graph)
def fusion_unfold_layer(onnx_model):
  """Fuse each Pad->Gather->Gather->Transpose->Reshape chain into a single
  TorchUnfoldPluginDynamic node.

  The five-node chain is the ONNX expansion of a torch ``unfold``.  The
  first fused chain receives 7x7 / stride-4 attributes, every later one
  3x3 / stride-2 (NOTE(review): hard-coded to this network's two unfold
  configurations — confirm if the model changes).

  Args:
    onnx_model: a loaded ``onnx.ModelProto``.

  Returns:
    ``onnx.ModelProto`` with the fused plugin nodes.
  """
  graph = gs.import_onnx(onnx_model=onnx_model)
  pattern = ["Pad", "Gather", "Gather", "Transpose", "Reshape"]
  span = len(pattern)
  unfold_idx = 0
  i = 0
  # A while-loop is required: every fusion shrinks the node list by span-1,
  # so the original range() over the initial length could index past the
  # end (and also missed the last possible window by one).
  while i + span <= len(graph.nodes):
    window = graph.nodes[i:i + span]
    if all(op in node.name for op, node in zip(pattern, window)):
      if unfold_idx == 0:
        op_attrs = {"kernel_size": [7, 7], "dilation": [1, 1],
                    "padding": [2, 2], "stride": [4, 4]}
      else:
        op_attrs = {"kernel_size": [3, 3], "dilation": [1, 1],
                    "padding": [1, 1], "stride": [2, 2]}
      inputs = [window[0].inputs[0]]
      outputs = [window[-1].outputs[0]]
      new_node = create_unfold_node(inputs, outputs, op_attrs, unfold_idx)
      # Bug fix: the original did graph.nodes.remove(graph.nodes[j]) for
      # j in range(i, i+5) while the list was shrinking, which deleted
      # every *other* node instead of the five matched ones.
      del graph.nodes[i:i + span]
      graph.nodes.insert(i, new_node)
      unfold_idx += 1
    i += 1
  # graph.cleanup().toposort()
  return gs.export_onnx(graph=graph)

def create_unfold_node(inputs, outputs, op_attrs, unfold_idx):
  """Build a raw onnx NodeProto for the TorchUnfoldPluginDynamic plugin.

  Args:
    inputs: tensors whose names become the node's inputs.
    outputs: tensors whose names become the node's outputs.
    op_attrs: mapping of attribute name -> value, copied onto the node.
    unfold_idx: running index used to give the node a unique name.

  Returns:
    The constructed ``onnx.NodeProto``.
  """
  node = helper.make_node(op_type="nvinfer1::TorchUnfoldPluginDynamicPlugin",
                          name=f"TorchUnfoldPluginDynamic_{unfold_idx}",
                          inputs=[tensor.name for tensor in inputs],
                          outputs=[tensor.name for tensor in outputs])
  # Attach the plugin attributes (kernel_size / dilation / padding / stride).
  for key, value in op_attrs.items():
    node.attribute.append(helper.make_attribute(key, value))
  return node
  

@gs.Graph.register()
def replace_with_clip(self, inputs, outputs, op_attrs, unfold_idx):
  """Splice a TorchUnfoldPluginDynamic layer between *inputs* and *outputs*.

  NOTE(review): this redefinition shadows the replace_with_clip registered
  earlier in this file — consider renaming one of the two.

  Args:
    inputs: tensors to feed the plugin (their consumers are detached).
    outputs: tensors the plugin produces (their producers are detached).
    op_attrs: plugin attributes (kernel_size / dilation / padding / stride).
    unfold_idx: running index used to give the layer a unique name.
  """
  for inp in inputs:
    inp.outputs.clear()
  for out in outputs:
    out.inputs.clear()
  # Bug fix: the original omitted inputs=, creating the plugin layer with no
  # inputs even though the tensors had just been disconnected for it.
  return self.layer(name=f"TorchUnfoldPluginDynamic_{unfold_idx}",
                    op="TorchUnfoldPluginDynamic",
                    inputs=inputs, outputs=outputs, attrs=op_attrs)

def register():
    """Register the custom ``unfold`` symbolic with torch.onnx.

    Maps the op onto ``nvinfer1::TorchUnfoldPluginDynamic`` so TensorRT can
    pick it up as a plugin; the int-list arguments are forwarded as
    ``*_is`` keyword attributes.  Should be run before torch.onnx.export().
    """

    def unfold(g, tensor, kernel_size, dilation, padding, stride):
        # "is" asks symbolic_helper to resolve the traced value into a
        # constant list of ints.
        def as_ints(value):
            return symbolic_helper._maybe_get_const(value, "is")

        plugin_kwargs = {
            "kernel_size_is": as_ints(kernel_size),
            "dilation_is": as_ints(dilation),
            "padding_is": as_ints(padding),
            "stride_is": as_ints(stride),
        }
        return g.op("nvinfer1::TorchUnfoldPluginDynamic", tensor,
                    **plugin_kwargs)

    _reg(unfold)


import typing

# Opset version passed to torch.onnx.register_custom_op_symbolic below.
_OPSET_VERSION = 12
# Records the op names registered so far via _reg().
_registered_ops: typing.AbstractSet[str] = set()


def _reg(symbolic_fn: typing.Callable) -> None:
    """Register *symbolic_fn* as a custom ONNX symbolic and record its name.

    The op name is derived from the function's ``__name__`` under the
    default (empty) namespace, e.g. ``::unfold``.
    """
    op_name = f"::{symbolic_fn.__name__}"
    torch.onnx.register_custom_op_symbolic(op_name, symbolic_fn, _OPSET_VERSION)
    _registered_ops.add(op_name)
  

if __name__ == '__main__':
  # Make the custom unfold symbolic available (only matters for
  # torch.onnx.export; harmless for the graph surgery below).
  register()
  source_model = onnx.load("dsgn.onnx")
  # Kept for the (currently disabled) convert_dtype pass.
  initializers = source_model.graph.initializer
  fused_model = fusion_unfold_layer(source_model)
  onnx.save(fused_model, "dsgn_1.onnx")
  print('finished exporting onnx')
