from loguru import logger
from model.net import CLSNetV7
from my_utils.parsing_util import POOLING_SIZE
from tvm_graph.node import Node
import math
import torch
import os

# Checkpoint of the trained pool-size classifier used by the (currently
# disabled) model-based MaxPooling pool_size inference in ValidateUtil.
# NOTE(review): machine-specific absolute path — consider making this
# configurable via environment variable or config file.
POOL_SIZE_CLASSIFIER_PATH = "/home/kxp/workspace/tvm-reversion/results/2022-03-20-18-23-02-(train_one_for_all_attr_fix_arm.py)/checkpoints/model_ckpt_198.pth"


class ValidateUtil:
    """Shape-based validation and repair of recovered graph nodes.

    Each helper cross-checks a node's predicted layer parameters
    (filters, kernel size, strides, padding, ...) against the concrete
    input/output tensor shapes attached to the node and overwrites any
    parameter that is inconsistent with those shapes.  All repairs
    mutate the node in place; the node is returned for call chaining.
    """

    def __init__(self) -> None:
        pass

    @staticmethod
    def validateNode(node: Node) -> Node:
        """Validate and repair ``node.function_param`` against its shapes.

        Dispatches on ``node.function_type``; unhandled layer types are
        returned unchanged.  Returns the (mutated) node.
        """
        # logger.debug("Validating node {} ...".format(node.name))
        if node.function_type == "Cropping2D":
            ValidateUtil._validate_cropping2d(node)
        elif node.function_type == "Conv2D":
            ValidateUtil._validate_conv2d(node)
        elif node.function_type in ("MaxPooling2D", "MaxPooling3D",
                                    "AveragePooling2D", "AveragePooling3D"):
            ValidateUtil._validate_pooling(node)
        elif node.function_type in ("UpSampling", "UpSampling3D"):
            ValidateUtil._validate_upsampling(node)
        elif node.function_type == "ZeroPadding2D":
            ValidateUtil._validate_zero_padding2d(node)
        else:
            # TODO: Add more validation of these parameters
            pass
        return node

    @staticmethod
    def _validate_cropping2d(node: Node) -> None:
        """Fix the ``cropping`` amounts so in/out H/W agree (Keras [B, H, W, C])."""
        out_shape = node.out_shape
        tmp_in_shape = node.in_shape[0]
        if len(tmp_in_shape) == 3:
            # Batchless shape: prepend a batch dim so the H/W indices line up.
            tmp_in_shape = [1] + tmp_in_shape
        # Store the amounts as a LIST: individual entries are reassigned below
        # (the original default was a tuple, which made those item assignments
        # raise TypeError).
        if "cropping" not in node.function_param:
            node.function_param["cropping"] = [0, 0, 0, 0]
        else:
            node.function_param["cropping"] = list(node.function_param["cropping"])
        h_top, h_bottom, w_left, w_right = node.function_param["cropping"]
        # Repair the bottom/right amounts so the cropped size matches out_shape.
        if tmp_in_shape[1] - h_top - h_bottom != out_shape[1]:
            node.function_param["cropping"][1] = tmp_in_shape[1] - out_shape[1] - h_top
        if tmp_in_shape[2] - w_left - w_right != out_shape[2]:
            node.function_param["cropping"][3] = tmp_in_shape[2] - out_shape[2] - w_left

    @staticmethod
    def _validate_conv2d(node: Node) -> None:
        """Repair filters / kernel_size / strides / padding of a Conv2D node.

        May rewrite ``node.function_type`` to ``DepthwiseConv2D`` when the
        weight shape indicates a depthwise convolution.
        """
        in_shape = node.in_shape
        out_shape = node.out_shape

        # Filters must equal the output channel count (channels-last layout).
        if 'filters' not in node.function_param or out_shape[-1] != node.function_param["filters"]:
            node.function_param["filters"] = out_shape[-1]

        # Depthwise detection: unchanged channel count plus a weight tensor
        # shaped [..., in_channels, 1] indicates DepthwiseConv2D.
        if out_shape[-1] == in_shape[0][-1]:
            weight_shape = None
            for extra in node.extra_input:
                if len(extra.out_shape) > 1:
                    weight_shape = extra.out_shape
                    break
            # Guard: with no multi-dim extra input there is nothing to inspect
            # (the original code crashed here subscripting None).
            if weight_shape is not None and weight_shape[-1] == 1 and weight_shape[-2] == in_shape[0][-1]:
                node.function_type = "DepthwiseConv2D"

        # Kernel size is read off the weight tensor's spatial dimension.
        if "kernel_size" not in node.function_param:
            node.function_param["kernel_size"] = 1
        tmp_kernel_size = -1
        if len(node.extra_input) == 1:
            tmp_kernel_size = node.extra_input[0].out_shape[1]
        else:
            for extra in node.extra_input:
                logger.debug(extra.out_shape)
                if extra.is_input:
                    continue
                if len(extra.out_shape) > 1 and extra.out_shape[3] == node.function_param["filters"]:
                    tmp_kernel_size = extra.out_shape[0]
                    break
        if tmp_kernel_size != -1 and tmp_kernel_size != node.function_param["kernel_size"]:
            node.function_param["kernel_size"] = tmp_kernel_size

        # Strides from the in/out spatial ratio:
        #   out = (in - k) / s + 1  =>  s ~= (in - k) / (out - 1)
        # Uses the validated kernel_size (the original fed the raw probe
        # value, which could still be the -1 sentinel at this point).
        if 'strides' not in node.function_param:
            node.function_param['strides'] = [1, 1]
        k = node.function_param["kernel_size"]
        h_out, w_out = out_shape[1], out_shape[2]
        h_in, w_in = in_shape[0][1], in_shape[0][2]
        tmp_strides = [1, 1]
        if h_out != 1 and w_out != 1:
            tmp_strides = [
                round((h_in - k) / (h_out - 1)),
                round((w_in - k) / (w_out - 1)),
            ]
        if node.function_param["strides"][0] != tmp_strides[0]:
            node.function_param["strides"][0] = tmp_strides[0]
        if node.function_param["strides"][1] != tmp_strides[1]:
            node.function_param["strides"][1] = tmp_strides[1]

        # Padding is "valid" iff the valid-convolution output size matches.
        strides = node.function_param["strides"]
        if int((h_in - k) / strides[0] + 1) == h_out:
            node.function_param["padding"] = "valid"
        else:
            node.function_param["padding"] = "same"

        # DepthwiseConv2D additionally carries a depth multiplier.
        if node.function_type == "DepthwiseConv2D":
            in_channel = node.in_shape[0][-1]
            out_channel = node.out_shape[-1]
            node.function_param["depth_multiplier"] = out_channel // in_channel

    @staticmethod
    def _validate_pooling(node: Node) -> None:
        """Repair pool_size / strides / padding of a Max/Average pooling node."""
        in_shape = node.in_shape[0]
        out_shape = node.out_shape
        if "pool_size" not in node.function_param:
            node.function_param["pool_size"] = None

        # A 3-element input shape carries no batch dim, so the spatial
        # dimension sits at index 0 instead of index 1.
        spatial_in = in_shape[0] if len(in_shape) == 3 else in_shape[1]
        pool_size = round(spatial_in / out_shape[1])

        # "same" padding is implied when pure valid pooling would shrink the
        # output below the observed size (epsilon avoids division by zero).
        padding = "valid"
        if spatial_in // (pool_size + 1e-15) < out_shape[1]:
            padding = "same"

        strides = max(pool_size, 1)
        if padding == "valid":
            # Nudge pool_size until the valid-pooling formula reproduces the
            # observed output size.
            # NOTE(review): this reads in_shape[1] even in the batchless case,
            # unlike spatial_in above — preserved as-is, confirm with callers.
            while math.floor((in_shape[1] - pool_size) / strides) + 1 != out_shape[1]:
                if math.floor((in_shape[1] - pool_size) / strides) + 1 > out_shape[1]:
                    pool_size += 1
                else:
                    pool_size -= 1
        node.function_param["strides"] = strides
        node.function_param["pool_size"] = pool_size

        # Disabled model-based pool_size inference, kept for reference
        # (this is what POOL_SIZE_CLASSIFIER_PATH and the torch imports serve):
        # if node.function_type == "MaxPooling2D" and (strides == 2 or strides == 3):
        #     device = torch.device(
        #         "cuda:{}".format(os.environ["CUDA_VISIBLE_DEVICES"]) if torch.cuda.is_available() else "cpu")
        #     net = CLSNetV7(in_d=200, hiden=200, num_layers=2, out_classes=len(POOLING_SIZE))
        #     net.load_state_dict({
        #         k.replace('module.', ''): v
        #         for k, v in torch.load(POOL_SIZE_CLASSIFIER_PATH, map_location=device).items()
        #     })
        #     net.to(device)
        #     pred = net(node.bb_data, device)
        #     pred = pred[0]
        #     pool_size = POOLING_SIZE[pred.topk(1)[1][0]]
        #     node.function_param["pool_size"] = pool_size
        #     logger.debug("Infer Pool Size: {} for node [{}]".format(pool_size, node.name))
        #     del net

        if "padding" not in node.function_param or node.function_param["padding"] != padding:
            node.function_param["padding"] = padding

    @staticmethod
    def _validate_upsampling(node: Node) -> None:
        """Repair the integer up-sampling ``size`` factor from the shape ratio."""
        in_shape = node.in_shape[0]
        out_shape = node.out_shape
        if len(in_shape) == 3:
            # Batchless layout: spatial dim sits at index 0.
            size = out_shape[0] // in_shape[0]
        else:
            size = out_shape[1] // in_shape[1]
        if size != node.function_param["size"]:
            node.function_param["size"] = size

    @staticmethod
    def _validate_zero_padding2d(node: Node) -> None:
        """Repair the ``padding`` amounts from the in/out H/W difference."""
        if "padding" not in node.function_param:
            node.function_param["padding"] = (0, 0, 0, 0)
        tmp_in_shape = node.in_shape[0]
        out_shape = node.out_shape
        h_pad = (out_shape[1] - tmp_in_shape[1]) // 2
        w_pad = (out_shape[2] - tmp_in_shape[2]) // 2
        if h_pad == w_pad:
            # Symmetric padding on both axes.
            node.function_param["padding"] = (h_pad, h_pad, h_pad, h_pad)
        if h_pad == 0 and out_shape[1] != tmp_in_shape[1]:
            # Total padding of 1 per axis: place it all on one side.
            node.function_param["padding"] = (0, (out_shape[1] - tmp_in_shape[1]), 0,
                                              (out_shape[2] - tmp_in_shape[2]))

    @staticmethod
    def correct_wrong_node(node: Node) -> Node:
        """Replace a mispredicted ``atomic_type`` with a better candidate.

        Tries shape-based special cases first (pad detection, add/bias_add
        swap, broadcast multiply), then falls back to the next entry in the
        classifier's top-k prediction list.  Mutates and returns the node.
        """
        assert node.has_wrong_pred
        old_atom_type = node.atomic_type
        # An op whose output is spatially larger than its input, with the same
        # rank and no real extra input, must be a pad.
        if len(node.in_shape[0]) > 1 and node.in_shape[0][1] < node.out_shape[1] and len(node.in_shape[0]) == len(
                node.out_shape) and (len(node.extra_input) == 0 or
                                     (len(node.extra_input) == 1 and len(node.extra_input[0].out_shape) == 0)):
            node.atomic_type = "pad"
            logger.debug("Fix Padding!")
            return node

        # add <-> bias_add confusion is resolved directly by the weight count.
        if node.atomic_type == "add" and node.wrong_pred_category == Node.WEIGHT_NUM_WRONG:
            node.atomic_type = "bias_add"
            logger.debug("Correct type [{}] -> [{}] for node[{}]".format(old_atom_type, node.atomic_type, node.name))
            return node
        if node.atomic_type == "bias_add" and node.wrong_pred_category == Node.WEIGHT_NUM_WRONG:
            node.atomic_type = "add"
            logger.debug("Correct type [{}] -> [{}] for node[{}]".format(old_atom_type, node.atomic_type, node.name))
            return node

        topk = node.function_param["topk"]
        # Clamp so a wrong prediction that is already the LAST top-k candidate
        # does not index past the end (the original raised IndexError there).
        next_pred_idx = min(topk.index(node.atomic_type) + 1, len(topk) - 1)

        if (node.wrong_pred_category == Node.INPUT_NUM_WRONG
                and len(node.in_shape) == 2 and len(node.extra_input) == 0
                and len(node.in_shape[0]) != len(node.in_shape[1])):
            # Two inputs of different rank with no weights: broadcast multiply.
            node.atomic_type = "multiply"
        elif node.wrong_pred_category in (
                Node.INPUT_NUM_WRONG, Node.WEIGHT_NUM_WRONG, Node.WEIGHT_DIM_WRONG,
                Node.INPUT_DIM_WRONG, Node.OUTPUT_DIM_WRONG, Node.SHAPE_CHANGE_WRONG):
            # Fall back to the next-best classifier candidate.
            node.atomic_type = topk[next_pred_idx]

        logger.debug("Correct type [{}] -> [{}] for node[{}]".format(old_atom_type, node.atomic_type, node.name))
        return node
