import re
import os
import math
import numpy as np
import torch
import torch.nn as nn


class ConvParam:
    """Convolution hyper-parameters parsed from an attribute string.

    The string encodes the input/output NHWC shapes, kernel size, strides,
    per-side padding and dilation, e.g.
    "inputNHWC_1x224x224x3_outputNHWC_1x112x112x32_kernelHW_3x3_...".
    """

    # Channel tile width used by the vectorized C/4 x 4 layouts.
    CI_TILE, CO_TILE = 4, 4

    def __init__(self,
                 attr_str="inputNHWC_1x224x224x3_outputNHWC_1x112x112x32_kernelHW_3x3_strideHW_2x2_padTopBottomLeftRight_0x1x0x1_dilationHW_1x1"):
        # Pull every number out of the attribute string, in order.
        fields = [int(tok) for tok in re.findall(r"\d+\.?\d*", attr_str)]
        # The output batch (index 4) duplicates the input batch; discard it.
        (self.N, self.IH, self.IW, self.CI,
         _, self.OH, self.OW, self.CO,
         self.KH, self.KW,
         self.strideH, self.strideW,
         self.padTop, self.padBottom, self.padLeft, self.padRight,
         self.dilationH, self.dilationW) = fields

    def use_winograd(self):
        """Return True when this conv is eligible and profitable for the
        Winograd F(4x4, 3x3) path on the targeted device profile."""
        # Only a plain 3x3 / stride-1 / pad-1 / no-dilation conv qualifies.
        if (self.KH, self.KW) != (3, 3):
            return False
        if (self.strideH, self.strideW) != (1, 1):
            return False
        if (self.padTop, self.padBottom, self.padLeft, self.padRight) != (1, 1, 1, 1):
            return False
        if (self.dilationH, self.dilationW) != (1, 1):
            return False
        min_depth = 16  # device.IsMali() ? 16 : 32;
        min_hw = 32  # device.IsMali() ? 32 : 128;
        ci_slices = int(math.ceil(self.CI / self.CI_TILE))
        co_slices = int(math.ceil(self.CO / self.CO_TILE))
        # Channel depth must be large and CO slices a multiple of 4.
        if co_slices % 4 != 0 or ci_slices < min_depth or co_slices < min_depth:
            return False
        # Enough 4x4 output tiles to keep the device busy.
        return math.ceil(self.OW / 4) * math.ceil(self.OH / 4) >= min_hw


def convert_weight_OIHW_to_OHWIIO(param: ConvParam, weight_OIHW):
    """Repack an OIHW weight tensor into the tiled CO/4 KH KW CI/4 4(CI) 4(CO)
    layout used by the OpenCL kernel.

    Both channel axes are zero-padded up to a multiple of the 4-wide tile
    before the reshape/transpose.
    """
    ci_align = int(math.ceil(param.CI / param.CI_TILE)) * param.CI_TILE
    co_align = int(math.ceil(param.CO / param.CO_TILE)) * param.CO_TILE

    padded = weight_OIHW
    # Pad the output-channel axis (axis 0) up to the tile boundary.
    if param.CO % param.CO_TILE:
        co_pad = np.zeros((co_align - param.CO, param.CI, param.KH, param.KW)).astype(np.float32)
        padded = np.concatenate((padded, co_pad), axis=0)
    # Pad the input-channel axis (axis 1) up to the tile boundary.
    if param.CI % param.CI_TILE:
        ci_pad = np.zeros((padded.shape[0], ci_align - param.CI, param.KH, param.KW)).astype(np.float32)
        padded = np.concatenate((padded, ci_pad), axis=1)

    tiled_shape = (co_align // param.CO_TILE, param.CO_TILE,
                   ci_align // param.CI_TILE, param.CI_TILE,
                   param.KH, param.KW)
    # (O, o, I, i, H, W) -> (O, H, W, I, i, o)
    return np.reshape(padded, tiled_shape).transpose((0, 4, 5, 2, 3, 1))


def create_input(param: ConvParam):
    """Generate deterministic random conv tensors for a test case.

    Returns (input_NCHW, weight_OIHW, weight_OHWIIO, bias), all float32.
    """
    np.random.seed(0)  # fixed seed so dumped reference data is reproducible
    in_shape = (param.N, param.CI, param.IH, param.IW)
    w_shape = (param.CO, param.CI, param.KH, param.KW)
    input_NCHW = np.random.uniform(-1, 1, size=in_shape).astype(np.float32)
    weight_OIHW = np.random.uniform(-1, 1, size=w_shape).astype(np.float32)
    bias = np.random.uniform(-1, 1, size=(param.CO,)).astype(np.float32)
    return input_NCHW, weight_OIHW, convert_weight_OIHW_to_OHWIIO(param, weight_OIHW), bias


def run_pytorch(param: ConvParam, input_NCHW: np.ndarray, weight_OIHW: np.ndarray, bias: np.ndarray, check_winograd: bool):
    """Compute the reference convolution with PyTorch and, optionally,
    re-derive the same result through an explicit Winograd F(4x4, 3x3)
    pipeline for cross-checking against the OpenCL implementation.

    Returns:
        (output_NCHW, output_NCHW) -- the NCHW reference output, twice.
        NOTE(review): both branches compute/assign weight_winograd7D but it is
        never returned; possibly the second tuple element was meant to be
        weight_winograd7D -- confirm against callers.
    """
    # padding=0 here: the (possibly asymmetric) padding is applied manually
    # with np.pad below, since nn.Conv2d only supports symmetric padding.
    model = nn.Conv2d(in_channels=param.CI,
                      out_channels=param.CO,
                      kernel_size=(param.KH, param.KW),
                      stride=(param.strideH, param.strideW),
                      padding=0,
                      dilation=(param.dilationH, param.dilationW),
                      bias=True)
    model.bias.data = torch.tensor(bias)
    input_pad = np.pad(input_NCHW, ((0, 0), (0, 0), (param.padTop, param.padBottom), (param.padLeft, param.padRight)),
                       'constant', constant_values=0)
    # Run the conv with the supplied weights, bypassing the module's randomly
    # initialized ones.  NOTE(review): _conv_forward is a private API and
    # newer torch versions require a third (bias) argument -- verify against
    # the torch version in use.
    output_NCHW = model._conv_forward(torch.tensor(input_pad), torch.tensor(weight_OIHW)).detach().numpy()

    if param.use_winograd():
        print("could use winograd")
    # output_NHWC4 = output_NCHW.reshape(param.N, param.CO, param.OH, param.OW).transpose(0, 2, 3, 1)
    # print("output_NCHW4", output_NHWC4.flatten()[:10])
    # output_NHWC4_cl = np.fromfile("output_mem.bin", dtype=np.float32).reshape(output_NHWC4.shape)
    # np.testing.assert_allclose(output_NHWC4_cl.flatten(), output_NHWC4.flatten(), rtol=1e-4, atol=1e-4)
    if check_winograd and param.use_winograd():
        # Winograd F(4x4, 3x3) transform matrices over 6x6 tiles:
        # Bt/B transform the input, G/Gt the weights, At/A the output.
        Bt = np.array([[1.0000000000, 0.0000000596, -2.5000004768, -0.0000001192, 1.0000001192, 0.0000000000],
                       [0.0000000000, 0.9428091049, 1.3333333731, -0.4714044929, -0.6666667461, 0.0000000000],
                       [0.0000000000, -0.9428090453, 1.3333334923, 0.4714045525, -0.6666667461, 0.0000000000],
                       [0.0000000000, -0.1178511307, -0.0833333358, 0.2357022613, 0.1666666865, 0.0000000000],
                       [0.0000000000, 0.1178511456, -0.0833333507, -0.2357022911, 0.1666666865, 0.0000000000],
                       [0.0000000000, 1.0000000000, -0.0000000596, -2.5000000000, 0.0000000000, 1.0000000000]],
                      dtype=np.float32)
        B = Bt.transpose()
        Gt = np.array([[1.0000000000, 1.0000000000, 1.0000000000, 1.0000000000, 1.0000000000, 0.0000000000],
                       [0.0000000000, 0.7071067691, -0.7071067691, 1.4142135382, -1.4142135382, 0.0000000000],
                       [0.0000000000, 0.4999999702, 0.4999999702, 1.9999998808, 1.9999998808, 1.0000000000]],
                      dtype=np.float32)
        G = Gt.transpose()
        At = np.array([[1.0000000000, 1.0000000000, 1.0000000000, 1.0000000000, 1.0000000000, 0.0000000000],
                       [0.0000000000, 0.7071067691, -0.7071067691, 1.4142135382, -1.4142135382, 0.0000000000],
                       [0.0000000000, 0.4999999702, 0.4999999702, 1.9999998808, 1.9999998808, 0.0000000000],
                       [0.0000000000, 0.3535533845, -0.3535533845, 2.8284270763, -2.8284270763, 1.0000000000]],
                      dtype=np.float32)
        A = At.transpose()
        # Tile counts come from the *input* size; for the 3x3 / stride-1 /
        # pad-1 case enforced by use_winograd(), OH == IH and OW == IW, so
        # this matches the output tiling as well.
        tile_w = math.ceil(param.IW / 4)
        tile_h = math.ceil(param.IH / 4)

        # M = [GgG'] · [B'dB]
        # Y = A'MA
        # kernel0: NCHW -> N CI TILE_H TILE_W 6 6
        # Input transform: each 6x6 patch (4x4 tile + 2-pixel halo) -> B'dB.
        # NOTE(review): only batch element 0 is transformed -- assumes N == 1.
        _4x4To36_output = np.zeros((param.N, param.CI, tile_h, tile_w, 6, 6), dtype=np.float32)
        for c in range(param.CI):
            for h in range(tile_h):
                for w in range(tile_w):
                    tmp = np.dot(Bt, input_pad[0, c, h * 4:h * 4 + 6, w * 4:w * 4 + 6])
                    _4x4To36_output[0, c, h, w] = np.dot(tmp, B)
        _4x4To36_output_NHWC = _4x4To36_output.transpose((0, 2, 3, 4, 5, 1))
        _4x4To36_output_NCHW4 = _4x4To36_output.reshape(
            (param.N, int(math.ceil(param.CI / 4)), 4, tile_h, tile_w, 6, 6)).transpose((0, 1, 5, 6, 3, 4, 2))
        # _4x4To36_output_NHWC.tofile("_4x4To36_output_NHWC.bin")
        # print("_4x4To36_output: ", _4x4To36_output_NHWC.flatten()[:10])
        # workspace0_mem = np.fromfile("workspace0_mem.bin", dtype=np.float32)
        # workspace0_mem_NHWC = workspace0_mem.reshape((param.N,  tile_h, tile_w, 6, 6, param.CI))
        # np.testing.assert_allclose(workspace0_mem, _4x4To36_output_NHWC.flatten(), rtol=1e-4, atol=1e-4)
        # np.testing.assert_allclose(workspace0_mem, _4x4To36_output_NCHW4.flatten(), rtol=1e-4, atol=1e-4)

        # kernel1 weight: CO CI 6 6
        # Weight transform: each 3x3 filter -> GgG' (6x6).
        weight_winograd = np.zeros((param.CO, param.CI, 6, 6), dtype=np.float32)
        for co in range(param.CO):
            for ci in range(param.CI):
                weight_winograd[co, ci] = np.dot(np.dot(G, weight_OIHW[co, ci]), Gt)
        # weight_O66I = weight_winograd.transpose((0, 2, 3, 1))
        # weight_O66I_read = np.fromfile("weight_O66I.bin", dtype=np.float32).reshape(weight_O66I.shape)

        # Repack into the 7D device layout: CO/8 6 6 CI/4 2 4(CI) 4(CO).
        # NOTE(review): the reshape requires CO % 8 == 0 and CI % 4 == 0;
        # no padding is applied here -- confirm against callers.
        weight_winograd7D = weight_winograd.reshape(
            (int(math.ceil(param.CO / 8)), 2, 4, int(math.ceil(param.CI / 4)), 4, 6, 6)).transpose(
            (0, 5, 6, 3, 1, 4, 2))
        # print(weight_winograd7D.shape)
        # print("weight 7D", weight_winograd7D.flatten()[:10])
        print(weight_winograd7D.flatten()[:10])
        # weight_winograd7D.tofile("weight_winograd7D.bin")

        # kernel1
        # Element-wise multiply-accumulate over CI in the transform domain.
        _36To4x4_input = np.zeros((param.N, param.CO, tile_h, tile_w, 6, 6), dtype=np.float32)
        for co in range(param.CO):
            for ci in range(param.CI):
                _36To4x4_input[0, co] += _4x4To36_output[0, ci] * weight_winograd[co, ci]
                # if co == 0:
                #     print("in=%.3f w=%.3f out=%.3f" % (_4x4To36_output[0, ci].flatten()[1],
                #                                        weight_winograd[co, ci].flatten()[1],
                #                                        _36To4x4_input[0, co].flatten()[1]))
                #     if ci % 4 == 3:
                #         print()
        # print(_36To4x4_input.shape)
        # NOTE(review): `math.ceil(param.CO // 4)` applies ceil to a floor
        # division, i.e. it is just CO // 4; the matching input repack above
        # uses `math.ceil(param.CI / 4)` (true division) -- likely a typo,
        # though the two only differ when CO % 4 != 0.
        _36To4x4_input_NCHW4 = _36To4x4_input.reshape(
            (param.N, int(math.ceil(param.CO // 4)), 4, tile_h * tile_w, 6 * 6)).transpose(
            (0, 1, 4, 3, 2))
        # print("_36To4x4_input_NCHW4", _36To4x4_input_NCHW4.flatten()[:10])
        # workspace1_mem = np.fromfile("workspace1_mem.bin", dtype=np.float32)
        # workspace1_mem = workspace1_mem.reshape(param.N, int(math.ceil(param.CO // 4)), 36, tile_h * tile_w, 4)
        # np.testing.assert_allclose(workspace1_mem.flatten(), _36To4x4_input_NCHW4.flatten(), rtol=1e-4, atol=1e-4)

        # kernel2
        # Output transform: each 6x6 tile -> A'MA (4x4 output tile).
        _36To4x4_output = np.zeros((param.N, param.CO, tile_h, tile_w, 4, 4), dtype=np.float32)
        for c in range(param.CO):
            for h in range(tile_h):
                for w in range(tile_w):
                    _36To4x4_output[0, c, h, w] = np.dot(np.dot(At, _36To4x4_input[0, c, h, w]), A)
                    # if c==0 and h==0 and w==0:
                    #     print(_36To4x4_input[0, c, h, w])
                    #     print(np.dot(At, _36To4x4_input[0, c, h, w]))
                    #     print(_36To4x4_output[0, c, h, w])

        # Stitch the 4x4 tiles back into a spatial image, then add the bias
        # (broadcast over the channel-last view).
        _36To4x4_output = _36To4x4_output.transpose((0, 1, 2, 4, 3, 5)).reshape((param.N, param.CO, param.OH, param.OW))
        _36To4x4_output_bias = (_36To4x4_output.transpose(0, 2, 3, 1) + bias).transpose(0, 3, 1, 2)

        # print("_36To4x4_output", _36To4x4_output.flatten()[:10])
        # print("_36To4x4_output_bias", _36To4x4_output_bias.flatten()[:10])
        # print("output_NCHW", output_NCHW.flatten()[:10])
        output_NHWC4 = output_NCHW.reshape(param.N, param.CO, param.OH, param.OW).transpose(0, 2, 3, 1)
        # print("output_NCHW4", output_NHWC4.flatten()[:10])

        # output_NHWC4_cl = np.fromfile("output_mem.bin", dtype=np.float32).reshape(output_NHWC4.shape)
        # np.testing.assert_allclose(output_NHWC4_cl.flatten(), output_NHWC4.flatten(), rtol=1e-4, atol=1e-4)
        # print(np.fabs(_36To4x4_output_bias - output_NCHW).max())
        # np.testing.assert_allclose(_36To4x4_output_bias, output_NCHW, rtol=1e-4, atol=1e-4)
    else:
        # NOTE(review): placeholder assignment; weight_winograd7D is unused
        # after this point (see docstring).
        weight_winograd7D = output_NCHW
    return output_NCHW, output_NCHW


def run_opencl(param: ConvParam, input_NCHW: np.ndarray, weight_OHWIIO: np.ndarray, bias: np.ndarray):
    """CPU reference simulation of the tiled OpenCL convolution kernel.

    Walks the same (w, h, co_slice) work-item grid the GPU kernel would use
    and accumulates a CO_TILE-wide (float4) result per work item, reading
    weights from the CO/4 KH KW CI/4 4 4 layout produced by
    convert_weight_OIHW_to_OHWIIO.

    Returns the output in NCHW, shape (N, CO, OH, OW).

    NOTE(review): dilation is never applied in the index arithmetic below, so
    the simulation is only valid for dilationHW == 1x1.  Likewise only batch
    element 0 is computed -- assumes N == 1.
    """
    CI, CO = param.CI, param.CO
    CI_TILE, CO_TILE = param.CI_TILE, param.CO_TILE
    N = param.N
    IH, IW = param.IH, param.IW
    OH, OW = param.OH, param.OW
    KH, KW = param.KH, param.KW
    strideH, strideW = param.strideH, param.strideW
    padTop, padLeft = param.padTop, param.padLeft

    def kernel(w, h, co_outer):
        """One simulated work item: a CO_TILE-wide output pixel at (w, h)."""
        # Out-of-range work items (rounded-up grid) do nothing.
        if w >= dst_size[0] or h >= dst_size[1] or co_outer >= dst_size[2]:
            return
        acc = np.array([0.0, 0.0, 0.0, 0.0], dtype=np.float32)
        for kh in range(KH):
            y = kh + h * strideH - padTop
            for kw in range(KW):
                x = kw + w * strideW - padLeft
                for ci_outer in range(math.ceil(CI / CI_TILE)):
                    # float4 = float4 * float1
                    # src00 = read_imagef(src_data, smp_zero, (int2)(xck0, yck0 * src_size.z));
                    for ci_inner in range(CI_TILE):
                        ci = ci_outer * CI_TILE + ci_inner
                        if ci >= CI:
                            break
                        # Reads outside the image behave like CLK_ADDRESS_CLAMP (zero).
                        value = 0 if y < 0 or y >= IH or x < 0 or x >= IW else input_NCHW[0][ci][y][x]
                        acc += weight_OHWIIO[co_outer][kh][kw][ci_outer][ci_inner] * value
                        # print("input[0][%d][%d][%d]=%f" % (ci, y, x, value))

        # Scatter the float4 back to NCHW, skipping the CO padding lanes.
        for i in range(4):
            if co_outer * CO_TILE + i < CO:
                output[0][co_outer * CO_TILE + i][h][w] = acc[i] + bias[co_outer * CO_TILE + i]

    # The real kernel additionally receives these tflite-style argument packs
    # (unused by this simulation, kept for reference):
    #   stride_padding  = (strideW, strideH, -padLeft, -padTop)
    #   kernel_dilation = (KW, KH, dilationW, dilationH)
    #   src_size        = (IW, IH, ceil(CI / CI_TILE), N)
    dst_size = OW, OH, math.ceil(CO / CO_TILE), N
    output = np.zeros((N, CO, OH, OW), dtype=np.float32)

    grid = OW, OH, math.ceil(CO / CO_TILE)
    # grid = 2, 2, 8
    for w in range(0, grid[0]):
        for h in range(0, grid[1]):
            for co in range(0, grid[2]):
                print("w h co", w, h, co)  # progress trace, one line per work item
                kernel(w, h, co)
    return output


def save_data(path, param, input_NCHW, weight_OIHW, weight_OHWIIO, bias, output_NCHW, weight_winograd7D,
              save_dtype=np.float32):
    """Dump all tensors under *path* in the layouts the OpenCL harness reads.

    Writes the input/expected-output in NHWC, NHWC4, NC4HW4 and NHC4W4
    layouts, the weights in OHWI / OHWIIO / winograd-7D form, and the bias
    both raw and padded.  Channel axes are zero-padded to a multiple of the
    4-wide tile before any C4 layout is written.  Everything except the
    winograd weights is cast to *save_dtype* on disk.
    """
    if not os.path.exists(path):
        print("path doesn't exist. trying to make", path)
        os.makedirs(path)
    # NOTE(review): written without the save_dtype cast applied to every
    # other tensor -- confirm this asymmetry is intended.
    weight_winograd7D.tofile(os.path.join(path, "weight_winograd7D.bin"))

    # Input: pad CI up to a multiple of CI_TILE, then emit the NHWC views.
    if param.CI % param.CI_TILE:
        pad_shape = list(input_NCHW.shape)
        pad_shape[1] = param.CI_TILE - param.CI % param.CI_TILE
        input_NC4HW = np.concatenate((input_NCHW, np.zeros(pad_shape, input_NCHW.dtype)), axis=1)
        input_NHWC4 = input_NC4HW.transpose((0, 2, 3, 1))
    else:
        input_NHWC4 = input_NCHW.transpose((0, 2, 3, 1))
    input_NCHW.transpose((0, 2, 3, 1)).astype(save_dtype).tofile(os.path.join(path, "input_NHWC.bin"))
    input_NHWC4.astype(save_dtype).tofile(os.path.join(path, "input_NHWC4.bin"))

    input_NHWC4_5D_shape = list(input_NHWC4.shape[:-1]) + [input_NHWC4.shape[-1] // 4, 4]  # N H W C/4 4
    input_NC4HW4 = input_NHWC4.reshape(input_NHWC4_5D_shape).transpose((0, 3, 1, 2, 4))  # N C/4 H W 4
    input_NC4HW4.astype(save_dtype).tofile(os.path.join(path, "input_NC4HW4.bin"))
    input_NHC4W4 = input_NHWC4.reshape(input_NHWC4_5D_shape).transpose((0, 1, 3, 2, 4))  # N H C/4 W 4
    input_NHC4W4.astype(save_dtype).tofile(os.path.join(path, "input_NHC4W4.bin"))

    weight_OIHW.transpose((0, 2, 3, 1)).astype(save_dtype).tofile(os.path.join(path, "weight_OHWI.bin"))
    weight_OHWIIO.astype(save_dtype).tofile(os.path.join(path, "weight_OHWIIO.bin"))

    # Bias: pad CO up to a multiple of CO_TILE.
    # BUG FIX: the pad length is CO_TILE - CO % CO_TILE (matching the
    # input/output channel padding in this function); the previous
    # CO - CO % CO_TILE padded far too much (e.g. CO=5 gave length 9, not 8).
    if param.CO % param.CO_TILE:
        bias_C4 = np.concatenate((bias, np.zeros((param.CO_TILE - param.CO % param.CO_TILE,), bias.dtype)), axis=0)
    else:
        bias_C4 = bias
    bias.astype(save_dtype).tofile(os.path.join(path, "bias_C.bin"))
    bias_C4.astype(save_dtype).tofile(os.path.join(path, "bias_C4.bin"))

    # Expected output: same padding + layout family as the input.
    if param.CO % param.CO_TILE:
        pad_shape = list(output_NCHW.shape)
        pad_shape[1] = param.CO_TILE - param.CO % param.CO_TILE
        output_NC4HW = np.concatenate((output_NCHW, np.zeros(pad_shape, output_NCHW.dtype)), axis=1)
        output_NHWC4 = output_NC4HW.transpose((0, 2, 3, 1))
    else:
        output_NHWC4 = output_NCHW.transpose((0, 2, 3, 1))
    output_NCHW.transpose((0, 2, 3, 1)).astype(save_dtype).tofile(os.path.join(path, "expect_NHWC.bin"))
    output_NHWC4.astype(save_dtype).tofile(os.path.join(path, "expect_NHWC4.bin"))

    output_NHWC4_5D_shape = list(output_NHWC4.shape[:-1]) + [output_NHWC4.shape[-1] // 4, 4]  # N H W C/4 4
    output_NC4HW4 = output_NHWC4.reshape(output_NHWC4_5D_shape).transpose((0, 3, 1, 2, 4))  # N C/4 H W 4
    output_NC4HW4.astype(save_dtype).tofile(os.path.join(path, "expect_NC4HW4.bin"))
    output_NHC4W4 = output_NHWC4.reshape(output_NHWC4_5D_shape).transpose((0, 1, 3, 2, 4))  # N H C/4 W 4
    output_NHC4W4.astype(save_dtype).tofile(os.path.join(path, "expect_NHC4W4.bin"))


testcases_group = [
    # ("mobilenetv2", [
    #     "inputNHWC_1x224x224x3_outputNHWC_1x112x112x32_kernelHW_3x3_strideHW_2x2_padTopBottomLeftRight_0x1x0x1_dilationHW_1x1",
    #     #     "inputNHWC_1x112x112x32_outputNHWC_1x112x112x16_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x112x112x16_outputNHWC_1x112x112x96_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x56x56x96_outputNHWC_1x56x56x24_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x56x56x24_outputNHWC_1x56x56x144_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x56x56x144_outputNHWC_1x56x56x24_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x56x56x24_outputNHWC_1x56x56x144_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x28x28x144_outputNHWC_1x28x28x32_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x28x28x32_outputNHWC_1x28x28x192_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x28x28x192_outputNHWC_1x28x28x32_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x28x28x32_outputNHWC_1x28x28x192_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x28x28x192_outputNHWC_1x28x28x32_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x28x28x32_outputNHWC_1x28x28x192_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x14x14x192_outputNHWC_1x14x14x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x14x14x64_outputNHWC_1x14x14x384_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x14x14x384_outputNHWC_1x14x14x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x14x14x64_outputNHWC_1x14x14x384_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x14x14x384_outputNHWC_1x14x14x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x14x14x64_outputNHWC_1x14x14x384_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x14x14x384_outputNHWC_1x14x14x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x14x14x64_outputNHWC_1x14x14x384_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x14x14x384_outputNHWC_1x14x14x96_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x14x14x96_outputNHWC_1x14x14x576_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x14x14x576_outputNHWC_1x14x14x96_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x14x14x96_outputNHWC_1x14x14x576_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x14x14x576_outputNHWC_1x14x14x96_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x14x14x96_outputNHWC_1x14x14x576_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x7x7x576_outputNHWC_1x7x7x160_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x7x7x160_outputNHWC_1x7x7x960_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x7x7x960_outputNHWC_1x7x7x160_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x7x7x160_outputNHWC_1x7x7x960_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x7x7x960_outputNHWC_1x7x7x160_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x7x7x160_outputNHWC_1x7x7x960_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x7x7x960_outputNHWC_1x7x7x320_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     #     "inputNHWC_1x7x7x320_outputNHWC_1x7x7x1280_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1"
    # ]),
    # ("02", [
    #     "inputNHWC_1x32x512x50_outputNHWC_1x32x512x48_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x32x512x50_outputNHWC_1x32x512x48_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x36x256x96_outputNHWC_1x36x256x80_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x16x256x96_outputNHWC_1x16x256x80_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x16x256x160_outputNHWC_1x16x256x100_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x36x256x100_outputNHWC_1x36x256x96_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x16x256x100_outputNHWC_1x16x256x96_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x8x128x192_outputNHWC_1x8x128x100_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x8x128x100_outputNHWC_1x8x128x250_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x8x128x250_outputNHWC_1x8x128x100_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x8x128x100_outputNHWC_1x8x128x300_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x4x64x300_outputNHWC_1x4x64x150_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x4x64x150_outputNHWC_1x4x64x350_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x4x64x350_outputNHWC_1x4x64x150_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x4x64x150_outputNHWC_1x4x64x400_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x1x64x400_outputNHWC_1x1x64x512_kernelHW_1x4_strideHW_1x1_padTopBottomLeftRight_0x0x1x2_dilationHW_1x1",
    #     "inputNHWC_1x1x64x512_outputNHWC_1x1x64x7358_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    # ]),
    # ("08", [
    #     "inputNHWC_1x960x960x3_outputNHWC_1x480x480x32_kernelHW_3x3_strideHW_2x2_padTopBottomLeftRight_0x1x0x1_dilationHW_1x1",
    #     "inputNHWC_1x480x480x32_outputNHWC_1x480x480x16_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x480x480x32_outputNHWC_1x480x480x128_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x480x480x16_outputNHWC_1x240x240x32_kernelHW_3x3_strideHW_2x2_padTopBottomLeftRight_0x1x0x1_dilationHW_1x1",
    #     "inputNHWC_1x240x240x64_outputNHWC_1x240x240x32_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x240x240x32_outputNHWC_1x240x240x16_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x240x240x32_outputNHWC_1x240x240x16_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x240x240x16_outputNHWC_1x240x240x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x240x240x16_outputNHWC_1x240x240x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x240x240x16_outputNHWC_1x240x240x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x240x240x64_outputNHWC_1x240x240x16_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x240x240x64_outputNHWC_1x240x240x16_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x240x240x16_outputNHWC_1x240x240x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x240x240x16_outputNHWC_1x240x240x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x240x240x16_outputNHWC_1x240x240x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x240x240x96_outputNHWC_1x240x240x16_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x240x240x96_outputNHWC_1x240x240x16_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x240x240x16_outputNHWC_1x240x240x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x240x240x16_outputNHWC_1x240x240x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x240x240x16_outputNHWC_1x240x240x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x240x240x128_outputNHWC_1x240x240x128_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x240x240x128_outputNHWC_1x240x240x128_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x120x120x128_outputNHWC_1x120x120x16_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x120x120x128_outputNHWC_1x120x120x16_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x120x120x16_outputNHWC_1x120x120x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x120x120x16_outputNHWC_1x120x120x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x120x120x16_outputNHWC_1x120x120x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x120x120x160_outputNHWC_1x120x120x16_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x120x120x160_outputNHWC_1x120x120x16_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x120x120x16_outputNHWC_1x120x120x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x120x120x16_outputNHWC_1x120x120x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x120x120x16_outputNHWC_1x120x120x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x120x120x192_outputNHWC_1x120x120x16_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x120x120x192_outputNHWC_1x120x120x16_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x120x120x16_outputNHWC_1x120x120x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x120x120x16_outputNHWC_1x120x120x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x120x120x16_outputNHWC_1x120x120x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x120x120x224_outputNHWC_1x120x120x16_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x120x120x224_outputNHWC_1x120x120x16_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x120x120x16_outputNHWC_1x120x120x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x120x120x16_outputNHWC_1x120x120x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x120x120x16_outputNHWC_1x120x120x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x120x120x256_outputNHWC_1x120x120x256_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x120x120x256_outputNHWC_1x120x120x128_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x60x60x256_outputNHWC_1x60x60x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x60x60x256_outputNHWC_1x60x60x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x60x60x64_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x64_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x16_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x288_outputNHWC_1x60x60x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x60x60x288_outputNHWC_1x60x60x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x60x60x64_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x64_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x16_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x320_outputNHWC_1x60x60x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x60x60x320_outputNHWC_1x60x60x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x60x60x64_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x64_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x16_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x352_outputNHWC_1x60x60x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x60x60x352_outputNHWC_1x60x60x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x60x60x64_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x64_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x16_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x384_outputNHWC_1x60x60x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x60x60x384_outputNHWC_1x60x60x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x60x60x64_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x64_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x16_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x416_outputNHWC_1x60x60x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x60x60x416_outputNHWC_1x60x60x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x60x60x64_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x64_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x16_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x448_outputNHWC_1x60x60x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x60x60x448_outputNHWC_1x60x60x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x60x60x64_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x64_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x16_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x480_outputNHWC_1x60x60x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x60x60x480_outputNHWC_1x60x60x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x60x60x64_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x64_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x16_outputNHWC_1x60x60x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x60x60x512_outputNHWC_1x60x60x512_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x60x60x512_outputNHWC_1x60x60x128_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x30x30x512_outputNHWC_1x30x30x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x30x30x512_outputNHWC_1x30x30x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x30x30x64_outputNHWC_1x30x30x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x30x30x64_outputNHWC_1x30x30x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x30x30x16_outputNHWC_1x30x30x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x30x30x544_outputNHWC_1x30x30x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x30x30x544_outputNHWC_1x30x30x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x30x30x64_outputNHWC_1x30x30x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x30x30x64_outputNHWC_1x30x30x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x30x30x16_outputNHWC_1x30x30x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x30x30x576_outputNHWC_1x30x30x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x30x30x576_outputNHWC_1x30x30x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x30x30x64_outputNHWC_1x30x30x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x30x30x64_outputNHWC_1x30x30x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x30x30x16_outputNHWC_1x30x30x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x30x30x608_outputNHWC_1x30x30x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x30x30x608_outputNHWC_1x30x30x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x30x30x64_outputNHWC_1x30x30x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x30x30x64_outputNHWC_1x30x30x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x30x30x16_outputNHWC_1x30x30x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x30x30x640_outputNHWC_1x30x30x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x30x30x640_outputNHWC_1x30x30x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x30x30x64_outputNHWC_1x30x30x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x30x30x64_outputNHWC_1x30x30x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x30x30x16_outputNHWC_1x30x30x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x30x30x672_outputNHWC_1x30x30x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x30x30x672_outputNHWC_1x30x30x64_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x30x30x64_outputNHWC_1x30x30x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x30x30x64_outputNHWC_1x30x30x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x30x30x16_outputNHWC_1x30x30x16_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    #     "inputNHWC_1x30x30x704_outputNHWC_1x30x30x512_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x30x30x512_outputNHWC_1x30x30x128_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x36x14400x128_outputNHWC_1x36x14400x128_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    #     "inputNHWC_1x480x480x128_outputNHWC_1x480x480x1_kernelHW_1x1_strideHW_1x1_padTopBottomLeftRight_0x0x0x0_dilationHW_1x1",
    # ]),
    ("test", [
        # "inputNHWC_1x1x64x400_outputNHWC_1x1x64x512_kernelHW_1x4_strideHW_1x1_padTopBottomLeftRight_0x0x1x2_dilationHW_1x1",
        # "inputNHWC_1x480x480x128_outputNHWC_1x480x480x128_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
        # "inputNHWC_1x32x512x1_outputNHWC_1x32x512x50_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
        # "inputNHWC_1x32x512x50_outputNHWC_1x32x512x48_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
        #
        # "inputNHWC_1x16x256x96_outputNHWC_1x16x256x80_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
        # "inputNHWC_1x16x256x100_outputNHWC_1x16x256x96_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
        # "inputNHWC_1x480x480x128_outputNHWC_1x480x480x128_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
        #
        # "inputNHWC_1x32x512x1_outputNHWC_1x32x512x50_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
        "inputNHWC_1x27x27x48_outputNHWC_1x27x27x192_kernelHW_3x3_strideHW_1x1_padTopBottomLeftRight_1x1x1x1_dilationHW_1x1",
    ]),
]

if __name__ == '__main__':
    # Generate reference data once per unique (model, testcase) pair;
    # duplicates inside testcases_group are skipped via the `seen` set.
    seen = set()
    for model, testcases in testcases_group:
        for testcase in testcases:
            key = model + testcase
            if key in seen:
                continue
            print(model, testcase)
            param = ConvParam(testcase)
            input_NCHW, weight_OIHW, weight_OHWIIO, bias = create_input(param)
            # check_winograd=False: only the plain-conv reference output is produced here.
            expect, weight_winograd7D = run_pytorch(param, input_NCHW, weight_OIHW, bias, check_winograd=False)
            save_data(f"testcases/{model}_fp32/{testcase}", param,
                      input_NCHW, weight_OIHW, weight_OHWIIO, bias, expect, weight_winograd7D, save_dtype=np.float32)
            # save_data(f"testcases/{model}_fp16/{testcase}", param,
            #           input_NCHW, weight_OIHW, weight_OHWIIO, bias, expect, weight_winograd7D, save_dtype=np.float16)
            seen.add(key)

    # output = run_opencl(param, input_NCHW, weight_OHWIIO, bias)
    # print(expect.flatten()[:10])
    # print(output.flatten()[:10])
    # np.testing.assert_allclose(expect, output, rtol=1e-5, atol=1e-5)
    # print("max absolute error", np.fabs(expect - output).max())
    # print("max relative error", (np.fabs(expect - output) / np.fabs(output)).max())
    # # print(np.where(d == d.max()))
