/* auto-generated by HHB_VERSION 2.4.5 */

#include <csi_nn.h>

void *csinn_(char *params_base) {
  struct csinn_session *sess = csinn_alloc_session();
  sess->base_run_mode = CSINN_RM_CPU_GRAPH;
  sess->base_quant_type = CSINN_QUANT_FLOAT16;
  sess->model.save_mode = CSINN_RUN_ONLY;
  sess->base_api = CSINN_C906;
  sess->base_dtype = CSINN_DTYPE_FLOAT16;
  sess->dynamic_shape = CSINN_FALSE;
  csinn_session_init(sess);
  csinn_set_input_number(1, sess);
  csinn_set_output_number(1, sess);

  struct csinn_tensor *input = csinn_alloc_tensor(sess);
  input->name = "input@@conv2d_Conv_0_PART_0_0_fuse_bias_add_Conv_0_1_0";
  input->dtype = CSINN_DTYPE_FLOAT16;
  input->layout = CSINN_LAYOUT_NCHW;
  input->dim[0] = 1;
  input->dim[1] = 3;
  input->dim[2] = 224;
  input->dim[3] = 224;
  input->dim_count = 4;
  input->qinfo = (struct csinn_quant_info *)(params_base + 0);
  input->quant_channel = 1;
  struct csinn_tensor *output_0 = csinn_alloc_tensor(sess);
  output_0->name = "output_0";
  output_0->dtype = CSINN_DTYPE_FLOAT16;
  output_0->layout = CSINN_LAYOUT_NCHW;
  output_0->dim[0] = 1;
  output_0->dim[1] = 32;
  output_0->dim[2] = 112;
  output_0->dim[3] = 112;
  output_0->dim_count = 4;
  output_0->qinfo = (struct csinn_quant_info *)(params_base + 24);
  output_0->quant_channel = 1;
  struct csinn_tensor *kernel_0 = csinn_alloc_tensor(sess);
  kernel_0->name = "kernel_0";
  kernel_0->data = params_base + 104;
  kernel_0->is_const = 1;
  kernel_0->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_0->dtype = CSINN_DTYPE_FLOAT16;
  kernel_0->layout = CSINN_LAYOUT_OIHW;
  kernel_0->dim[0] = 32;
  kernel_0->dim[1] = 3;
  kernel_0->dim[2] = 3;
  kernel_0->dim[3] = 3;
  kernel_0->dim_count = 4;
  kernel_0->qinfo = (struct csinn_quant_info *)(params_base + 48);
  kernel_0->quant_channel = 1;
  struct csinn_tensor *bias_0 = csinn_alloc_tensor(sess);
  bias_0->name = "bias_0";
  bias_0->data = params_base + 1856;
  bias_0->is_const = 1;
  bias_0->dtype = CSINN_DTYPE_FLOAT16;
  bias_0->layout = CSINN_LAYOUT_O;
  bias_0->dim[0] = 32;
  bias_0->dim_count = 1;
  bias_0->qinfo = (struct csinn_quant_info *)(params_base + 1832);
  bias_0->quant_channel = 1;
  struct csinn_conv2d_params *params_0 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_0->group = 1;
  params_0->stride_height = 2;
  params_0->stride_width = 2;
  params_0->dilation_height = 1;
  params_0->dilation_width = 1;
  params_0->conv_extra.kernel_tm = NULL;
  params_0->conv_extra.conv_mode = CSINN_DIRECT;
  params_0->pad_top = 1;
  params_0->pad_left = 1;
  params_0->pad_down = 1;
  params_0->pad_right = 1;
  params_0->base.name = "conv2d_Conv_0_PART_0_0_fuse_bias_add_Conv_0_1";
  params_0->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(input, output_0, kernel_0, bias_0, params_0);
  struct csinn_tensor *output_1 = csinn_alloc_tensor(sess);
  output_1->name = "output_1";
  output_1->dtype = CSINN_DTYPE_FLOAT16;
  output_1->layout = CSINN_LAYOUT_NCHW;
  output_1->dim[0] = 1;
  output_1->dim[1] = 32;
  output_1->dim[2] = 112;
  output_1->dim[3] = 112;
  output_1->dim_count = 4;
  output_1->qinfo = (struct csinn_quant_info *)(params_base + 1920);
  output_1->quant_channel = 1;
  struct csinn_relu_params *params_1 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_1->n = 6;
  params_1->base.name = "clip_Clip_1_2";
  params_1->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_0, output_1, params_1);
  struct csinn_tensor *output_2 = csinn_alloc_tensor(sess);
  output_2->name = "output_2";
  output_2->dtype = CSINN_DTYPE_FLOAT16;
  output_2->layout = CSINN_LAYOUT_NCHW;
  output_2->dim[0] = 1;
  output_2->dim[1] = 32;
  output_2->dim[2] = 112;
  output_2->dim[3] = 112;
  output_2->dim_count = 4;
  output_2->qinfo = (struct csinn_quant_info *)(params_base + 1944);
  output_2->quant_channel = 1;
  struct csinn_tensor *kernel_2 = csinn_alloc_tensor(sess);
  kernel_2->name = "kernel_2";
  kernel_2->data = params_base + 2024;
  kernel_2->is_const = 1;
  kernel_2->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_2->dtype = CSINN_DTYPE_FLOAT16;
  kernel_2->layout = CSINN_LAYOUT_O1HW;
  kernel_2->dim[0] = 32;
  kernel_2->dim[1] = 1;
  kernel_2->dim[2] = 3;
  kernel_2->dim[3] = 3;
  kernel_2->dim_count = 4;
  kernel_2->qinfo = (struct csinn_quant_info *)(params_base + 1968);
  kernel_2->quant_channel = 1;
  struct csinn_tensor *bias_2 = csinn_alloc_tensor(sess);
  bias_2->name = "bias_2";
  bias_2->data = params_base + 2624;
  bias_2->is_const = 1;
  bias_2->dtype = CSINN_DTYPE_FLOAT16;
  bias_2->layout = CSINN_LAYOUT_O;
  bias_2->dim[0] = 32;
  bias_2->dim_count = 1;
  bias_2->qinfo = (struct csinn_quant_info *)(params_base + 2600);
  bias_2->quant_channel = 1;
  struct csinn_conv2d_params *params_2 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_2->group = 32;
  params_2->stride_height = 1;
  params_2->stride_width = 1;
  params_2->dilation_height = 1;
  params_2->dilation_width = 1;
  params_2->conv_extra.kernel_tm = NULL;
  params_2->conv_extra.conv_mode = CSINN_DIRECT;
  params_2->pad_top = 1;
  params_2->pad_left = 1;
  params_2->pad_down = 1;
  params_2->pad_right = 1;
  params_2->base.name = "conv2d_Conv_2_PART_0_3_fuse_bias_add_Conv_2_4";
  params_2->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_1, output_2, kernel_2, bias_2, params_2);
  struct csinn_tensor *output_3 = csinn_alloc_tensor(sess);
  output_3->name = "output_3";
  output_3->dtype = CSINN_DTYPE_FLOAT16;
  output_3->layout = CSINN_LAYOUT_NCHW;
  output_3->dim[0] = 1;
  output_3->dim[1] = 32;
  output_3->dim[2] = 112;
  output_3->dim[3] = 112;
  output_3->dim_count = 4;
  output_3->qinfo = (struct csinn_quant_info *)(params_base + 2688);
  output_3->quant_channel = 1;
  struct csinn_relu_params *params_3 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_3->n = 6;
  params_3->base.name = "clip_Clip_3_5";
  params_3->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_2, output_3, params_3);
  struct csinn_tensor *output_4 = csinn_alloc_tensor(sess);
  output_4->name = "output_4";
  output_4->dtype = CSINN_DTYPE_FLOAT16;
  output_4->layout = CSINN_LAYOUT_NCHW;
  output_4->dim[0] = 1;
  output_4->dim[1] = 16;
  output_4->dim[2] = 112;
  output_4->dim[3] = 112;
  output_4->dim_count = 4;
  output_4->qinfo = (struct csinn_quant_info *)(params_base + 2712);
  output_4->quant_channel = 1;
  struct csinn_tensor *kernel_4 = csinn_alloc_tensor(sess);
  kernel_4->name = "kernel_4";
  kernel_4->data = params_base + 2792;
  kernel_4->is_const = 1;
  kernel_4->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_4->dtype = CSINN_DTYPE_FLOAT16;
  kernel_4->layout = CSINN_LAYOUT_OIHW;
  kernel_4->dim[0] = 16;
  kernel_4->dim[1] = 32;
  kernel_4->dim[2] = 1;
  kernel_4->dim[3] = 1;
  kernel_4->dim_count = 4;
  kernel_4->qinfo = (struct csinn_quant_info *)(params_base + 2736);
  kernel_4->quant_channel = 1;
  struct csinn_tensor *bias_4 = csinn_alloc_tensor(sess);
  bias_4->name = "bias_4";
  bias_4->data = params_base + 3840;
  bias_4->is_const = 1;
  bias_4->dtype = CSINN_DTYPE_FLOAT16;
  bias_4->layout = CSINN_LAYOUT_O;
  bias_4->dim[0] = 16;
  bias_4->dim_count = 1;
  bias_4->qinfo = (struct csinn_quant_info *)(params_base + 3816);
  bias_4->quant_channel = 1;
  struct csinn_conv2d_params *params_4 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_4->group = 1;
  params_4->stride_height = 1;
  params_4->stride_width = 1;
  params_4->dilation_height = 1;
  params_4->dilation_width = 1;
  params_4->conv_extra.kernel_tm = NULL;
  params_4->conv_extra.conv_mode = CSINN_DIRECT;
  params_4->pad_top = 0;
  params_4->pad_left = 0;
  params_4->pad_down = 0;
  params_4->pad_right = 0;
  params_4->base.name = "conv2d_Conv_4_PART_0_6_fuse_bias_add_Conv_4_7";
  params_4->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_3, output_4, kernel_4, bias_4, params_4);
  struct csinn_tensor *output_5 = csinn_alloc_tensor(sess);
  output_5->name = "output_5";
  output_5->dtype = CSINN_DTYPE_FLOAT16;
  output_5->layout = CSINN_LAYOUT_NCHW;
  output_5->dim[0] = 1;
  output_5->dim[1] = 96;
  output_5->dim[2] = 112;
  output_5->dim[3] = 112;
  output_5->dim_count = 4;
  output_5->qinfo = (struct csinn_quant_info *)(params_base + 3872);
  output_5->quant_channel = 1;
  struct csinn_tensor *kernel_5 = csinn_alloc_tensor(sess);
  kernel_5->name = "kernel_5";
  kernel_5->data = params_base + 3944;
  kernel_5->is_const = 1;
  kernel_5->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_5->dtype = CSINN_DTYPE_FLOAT16;
  kernel_5->layout = CSINN_LAYOUT_OIHW;
  kernel_5->dim[0] = 96;
  kernel_5->dim[1] = 16;
  kernel_5->dim[2] = 1;
  kernel_5->dim[3] = 1;
  kernel_5->dim_count = 4;
  kernel_5->qinfo = (struct csinn_quant_info *)(params_base + 3896);
  kernel_5->quant_channel = 1;
  struct csinn_tensor *bias_5 = csinn_alloc_tensor(sess);
  bias_5->name = "bias_5";
  bias_5->data = params_base + 7040;
  bias_5->is_const = 1;
  bias_5->dtype = CSINN_DTYPE_FLOAT16;
  bias_5->layout = CSINN_LAYOUT_O;
  bias_5->dim[0] = 96;
  bias_5->dim_count = 1;
  bias_5->qinfo = (struct csinn_quant_info *)(params_base + 7016);
  bias_5->quant_channel = 1;
  struct csinn_conv2d_params *params_5 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_5->group = 1;
  params_5->stride_height = 1;
  params_5->stride_width = 1;
  params_5->dilation_height = 1;
  params_5->dilation_width = 1;
  params_5->conv_extra.kernel_tm = NULL;
  params_5->conv_extra.conv_mode = CSINN_DIRECT;
  params_5->pad_top = 0;
  params_5->pad_left = 0;
  params_5->pad_down = 0;
  params_5->pad_right = 0;
  params_5->base.name = "conv2d_Conv_5_PART_0_8_fuse_bias_add_Conv_5_9";
  params_5->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_4, output_5, kernel_5, bias_5, params_5);
  struct csinn_tensor *output_6 = csinn_alloc_tensor(sess);
  output_6->name = "output_6";
  output_6->dtype = CSINN_DTYPE_FLOAT16;
  output_6->layout = CSINN_LAYOUT_NCHW;
  output_6->dim[0] = 1;
  output_6->dim[1] = 96;
  output_6->dim[2] = 112;
  output_6->dim[3] = 112;
  output_6->dim_count = 4;
  output_6->qinfo = (struct csinn_quant_info *)(params_base + 7232);
  output_6->quant_channel = 1;
  struct csinn_relu_params *params_6 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_6->n = 6;
  params_6->base.name = "clip_Clip_6_10";
  params_6->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_5, output_6, params_6);
  struct csinn_tensor *output_7 = csinn_alloc_tensor(sess);
  output_7->name = "output_7";
  output_7->dtype = CSINN_DTYPE_FLOAT16;
  output_7->layout = CSINN_LAYOUT_NCHW;
  output_7->dim[0] = 1;
  output_7->dim[1] = 96;
  output_7->dim[2] = 56;
  output_7->dim[3] = 56;
  output_7->dim_count = 4;
  output_7->qinfo = (struct csinn_quant_info *)(params_base + 7256);
  output_7->quant_channel = 1;
  struct csinn_tensor *kernel_7 = csinn_alloc_tensor(sess);
  kernel_7->name = "kernel_7";
  kernel_7->data = params_base + 7336;
  kernel_7->is_const = 1;
  kernel_7->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_7->dtype = CSINN_DTYPE_FLOAT16;
  kernel_7->layout = CSINN_LAYOUT_O1HW;
  kernel_7->dim[0] = 96;
  kernel_7->dim[1] = 1;
  kernel_7->dim[2] = 3;
  kernel_7->dim[3] = 3;
  kernel_7->dim_count = 4;
  kernel_7->qinfo = (struct csinn_quant_info *)(params_base + 7280);
  kernel_7->quant_channel = 1;
  struct csinn_tensor *bias_7 = csinn_alloc_tensor(sess);
  bias_7->name = "bias_7";
  bias_7->data = params_base + 9088;
  bias_7->is_const = 1;
  bias_7->dtype = CSINN_DTYPE_FLOAT16;
  bias_7->layout = CSINN_LAYOUT_O;
  bias_7->dim[0] = 96;
  bias_7->dim_count = 1;
  bias_7->qinfo = (struct csinn_quant_info *)(params_base + 9064);
  bias_7->quant_channel = 1;
  struct csinn_conv2d_params *params_7 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_7->group = 96;
  params_7->stride_height = 2;
  params_7->stride_width = 2;
  params_7->dilation_height = 1;
  params_7->dilation_width = 1;
  params_7->conv_extra.kernel_tm = NULL;
  params_7->conv_extra.conv_mode = CSINN_DIRECT;
  params_7->pad_top = 1;
  params_7->pad_left = 1;
  params_7->pad_down = 1;
  params_7->pad_right = 1;
  params_7->base.name = "conv2d_Conv_7_PART_0_11_fuse_bias_add_Conv_7_12";
  params_7->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_6, output_7, kernel_7, bias_7, params_7);
  struct csinn_tensor *output_8 = csinn_alloc_tensor(sess);
  output_8->name = "output_8";
  output_8->dtype = CSINN_DTYPE_FLOAT16;
  output_8->layout = CSINN_LAYOUT_NCHW;
  output_8->dim[0] = 1;
  output_8->dim[1] = 96;
  output_8->dim[2] = 56;
  output_8->dim[3] = 56;
  output_8->dim_count = 4;
  output_8->qinfo = (struct csinn_quant_info *)(params_base + 9280);
  output_8->quant_channel = 1;
  struct csinn_relu_params *params_8 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_8->n = 6;
  params_8->base.name = "clip_Clip_8_13";
  params_8->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_7, output_8, params_8);
  struct csinn_tensor *output_9 = csinn_alloc_tensor(sess);
  output_9->name = "output_9";
  output_9->dtype = CSINN_DTYPE_FLOAT16;
  output_9->layout = CSINN_LAYOUT_NCHW;
  output_9->dim[0] = 1;
  output_9->dim[1] = 24;
  output_9->dim[2] = 56;
  output_9->dim[3] = 56;
  output_9->dim_count = 4;
  output_9->qinfo = (struct csinn_quant_info *)(params_base + 9304);
  output_9->quant_channel = 1;
  struct csinn_tensor *kernel_9 = csinn_alloc_tensor(sess);
  kernel_9->name = "kernel_9";
  kernel_9->data = params_base + 9384;
  kernel_9->is_const = 1;
  kernel_9->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_9->dtype = CSINN_DTYPE_FLOAT16;
  kernel_9->layout = CSINN_LAYOUT_OIHW;
  kernel_9->dim[0] = 24;
  kernel_9->dim[1] = 96;
  kernel_9->dim[2] = 1;
  kernel_9->dim[3] = 1;
  kernel_9->dim_count = 4;
  kernel_9->qinfo = (struct csinn_quant_info *)(params_base + 9328);
  kernel_9->quant_channel = 1;
  struct csinn_tensor *bias_9 = csinn_alloc_tensor(sess);
  bias_9->name = "bias_9";
  bias_9->data = params_base + 14016;
  bias_9->is_const = 1;
  bias_9->dtype = CSINN_DTYPE_FLOAT16;
  bias_9->layout = CSINN_LAYOUT_O;
  bias_9->dim[0] = 24;
  bias_9->dim_count = 1;
  bias_9->qinfo = (struct csinn_quant_info *)(params_base + 13992);
  bias_9->quant_channel = 1;
  struct csinn_conv2d_params *params_9 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_9->group = 1;
  params_9->stride_height = 1;
  params_9->stride_width = 1;
  params_9->dilation_height = 1;
  params_9->dilation_width = 1;
  params_9->conv_extra.kernel_tm = NULL;
  params_9->conv_extra.conv_mode = CSINN_DIRECT;
  params_9->pad_top = 0;
  params_9->pad_left = 0;
  params_9->pad_down = 0;
  params_9->pad_right = 0;
  params_9->base.name = "conv2d_Conv_9_PART_0_14_fuse_bias_add_Conv_9_15";
  params_9->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_8, output_9, kernel_9, bias_9, params_9);
  struct csinn_tensor *output_11 = csinn_alloc_tensor(sess);
  output_11->name = "output_11";
  output_11->dtype = CSINN_DTYPE_FLOAT16;
  output_11->layout = CSINN_LAYOUT_NCHW;
  output_11->dim[0] = 1;
  output_11->dim[1] = 144;
  output_11->dim[2] = 56;
  output_11->dim[3] = 56;
  output_11->dim_count = 4;
  output_11->qinfo = (struct csinn_quant_info *)(params_base + 14064);
  output_11->quant_channel = 1;
  struct csinn_tensor *kernel_11 = csinn_alloc_tensor(sess);
  kernel_11->name = "kernel_11";
  kernel_11->data = params_base + 14120;
  kernel_11->is_const = 1;
  kernel_11->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_11->dtype = CSINN_DTYPE_FLOAT16;
  kernel_11->layout = CSINN_LAYOUT_OIHW;
  kernel_11->dim[0] = 144;
  kernel_11->dim[1] = 24;
  kernel_11->dim[2] = 1;
  kernel_11->dim[3] = 1;
  kernel_11->dim_count = 4;
  kernel_11->qinfo = (struct csinn_quant_info *)(params_base + 14088);
  kernel_11->quant_channel = 1;
  struct csinn_tensor *bias_11 = csinn_alloc_tensor(sess);
  bias_11->name = "bias_11";
  bias_11->data = params_base + 21056;
  bias_11->is_const = 1;
  bias_11->dtype = CSINN_DTYPE_FLOAT16;
  bias_11->layout = CSINN_LAYOUT_O;
  bias_11->dim[0] = 144;
  bias_11->dim_count = 1;
  bias_11->qinfo = (struct csinn_quant_info *)(params_base + 21032);
  bias_11->quant_channel = 1;
  struct csinn_conv2d_params *params_11 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_11->group = 1;
  params_11->stride_height = 1;
  params_11->stride_width = 1;
  params_11->dilation_height = 1;
  params_11->dilation_width = 1;
  params_11->conv_extra.kernel_tm = NULL;
  params_11->conv_extra.conv_mode = CSINN_DIRECT;
  params_11->pad_top = 0;
  params_11->pad_left = 0;
  params_11->pad_down = 0;
  params_11->pad_right = 0;
  params_11->base.name = "conv2d_Conv_10_PART_0_16_fuse_bias_add_Conv_10_17";
  params_11->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_9, output_11, kernel_11, bias_11, params_11);
  struct csinn_tensor *output_12 = csinn_alloc_tensor(sess);
  output_12->name = "output_12";
  output_12->dtype = CSINN_DTYPE_FLOAT16;
  output_12->layout = CSINN_LAYOUT_NCHW;
  output_12->dim[0] = 1;
  output_12->dim[1] = 144;
  output_12->dim[2] = 56;
  output_12->dim[3] = 56;
  output_12->dim_count = 4;
  output_12->qinfo = (struct csinn_quant_info *)(params_base + 21344);
  output_12->quant_channel = 1;
  struct csinn_relu_params *params_12 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_12->n = 6;
  params_12->base.name = "clip_Clip_11_18";
  params_12->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_11, output_12, params_12);
  struct csinn_tensor *output_13 = csinn_alloc_tensor(sess);
  output_13->name = "output_13";
  output_13->dtype = CSINN_DTYPE_FLOAT16;
  output_13->layout = CSINN_LAYOUT_NCHW;
  output_13->dim[0] = 1;
  output_13->dim[1] = 144;
  output_13->dim[2] = 56;
  output_13->dim[3] = 56;
  output_13->dim_count = 4;
  output_13->qinfo = (struct csinn_quant_info *)(params_base + 21368);
  output_13->quant_channel = 1;
  struct csinn_tensor *kernel_13 = csinn_alloc_tensor(sess);
  kernel_13->name = "kernel_13";
  kernel_13->data = params_base + 21448;
  kernel_13->is_const = 1;
  kernel_13->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_13->dtype = CSINN_DTYPE_FLOAT16;
  kernel_13->layout = CSINN_LAYOUT_O1HW;
  kernel_13->dim[0] = 144;
  kernel_13->dim[1] = 1;
  kernel_13->dim[2] = 3;
  kernel_13->dim[3] = 3;
  kernel_13->dim_count = 4;
  kernel_13->qinfo = (struct csinn_quant_info *)(params_base + 21392);
  kernel_13->quant_channel = 1;
  struct csinn_tensor *bias_13 = csinn_alloc_tensor(sess);
  bias_13->name = "bias_13";
  bias_13->data = params_base + 24064;
  bias_13->is_const = 1;
  bias_13->dtype = CSINN_DTYPE_FLOAT16;
  bias_13->layout = CSINN_LAYOUT_O;
  bias_13->dim[0] = 144;
  bias_13->dim_count = 1;
  bias_13->qinfo = (struct csinn_quant_info *)(params_base + 24040);
  bias_13->quant_channel = 1;
  struct csinn_conv2d_params *params_13 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_13->group = 144;
  params_13->stride_height = 1;
  params_13->stride_width = 1;
  params_13->dilation_height = 1;
  params_13->dilation_width = 1;
  params_13->conv_extra.kernel_tm = NULL;
  params_13->conv_extra.conv_mode = CSINN_DIRECT;
  params_13->pad_top = 1;
  params_13->pad_left = 1;
  params_13->pad_down = 1;
  params_13->pad_right = 1;
  params_13->base.name = "conv2d_Conv_12_PART_0_19_fuse_bias_add_Conv_12_20";
  params_13->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_12, output_13, kernel_13, bias_13, params_13);
  struct csinn_tensor *output_14 = csinn_alloc_tensor(sess);
  output_14->name = "output_14";
  output_14->dtype = CSINN_DTYPE_FLOAT16;
  output_14->layout = CSINN_LAYOUT_NCHW;
  output_14->dim[0] = 1;
  output_14->dim[1] = 144;
  output_14->dim[2] = 56;
  output_14->dim[3] = 56;
  output_14->dim_count = 4;
  output_14->qinfo = (struct csinn_quant_info *)(params_base + 24352);
  output_14->quant_channel = 1;
  struct csinn_relu_params *params_14 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_14->n = 6;
  params_14->base.name = "clip_Clip_13_21";
  params_14->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_13, output_14, params_14);
  struct csinn_tensor *output_15 = csinn_alloc_tensor(sess);
  output_15->name = "output_15";
  output_15->dtype = CSINN_DTYPE_FLOAT16;
  output_15->layout = CSINN_LAYOUT_NCHW;
  output_15->dim[0] = 1;
  output_15->dim[1] = 24;
  output_15->dim[2] = 56;
  output_15->dim[3] = 56;
  output_15->dim_count = 4;
  output_15->qinfo = (struct csinn_quant_info *)(params_base + 24376);
  output_15->quant_channel = 1;
  struct csinn_tensor *kernel_15 = csinn_alloc_tensor(sess);
  kernel_15->name = "kernel_15";
  kernel_15->data = params_base + 24456;
  kernel_15->is_const = 1;
  kernel_15->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_15->dtype = CSINN_DTYPE_FLOAT16;
  kernel_15->layout = CSINN_LAYOUT_OIHW;
  kernel_15->dim[0] = 24;
  kernel_15->dim[1] = 144;
  kernel_15->dim[2] = 1;
  kernel_15->dim[3] = 1;
  kernel_15->dim_count = 4;
  kernel_15->qinfo = (struct csinn_quant_info *)(params_base + 24400);
  kernel_15->quant_channel = 1;
  struct csinn_tensor *bias_15 = csinn_alloc_tensor(sess);
  bias_15->name = "bias_15";
  bias_15->data = params_base + 31392;
  bias_15->is_const = 1;
  bias_15->dtype = CSINN_DTYPE_FLOAT16;
  bias_15->layout = CSINN_LAYOUT_O;
  bias_15->dim[0] = 24;
  bias_15->dim_count = 1;
  bias_15->qinfo = (struct csinn_quant_info *)(params_base + 31368);
  bias_15->quant_channel = 1;
  struct csinn_conv2d_params *params_15 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_15->group = 1;
  params_15->stride_height = 1;
  params_15->stride_width = 1;
  params_15->dilation_height = 1;
  params_15->dilation_width = 1;
  params_15->conv_extra.kernel_tm = NULL;
  params_15->conv_extra.conv_mode = CSINN_DIRECT;
  params_15->pad_top = 0;
  params_15->pad_left = 0;
  params_15->pad_down = 0;
  params_15->pad_right = 0;
  params_15->base.name = "conv2d_Conv_14_PART_0_22_fuse_bias_add_Conv_14_23";
  params_15->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_14, output_15, kernel_15, bias_15, params_15);
  struct csinn_tensor *output_16 = csinn_alloc_tensor(sess);
  output_16->name = "output_16";
  output_16->dtype = CSINN_DTYPE_FLOAT16;
  output_16->layout = CSINN_LAYOUT_NCHW;
  output_16->dim[0] = 1;
  output_16->dim[1] = 24;
  output_16->dim[2] = 56;
  output_16->dim[3] = 56;
  output_16->dim_count = 4;
  output_16->qinfo = (struct csinn_quant_info *)(params_base + 31440);
  output_16->quant_channel = 1;
  struct csinn_diso_params *params_16 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
  params_16->base.name = "add_Add_15_24";
  params_16->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_add_init(output_9, output_15, output_16, params_16);
  struct csinn_tensor *output_18 = csinn_alloc_tensor(sess);
  output_18->name = "output_18";
  output_18->dtype = CSINN_DTYPE_FLOAT16;
  output_18->layout = CSINN_LAYOUT_NCHW;
  output_18->dim[0] = 1;
  output_18->dim[1] = 144;
  output_18->dim[2] = 56;
  output_18->dim[3] = 56;
  output_18->dim_count = 4;
  output_18->qinfo = (struct csinn_quant_info *)(params_base + 31464);
  output_18->quant_channel = 1;
  struct csinn_tensor *kernel_18 = csinn_alloc_tensor(sess);
  kernel_18->name = "kernel_18";
  kernel_18->data = params_base + 31528;
  kernel_18->is_const = 1;
  kernel_18->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_18->dtype = CSINN_DTYPE_FLOAT16;
  kernel_18->layout = CSINN_LAYOUT_OIHW;
  kernel_18->dim[0] = 144;
  kernel_18->dim[1] = 24;
  kernel_18->dim[2] = 1;
  kernel_18->dim[3] = 1;
  kernel_18->dim_count = 4;
  kernel_18->qinfo = (struct csinn_quant_info *)(params_base + 31488);
  kernel_18->quant_channel = 1;
  struct csinn_tensor *bias_18 = csinn_alloc_tensor(sess);
  bias_18->name = "bias_18";
  bias_18->data = params_base + 38464;
  bias_18->is_const = 1;
  bias_18->dtype = CSINN_DTYPE_FLOAT16;
  bias_18->layout = CSINN_LAYOUT_O;
  bias_18->dim[0] = 144;
  bias_18->dim_count = 1;
  bias_18->qinfo = (struct csinn_quant_info *)(params_base + 38440);
  bias_18->quant_channel = 1;
  struct csinn_conv2d_params *params_18 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_18->group = 1;
  params_18->stride_height = 1;
  params_18->stride_width = 1;
  params_18->dilation_height = 1;
  params_18->dilation_width = 1;
  params_18->conv_extra.kernel_tm = NULL;
  params_18->conv_extra.conv_mode = CSINN_DIRECT;
  params_18->pad_top = 0;
  params_18->pad_left = 0;
  params_18->pad_down = 0;
  params_18->pad_right = 0;
  params_18->base.name = "conv2d_Conv_16_PART_0_25_fuse_bias_add_Conv_16_26";
  params_18->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_16, output_18, kernel_18, bias_18, params_18);
  struct csinn_tensor *output_19 = csinn_alloc_tensor(sess);
  output_19->name = "output_19";
  output_19->dtype = CSINN_DTYPE_FLOAT16;
  output_19->layout = CSINN_LAYOUT_NCHW;
  output_19->dim[0] = 1;
  output_19->dim[1] = 144;
  output_19->dim[2] = 56;
  output_19->dim[3] = 56;
  output_19->dim_count = 4;
  output_19->qinfo = (struct csinn_quant_info *)(params_base + 38752);
  output_19->quant_channel = 1;
  struct csinn_relu_params *params_19 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_19->n = 6;
  params_19->base.name = "clip_Clip_17_27";
  params_19->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_18, output_19, params_19);
  struct csinn_tensor *output_20 = csinn_alloc_tensor(sess);
  output_20->name = "output_20";
  output_20->dtype = CSINN_DTYPE_FLOAT16;
  output_20->layout = CSINN_LAYOUT_NCHW;
  output_20->dim[0] = 1;
  output_20->dim[1] = 144;
  output_20->dim[2] = 28;
  output_20->dim[3] = 28;
  output_20->dim_count = 4;
  output_20->qinfo = (struct csinn_quant_info *)(params_base + 38776);
  output_20->quant_channel = 1;
  struct csinn_tensor *kernel_20 = csinn_alloc_tensor(sess);
  kernel_20->name = "kernel_20";
  kernel_20->data = params_base + 38856;
  kernel_20->is_const = 1;
  kernel_20->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_20->dtype = CSINN_DTYPE_FLOAT16;
  kernel_20->layout = CSINN_LAYOUT_O1HW;
  kernel_20->dim[0] = 144;
  kernel_20->dim[1] = 1;
  kernel_20->dim[2] = 3;
  kernel_20->dim[3] = 3;
  kernel_20->dim_count = 4;
  kernel_20->qinfo = (struct csinn_quant_info *)(params_base + 38800);
  kernel_20->quant_channel = 1;
  struct csinn_tensor *bias_20 = csinn_alloc_tensor(sess);
  bias_20->name = "bias_20";
  bias_20->data = params_base + 41472;
  bias_20->is_const = 1;
  bias_20->dtype = CSINN_DTYPE_FLOAT16;
  bias_20->layout = CSINN_LAYOUT_O;
  bias_20->dim[0] = 144;
  bias_20->dim_count = 1;
  bias_20->qinfo = (struct csinn_quant_info *)(params_base + 41448);
  bias_20->quant_channel = 1;
  struct csinn_conv2d_params *params_20 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_20->group = 144;
  params_20->stride_height = 2;
  params_20->stride_width = 2;
  params_20->dilation_height = 1;
  params_20->dilation_width = 1;
  params_20->conv_extra.kernel_tm = NULL;
  params_20->conv_extra.conv_mode = CSINN_DIRECT;
  params_20->pad_top = 1;
  params_20->pad_left = 1;
  params_20->pad_down = 1;
  params_20->pad_right = 1;
  params_20->base.name = "conv2d_Conv_18_PART_0_28_fuse_bias_add_Conv_18_29";
  params_20->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_19, output_20, kernel_20, bias_20, params_20);
  struct csinn_tensor *output_21 = csinn_alloc_tensor(sess);
  output_21->name = "output_21";
  output_21->dtype = CSINN_DTYPE_FLOAT16;
  output_21->layout = CSINN_LAYOUT_NCHW;
  output_21->dim[0] = 1;
  output_21->dim[1] = 144;
  output_21->dim[2] = 28;
  output_21->dim[3] = 28;
  output_21->dim_count = 4;
  output_21->qinfo = (struct csinn_quant_info *)(params_base + 41760);
  output_21->quant_channel = 1;
  struct csinn_relu_params *params_21 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_21->n = 6;
  params_21->base.name = "clip_Clip_19_30";
  params_21->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_20, output_21, params_21);
  struct csinn_tensor *output_22 = csinn_alloc_tensor(sess);
  output_22->name = "output_22";
  output_22->dtype = CSINN_DTYPE_FLOAT16;
  output_22->layout = CSINN_LAYOUT_NCHW;
  output_22->dim[0] = 1;
  output_22->dim[1] = 32;
  output_22->dim[2] = 28;
  output_22->dim[3] = 28;
  output_22->dim_count = 4;
  output_22->qinfo = (struct csinn_quant_info *)(params_base + 41784);
  output_22->quant_channel = 1;
  struct csinn_tensor *kernel_22 = csinn_alloc_tensor(sess);
  kernel_22->name = "kernel_22";
  kernel_22->data = params_base + 41864;
  kernel_22->is_const = 1;
  kernel_22->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_22->dtype = CSINN_DTYPE_FLOAT16;
  kernel_22->layout = CSINN_LAYOUT_OIHW;
  kernel_22->dim[0] = 32;
  kernel_22->dim[1] = 144;
  kernel_22->dim[2] = 1;
  kernel_22->dim[3] = 1;
  kernel_22->dim_count = 4;
  kernel_22->qinfo = (struct csinn_quant_info *)(params_base + 41808);
  kernel_22->quant_channel = 1;
  struct csinn_tensor *bias_22 = csinn_alloc_tensor(sess);
  bias_22->name = "bias_22";
  bias_22->data = params_base + 51104;
  bias_22->is_const = 1;
  bias_22->dtype = CSINN_DTYPE_FLOAT16;
  bias_22->layout = CSINN_LAYOUT_O;
  bias_22->dim[0] = 32;
  bias_22->dim_count = 1;
  bias_22->qinfo = (struct csinn_quant_info *)(params_base + 51080);
  bias_22->quant_channel = 1;
  struct csinn_conv2d_params *params_22 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_22->group = 1;
  params_22->stride_height = 1;
  params_22->stride_width = 1;
  params_22->dilation_height = 1;
  params_22->dilation_width = 1;
  params_22->conv_extra.kernel_tm = NULL;
  params_22->conv_extra.conv_mode = CSINN_DIRECT;
  params_22->pad_top = 0;
  params_22->pad_left = 0;
  params_22->pad_down = 0;
  params_22->pad_right = 0;
  params_22->base.name = "conv2d_Conv_20_PART_0_31_fuse_bias_add_Conv_20_32";
  params_22->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_21, output_22, kernel_22, bias_22, params_22);
  /* 1x1 conv (group=1), 32 -> 192 channels, 28x28, FP16 NCHW.
   * All data/qinfo pointers are fixed byte offsets into the binary
   * params blob (params_base); generated by HHB — do not edit offsets. */
  struct csinn_tensor *output_24 = csinn_alloc_tensor(sess);
  output_24->name = "output_24";
  output_24->dtype = CSINN_DTYPE_FLOAT16;
  output_24->layout = CSINN_LAYOUT_NCHW;
  output_24->dim[0] = 1;
  output_24->dim[1] = 192;
  output_24->dim[2] = 28;
  output_24->dim[3] = 28;
  output_24->dim_count = 4;
  output_24->qinfo = (struct csinn_quant_info *)(params_base + 51168);
  output_24->quant_channel = 1;
  /* Constant 192x32x1x1 OIHW weight tensor inside the params blob. */
  struct csinn_tensor *kernel_24 = csinn_alloc_tensor(sess);
  kernel_24->name = "kernel_24";
  kernel_24->data = params_base + 51240;
  kernel_24->is_const = 1;
  kernel_24->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_24->dtype = CSINN_DTYPE_FLOAT16;
  kernel_24->layout = CSINN_LAYOUT_OIHW;
  kernel_24->dim[0] = 192;
  kernel_24->dim[1] = 32;
  kernel_24->dim[2] = 1;
  kernel_24->dim[3] = 1;
  kernel_24->dim_count = 4;
  kernel_24->qinfo = (struct csinn_quant_info *)(params_base + 51192);
  kernel_24->quant_channel = 1;
  /* Per-output-channel bias vector (192 entries). */
  struct csinn_tensor *bias_24 = csinn_alloc_tensor(sess);
  bias_24->name = "bias_24";
  bias_24->data = params_base + 63552;
  bias_24->is_const = 1;
  bias_24->dtype = CSINN_DTYPE_FLOAT16;
  bias_24->layout = CSINN_LAYOUT_O;
  bias_24->dim[0] = 192;
  bias_24->dim_count = 1;
  bias_24->qinfo = (struct csinn_quant_info *)(params_base + 63528);
  bias_24->quant_channel = 1;
  /* Stride 1, no padding, no dilation: pure channel-mixing 1x1 conv. */
  struct csinn_conv2d_params *params_24 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_24->group = 1;
  params_24->stride_height = 1;
  params_24->stride_width = 1;
  params_24->dilation_height = 1;
  params_24->dilation_width = 1;
  params_24->conv_extra.kernel_tm = NULL;
  params_24->conv_extra.conv_mode = CSINN_DIRECT;
  params_24->pad_top = 0;
  params_24->pad_left = 0;
  params_24->pad_down = 0;
  params_24->pad_right = 0;
  params_24->base.name = "conv2d_Conv_21_PART_0_33_fuse_bias_add_Conv_21_34";
  params_24->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_22, output_24, kernel_24, bias_24, params_24);
  /* ReLU6 (clip to [0, 6], n = 6): output_24 -> output_25. */
  struct csinn_tensor *output_25 = csinn_alloc_tensor(sess);
  output_25->name = "output_25";
  output_25->dtype = CSINN_DTYPE_FLOAT16;
  output_25->layout = CSINN_LAYOUT_NCHW;
  output_25->dim[0] = 1;
  output_25->dim[1] = 192;
  output_25->dim[2] = 28;
  output_25->dim[3] = 28;
  output_25->dim_count = 4;
  output_25->qinfo = (struct csinn_quant_info *)(params_base + 63936);
  output_25->quant_channel = 1;
  struct csinn_relu_params *params_25 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_25->n = 6;
  params_25->base.name = "clip_Clip_22_35";
  params_25->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_24, output_25, params_25);
  /* 3x3 depthwise conv (group == 192 channels), stride 1, pad 1,
   * so spatial size stays 28x28. Kernel layout O1HW (one input
   * channel per group). */
  struct csinn_tensor *output_26 = csinn_alloc_tensor(sess);
  output_26->name = "output_26";
  output_26->dtype = CSINN_DTYPE_FLOAT16;
  output_26->layout = CSINN_LAYOUT_NCHW;
  output_26->dim[0] = 1;
  output_26->dim[1] = 192;
  output_26->dim[2] = 28;
  output_26->dim[3] = 28;
  output_26->dim_count = 4;
  output_26->qinfo = (struct csinn_quant_info *)(params_base + 63960);
  output_26->quant_channel = 1;
  struct csinn_tensor *kernel_26 = csinn_alloc_tensor(sess);
  kernel_26->name = "kernel_26";
  kernel_26->data = params_base + 64040;
  kernel_26->is_const = 1;
  kernel_26->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_26->dtype = CSINN_DTYPE_FLOAT16;
  kernel_26->layout = CSINN_LAYOUT_O1HW;
  kernel_26->dim[0] = 192;
  kernel_26->dim[1] = 1;
  kernel_26->dim[2] = 3;
  kernel_26->dim[3] = 3;
  kernel_26->dim_count = 4;
  kernel_26->qinfo = (struct csinn_quant_info *)(params_base + 63984);
  kernel_26->quant_channel = 1;
  struct csinn_tensor *bias_26 = csinn_alloc_tensor(sess);
  bias_26->name = "bias_26";
  bias_26->data = params_base + 67520;
  bias_26->is_const = 1;
  bias_26->dtype = CSINN_DTYPE_FLOAT16;
  bias_26->layout = CSINN_LAYOUT_O;
  bias_26->dim[0] = 192;
  bias_26->dim_count = 1;
  bias_26->qinfo = (struct csinn_quant_info *)(params_base + 67496);
  bias_26->quant_channel = 1;
  struct csinn_conv2d_params *params_26 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_26->group = 192;
  params_26->stride_height = 1;
  params_26->stride_width = 1;
  params_26->dilation_height = 1;
  params_26->dilation_width = 1;
  params_26->conv_extra.kernel_tm = NULL;
  params_26->conv_extra.conv_mode = CSINN_DIRECT;
  params_26->pad_top = 1;
  params_26->pad_left = 1;
  params_26->pad_down = 1;
  params_26->pad_right = 1;
  params_26->base.name = "conv2d_Conv_23_PART_0_36_fuse_bias_add_Conv_23_37";
  params_26->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_25, output_26, kernel_26, bias_26, params_26);
  /* ReLU6 (clip to [0, 6]): output_26 -> output_27. */
  struct csinn_tensor *output_27 = csinn_alloc_tensor(sess);
  output_27->name = "output_27";
  output_27->dtype = CSINN_DTYPE_FLOAT16;
  output_27->layout = CSINN_LAYOUT_NCHW;
  output_27->dim[0] = 1;
  output_27->dim[1] = 192;
  output_27->dim[2] = 28;
  output_27->dim[3] = 28;
  output_27->dim_count = 4;
  output_27->qinfo = (struct csinn_quant_info *)(params_base + 67904);
  output_27->quant_channel = 1;
  struct csinn_relu_params *params_27 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_27->n = 6;
  params_27->base.name = "clip_Clip_24_38";
  params_27->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_26, output_27, params_27);
  /* 1x1 conv, 192 -> 32 channels (linear bottleneck projection; no
   * activation after it), followed by an elementwise residual add
   * with output_22 — an inverted-residual pattern (MobileNetV2-style,
   * judging by the shapes). */
  struct csinn_tensor *output_28 = csinn_alloc_tensor(sess);
  output_28->name = "output_28";
  output_28->dtype = CSINN_DTYPE_FLOAT16;
  output_28->layout = CSINN_LAYOUT_NCHW;
  output_28->dim[0] = 1;
  output_28->dim[1] = 32;
  output_28->dim[2] = 28;
  output_28->dim[3] = 28;
  output_28->dim_count = 4;
  output_28->qinfo = (struct csinn_quant_info *)(params_base + 67928);
  output_28->quant_channel = 1;
  struct csinn_tensor *kernel_28 = csinn_alloc_tensor(sess);
  kernel_28->name = "kernel_28";
  kernel_28->data = params_base + 68008;
  kernel_28->is_const = 1;
  kernel_28->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_28->dtype = CSINN_DTYPE_FLOAT16;
  kernel_28->layout = CSINN_LAYOUT_OIHW;
  kernel_28->dim[0] = 32;
  kernel_28->dim[1] = 192;
  kernel_28->dim[2] = 1;
  kernel_28->dim[3] = 1;
  kernel_28->dim_count = 4;
  kernel_28->qinfo = (struct csinn_quant_info *)(params_base + 67952);
  kernel_28->quant_channel = 1;
  struct csinn_tensor *bias_28 = csinn_alloc_tensor(sess);
  bias_28->name = "bias_28";
  bias_28->data = params_base + 80320;
  bias_28->is_const = 1;
  bias_28->dtype = CSINN_DTYPE_FLOAT16;
  bias_28->layout = CSINN_LAYOUT_O;
  bias_28->dim[0] = 32;
  bias_28->dim_count = 1;
  bias_28->qinfo = (struct csinn_quant_info *)(params_base + 80296);
  bias_28->quant_channel = 1;
  struct csinn_conv2d_params *params_28 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_28->group = 1;
  params_28->stride_height = 1;
  params_28->stride_width = 1;
  params_28->dilation_height = 1;
  params_28->dilation_width = 1;
  params_28->conv_extra.kernel_tm = NULL;
  params_28->conv_extra.conv_mode = CSINN_DIRECT;
  params_28->pad_top = 0;
  params_28->pad_left = 0;
  params_28->pad_down = 0;
  params_28->pad_right = 0;
  params_28->base.name = "conv2d_Conv_25_PART_0_39_fuse_bias_add_Conv_25_40";
  params_28->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_27, output_28, kernel_28, bias_28, params_28);
  /* Residual: output_29 = output_22 + output_28 (two-input op). */
  struct csinn_tensor *output_29 = csinn_alloc_tensor(sess);
  output_29->name = "output_29";
  output_29->dtype = CSINN_DTYPE_FLOAT16;
  output_29->layout = CSINN_LAYOUT_NCHW;
  output_29->dim[0] = 1;
  output_29->dim[1] = 32;
  output_29->dim[2] = 28;
  output_29->dim[3] = 28;
  output_29->dim_count = 4;
  output_29->qinfo = (struct csinn_quant_info *)(params_base + 80384);
  output_29->quant_channel = 1;
  struct csinn_diso_params *params_29 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
  params_29->base.name = "add_Add_26_41";
  params_29->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_add_init(output_22, output_28, output_29, params_29);
  /* Next bottleneck unit: 1x1 expansion conv, 32 -> 192 channels,
   * 28x28, stride 1, no padding. */
  struct csinn_tensor *output_32 = csinn_alloc_tensor(sess);
  output_32->name = "output_32";
  output_32->dtype = CSINN_DTYPE_FLOAT16;
  output_32->layout = CSINN_LAYOUT_NCHW;
  output_32->dim[0] = 1;
  output_32->dim[1] = 192;
  output_32->dim[2] = 28;
  output_32->dim[3] = 28;
  output_32->dim_count = 4;
  output_32->qinfo = (struct csinn_quant_info *)(params_base + 80408);
  output_32->quant_channel = 1;
  struct csinn_tensor *kernel_32 = csinn_alloc_tensor(sess);
  kernel_32->name = "kernel_32";
  kernel_32->data = params_base + 80488;
  kernel_32->is_const = 1;
  kernel_32->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_32->dtype = CSINN_DTYPE_FLOAT16;
  kernel_32->layout = CSINN_LAYOUT_OIHW;
  kernel_32->dim[0] = 192;
  kernel_32->dim[1] = 32;
  kernel_32->dim[2] = 1;
  kernel_32->dim[3] = 1;
  kernel_32->dim_count = 4;
  kernel_32->qinfo = (struct csinn_quant_info *)(params_base + 80432);
  kernel_32->quant_channel = 1;
  struct csinn_tensor *bias_32 = csinn_alloc_tensor(sess);
  bias_32->name = "bias_32";
  bias_32->data = params_base + 92800;
  bias_32->is_const = 1;
  bias_32->dtype = CSINN_DTYPE_FLOAT16;
  bias_32->layout = CSINN_LAYOUT_O;
  bias_32->dim[0] = 192;
  bias_32->dim_count = 1;
  bias_32->qinfo = (struct csinn_quant_info *)(params_base + 92776);
  bias_32->quant_channel = 1;
  struct csinn_conv2d_params *params_32 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_32->group = 1;
  params_32->stride_height = 1;
  params_32->stride_width = 1;
  params_32->dilation_height = 1;
  params_32->dilation_width = 1;
  params_32->conv_extra.kernel_tm = NULL;
  params_32->conv_extra.conv_mode = CSINN_DIRECT;
  params_32->pad_top = 0;
  params_32->pad_left = 0;
  params_32->pad_down = 0;
  params_32->pad_right = 0;
  params_32->base.name = "conv2d_Conv_27_PART_0_42_fuse_bias_add_Conv_27_43";
  params_32->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_29, output_32, kernel_32, bias_32, params_32);
  /* ReLU6 (clip to [0, 6]): output_32 -> output_33. */
  struct csinn_tensor *output_33 = csinn_alloc_tensor(sess);
  output_33->name = "output_33";
  output_33->dtype = CSINN_DTYPE_FLOAT16;
  output_33->layout = CSINN_LAYOUT_NCHW;
  output_33->dim[0] = 1;
  output_33->dim[1] = 192;
  output_33->dim[2] = 28;
  output_33->dim[3] = 28;
  output_33->dim_count = 4;
  output_33->qinfo = (struct csinn_quant_info *)(params_base + 93184);
  output_33->quant_channel = 1;
  struct csinn_relu_params *params_33 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_33->n = 6;
  params_33->base.name = "clip_Clip_28_44";
  params_33->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_32, output_33, params_33);
  /* 3x3 depthwise conv (group == 192 channels), stride 1, pad 1;
   * spatial size preserved at 28x28. */
  struct csinn_tensor *output_34 = csinn_alloc_tensor(sess);
  output_34->name = "output_34";
  output_34->dtype = CSINN_DTYPE_FLOAT16;
  output_34->layout = CSINN_LAYOUT_NCHW;
  output_34->dim[0] = 1;
  output_34->dim[1] = 192;
  output_34->dim[2] = 28;
  output_34->dim[3] = 28;
  output_34->dim_count = 4;
  output_34->qinfo = (struct csinn_quant_info *)(params_base + 93208);
  output_34->quant_channel = 1;
  struct csinn_tensor *kernel_34 = csinn_alloc_tensor(sess);
  kernel_34->name = "kernel_34";
  kernel_34->data = params_base + 93288;
  kernel_34->is_const = 1;
  kernel_34->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_34->dtype = CSINN_DTYPE_FLOAT16;
  kernel_34->layout = CSINN_LAYOUT_O1HW;
  kernel_34->dim[0] = 192;
  kernel_34->dim[1] = 1;
  kernel_34->dim[2] = 3;
  kernel_34->dim[3] = 3;
  kernel_34->dim_count = 4;
  kernel_34->qinfo = (struct csinn_quant_info *)(params_base + 93232);
  kernel_34->quant_channel = 1;
  struct csinn_tensor *bias_34 = csinn_alloc_tensor(sess);
  bias_34->name = "bias_34";
  bias_34->data = params_base + 96768;
  bias_34->is_const = 1;
  bias_34->dtype = CSINN_DTYPE_FLOAT16;
  bias_34->layout = CSINN_LAYOUT_O;
  bias_34->dim[0] = 192;
  bias_34->dim_count = 1;
  bias_34->qinfo = (struct csinn_quant_info *)(params_base + 96744);
  bias_34->quant_channel = 1;
  struct csinn_conv2d_params *params_34 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_34->group = 192;
  params_34->stride_height = 1;
  params_34->stride_width = 1;
  params_34->dilation_height = 1;
  params_34->dilation_width = 1;
  params_34->conv_extra.kernel_tm = NULL;
  params_34->conv_extra.conv_mode = CSINN_DIRECT;
  params_34->pad_top = 1;
  params_34->pad_left = 1;
  params_34->pad_down = 1;
  params_34->pad_right = 1;
  params_34->base.name = "conv2d_Conv_29_PART_0_45_fuse_bias_add_Conv_29_46";
  params_34->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_33, output_34, kernel_34, bias_34, params_34);
  /* ReLU6 (clip to [0, 6]): output_34 -> output_35. */
  struct csinn_tensor *output_35 = csinn_alloc_tensor(sess);
  output_35->name = "output_35";
  output_35->dtype = CSINN_DTYPE_FLOAT16;
  output_35->layout = CSINN_LAYOUT_NCHW;
  output_35->dim[0] = 1;
  output_35->dim[1] = 192;
  output_35->dim[2] = 28;
  output_35->dim[3] = 28;
  output_35->dim_count = 4;
  output_35->qinfo = (struct csinn_quant_info *)(params_base + 97152);
  output_35->quant_channel = 1;
  struct csinn_relu_params *params_35 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_35->n = 6;
  params_35->base.name = "clip_Clip_30_47";
  params_35->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_34, output_35, params_35);
  /* 1x1 projection conv, 192 -> 32 channels (no activation after),
   * then residual add with output_29 (the previous unit's output). */
  struct csinn_tensor *output_36 = csinn_alloc_tensor(sess);
  output_36->name = "output_36";
  output_36->dtype = CSINN_DTYPE_FLOAT16;
  output_36->layout = CSINN_LAYOUT_NCHW;
  output_36->dim[0] = 1;
  output_36->dim[1] = 32;
  output_36->dim[2] = 28;
  output_36->dim[3] = 28;
  output_36->dim_count = 4;
  output_36->qinfo = (struct csinn_quant_info *)(params_base + 97176);
  output_36->quant_channel = 1;
  struct csinn_tensor *kernel_36 = csinn_alloc_tensor(sess);
  kernel_36->name = "kernel_36";
  kernel_36->data = params_base + 97256;
  kernel_36->is_const = 1;
  kernel_36->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_36->dtype = CSINN_DTYPE_FLOAT16;
  kernel_36->layout = CSINN_LAYOUT_OIHW;
  kernel_36->dim[0] = 32;
  kernel_36->dim[1] = 192;
  kernel_36->dim[2] = 1;
  kernel_36->dim[3] = 1;
  kernel_36->dim_count = 4;
  kernel_36->qinfo = (struct csinn_quant_info *)(params_base + 97200);
  kernel_36->quant_channel = 1;
  struct csinn_tensor *bias_36 = csinn_alloc_tensor(sess);
  bias_36->name = "bias_36";
  bias_36->data = params_base + 109568;
  bias_36->is_const = 1;
  bias_36->dtype = CSINN_DTYPE_FLOAT16;
  bias_36->layout = CSINN_LAYOUT_O;
  bias_36->dim[0] = 32;
  bias_36->dim_count = 1;
  bias_36->qinfo = (struct csinn_quant_info *)(params_base + 109544);
  bias_36->quant_channel = 1;
  struct csinn_conv2d_params *params_36 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_36->group = 1;
  params_36->stride_height = 1;
  params_36->stride_width = 1;
  params_36->dilation_height = 1;
  params_36->dilation_width = 1;
  params_36->conv_extra.kernel_tm = NULL;
  params_36->conv_extra.conv_mode = CSINN_DIRECT;
  params_36->pad_top = 0;
  params_36->pad_left = 0;
  params_36->pad_down = 0;
  params_36->pad_right = 0;
  params_36->base.name = "conv2d_Conv_31_PART_0_48_fuse_bias_add_Conv_31_49";
  params_36->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_35, output_36, kernel_36, bias_36, params_36);
  /* Residual: output_37 = output_29 + output_36. */
  struct csinn_tensor *output_37 = csinn_alloc_tensor(sess);
  output_37->name = "output_37";
  output_37->dtype = CSINN_DTYPE_FLOAT16;
  output_37->layout = CSINN_LAYOUT_NCHW;
  output_37->dim[0] = 1;
  output_37->dim[1] = 32;
  output_37->dim[2] = 28;
  output_37->dim[3] = 28;
  output_37->dim_count = 4;
  output_37->qinfo = (struct csinn_quant_info *)(params_base + 109632);
  output_37->quant_channel = 1;
  struct csinn_diso_params *params_37 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
  params_37->base.name = "add_Add_32_50";
  params_37->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_add_init(output_29, output_36, output_37, params_37);
  /* Downsampling unit begins: 1x1 expansion conv, 32 -> 192 channels
   * at 28x28 (the following depthwise conv does the stride-2 step). */
  struct csinn_tensor *output_39 = csinn_alloc_tensor(sess);
  output_39->name = "output_39";
  output_39->dtype = CSINN_DTYPE_FLOAT16;
  output_39->layout = CSINN_LAYOUT_NCHW;
  output_39->dim[0] = 1;
  output_39->dim[1] = 192;
  output_39->dim[2] = 28;
  output_39->dim[3] = 28;
  output_39->dim_count = 4;
  output_39->qinfo = (struct csinn_quant_info *)(params_base + 109656);
  output_39->quant_channel = 1;
  struct csinn_tensor *kernel_39 = csinn_alloc_tensor(sess);
  kernel_39->name = "kernel_39";
  kernel_39->data = params_base + 109736;
  kernel_39->is_const = 1;
  kernel_39->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_39->dtype = CSINN_DTYPE_FLOAT16;
  kernel_39->layout = CSINN_LAYOUT_OIHW;
  kernel_39->dim[0] = 192;
  kernel_39->dim[1] = 32;
  kernel_39->dim[2] = 1;
  kernel_39->dim[3] = 1;
  kernel_39->dim_count = 4;
  kernel_39->qinfo = (struct csinn_quant_info *)(params_base + 109680);
  kernel_39->quant_channel = 1;
  struct csinn_tensor *bias_39 = csinn_alloc_tensor(sess);
  bias_39->name = "bias_39";
  bias_39->data = params_base + 122048;
  bias_39->is_const = 1;
  bias_39->dtype = CSINN_DTYPE_FLOAT16;
  bias_39->layout = CSINN_LAYOUT_O;
  bias_39->dim[0] = 192;
  bias_39->dim_count = 1;
  bias_39->qinfo = (struct csinn_quant_info *)(params_base + 122024);
  bias_39->quant_channel = 1;
  struct csinn_conv2d_params *params_39 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_39->group = 1;
  params_39->stride_height = 1;
  params_39->stride_width = 1;
  params_39->dilation_height = 1;
  params_39->dilation_width = 1;
  params_39->conv_extra.kernel_tm = NULL;
  params_39->conv_extra.conv_mode = CSINN_DIRECT;
  params_39->pad_top = 0;
  params_39->pad_left = 0;
  params_39->pad_down = 0;
  params_39->pad_right = 0;
  params_39->base.name = "conv2d_Conv_33_PART_0_51_fuse_bias_add_Conv_33_52";
  params_39->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_37, output_39, kernel_39, bias_39, params_39);
  /* ReLU6 (clip to [0, 6]): output_39 -> output_40. */
  struct csinn_tensor *output_40 = csinn_alloc_tensor(sess);
  output_40->name = "output_40";
  output_40->dtype = CSINN_DTYPE_FLOAT16;
  output_40->layout = CSINN_LAYOUT_NCHW;
  output_40->dim[0] = 1;
  output_40->dim[1] = 192;
  output_40->dim[2] = 28;
  output_40->dim[3] = 28;
  output_40->dim_count = 4;
  output_40->qinfo = (struct csinn_quant_info *)(params_base + 122432);
  output_40->quant_channel = 1;
  struct csinn_relu_params *params_40 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_40->n = 6;
  params_40->base.name = "clip_Clip_34_53";
  params_40->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_39, output_40, params_40);
  /* 3x3 depthwise conv (group == 192), stride 2, pad 1: spatial
   * downsampling 28x28 -> 14x14. */
  struct csinn_tensor *output_41 = csinn_alloc_tensor(sess);
  output_41->name = "output_41";
  output_41->dtype = CSINN_DTYPE_FLOAT16;
  output_41->layout = CSINN_LAYOUT_NCHW;
  output_41->dim[0] = 1;
  output_41->dim[1] = 192;
  output_41->dim[2] = 14;
  output_41->dim[3] = 14;
  output_41->dim_count = 4;
  output_41->qinfo = (struct csinn_quant_info *)(params_base + 122456);
  output_41->quant_channel = 1;
  struct csinn_tensor *kernel_41 = csinn_alloc_tensor(sess);
  kernel_41->name = "kernel_41";
  kernel_41->data = params_base + 122536;
  kernel_41->is_const = 1;
  kernel_41->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_41->dtype = CSINN_DTYPE_FLOAT16;
  kernel_41->layout = CSINN_LAYOUT_O1HW;
  kernel_41->dim[0] = 192;
  kernel_41->dim[1] = 1;
  kernel_41->dim[2] = 3;
  kernel_41->dim[3] = 3;
  kernel_41->dim_count = 4;
  kernel_41->qinfo = (struct csinn_quant_info *)(params_base + 122480);
  kernel_41->quant_channel = 1;
  struct csinn_tensor *bias_41 = csinn_alloc_tensor(sess);
  bias_41->name = "bias_41";
  bias_41->data = params_base + 126016;
  bias_41->is_const = 1;
  bias_41->dtype = CSINN_DTYPE_FLOAT16;
  bias_41->layout = CSINN_LAYOUT_O;
  bias_41->dim[0] = 192;
  bias_41->dim_count = 1;
  bias_41->qinfo = (struct csinn_quant_info *)(params_base + 125992);
  bias_41->quant_channel = 1;
  struct csinn_conv2d_params *params_41 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_41->group = 192;
  params_41->stride_height = 2;
  params_41->stride_width = 2;
  params_41->dilation_height = 1;
  params_41->dilation_width = 1;
  params_41->conv_extra.kernel_tm = NULL;
  params_41->conv_extra.conv_mode = CSINN_DIRECT;
  params_41->pad_top = 1;
  params_41->pad_left = 1;
  params_41->pad_down = 1;
  params_41->pad_right = 1;
  params_41->base.name = "conv2d_Conv_35_PART_0_54_fuse_bias_add_Conv_35_55";
  params_41->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_40, output_41, kernel_41, bias_41, params_41);
  /* ReLU6 (clip to [0, 6]): output_41 -> output_42. */
  struct csinn_tensor *output_42 = csinn_alloc_tensor(sess);
  output_42->name = "output_42";
  output_42->dtype = CSINN_DTYPE_FLOAT16;
  output_42->layout = CSINN_LAYOUT_NCHW;
  output_42->dim[0] = 1;
  output_42->dim[1] = 192;
  output_42->dim[2] = 14;
  output_42->dim[3] = 14;
  output_42->dim_count = 4;
  output_42->qinfo = (struct csinn_quant_info *)(params_base + 126400);
  output_42->quant_channel = 1;
  struct csinn_relu_params *params_42 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_42->n = 6;
  params_42->base.name = "clip_Clip_36_56";
  params_42->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_41, output_42, params_42);
  /* 1x1 projection conv, 192 -> 64 channels at 14x14. No residual
   * add here (stride-2 unit changed the spatial size, so no skip);
   * output_43 is reused later as the skip input of add_Add_43_67. */
  struct csinn_tensor *output_43 = csinn_alloc_tensor(sess);
  output_43->name = "output_43";
  output_43->dtype = CSINN_DTYPE_FLOAT16;
  output_43->layout = CSINN_LAYOUT_NCHW;
  output_43->dim[0] = 1;
  output_43->dim[1] = 64;
  output_43->dim[2] = 14;
  output_43->dim[3] = 14;
  output_43->dim_count = 4;
  output_43->qinfo = (struct csinn_quant_info *)(params_base + 126424);
  output_43->quant_channel = 1;
  struct csinn_tensor *kernel_43 = csinn_alloc_tensor(sess);
  kernel_43->name = "kernel_43";
  kernel_43->data = params_base + 126504;
  kernel_43->is_const = 1;
  kernel_43->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_43->dtype = CSINN_DTYPE_FLOAT16;
  kernel_43->layout = CSINN_LAYOUT_OIHW;
  kernel_43->dim[0] = 64;
  kernel_43->dim[1] = 192;
  kernel_43->dim[2] = 1;
  kernel_43->dim[3] = 1;
  kernel_43->dim_count = 4;
  kernel_43->qinfo = (struct csinn_quant_info *)(params_base + 126448);
  kernel_43->quant_channel = 1;
  struct csinn_tensor *bias_43 = csinn_alloc_tensor(sess);
  bias_43->name = "bias_43";
  bias_43->data = params_base + 151104;
  bias_43->is_const = 1;
  bias_43->dtype = CSINN_DTYPE_FLOAT16;
  bias_43->layout = CSINN_LAYOUT_O;
  bias_43->dim[0] = 64;
  bias_43->dim_count = 1;
  bias_43->qinfo = (struct csinn_quant_info *)(params_base + 151080);
  bias_43->quant_channel = 1;
  struct csinn_conv2d_params *params_43 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_43->group = 1;
  params_43->stride_height = 1;
  params_43->stride_width = 1;
  params_43->dilation_height = 1;
  params_43->dilation_width = 1;
  params_43->conv_extra.kernel_tm = NULL;
  params_43->conv_extra.conv_mode = CSINN_DIRECT;
  params_43->pad_top = 0;
  params_43->pad_left = 0;
  params_43->pad_down = 0;
  params_43->pad_right = 0;
  params_43->base.name = "conv2d_Conv_37_PART_0_57_fuse_bias_add_Conv_37_58";
  params_43->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_42, output_43, kernel_43, bias_43, params_43);
  /* 1x1 expansion conv, 64 -> 384 channels at 14x14. */
  struct csinn_tensor *output_45 = csinn_alloc_tensor(sess);
  output_45->name = "output_45";
  output_45->dtype = CSINN_DTYPE_FLOAT16;
  output_45->layout = CSINN_LAYOUT_NCHW;
  output_45->dim[0] = 1;
  output_45->dim[1] = 384;
  output_45->dim[2] = 14;
  output_45->dim[3] = 14;
  output_45->dim_count = 4;
  output_45->qinfo = (struct csinn_quant_info *)(params_base + 151232);
  output_45->quant_channel = 1;
  struct csinn_tensor *kernel_45 = csinn_alloc_tensor(sess);
  kernel_45->name = "kernel_45";
  kernel_45->data = params_base + 151304;
  kernel_45->is_const = 1;
  kernel_45->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_45->dtype = CSINN_DTYPE_FLOAT16;
  kernel_45->layout = CSINN_LAYOUT_OIHW;
  kernel_45->dim[0] = 384;
  kernel_45->dim[1] = 64;
  kernel_45->dim[2] = 1;
  kernel_45->dim[3] = 1;
  kernel_45->dim_count = 4;
  kernel_45->qinfo = (struct csinn_quant_info *)(params_base + 151256);
  kernel_45->quant_channel = 1;
  struct csinn_tensor *bias_45 = csinn_alloc_tensor(sess);
  bias_45->name = "bias_45";
  bias_45->data = params_base + 200480;
  bias_45->is_const = 1;
  bias_45->dtype = CSINN_DTYPE_FLOAT16;
  bias_45->layout = CSINN_LAYOUT_O;
  bias_45->dim[0] = 384;
  bias_45->dim_count = 1;
  bias_45->qinfo = (struct csinn_quant_info *)(params_base + 200456);
  bias_45->quant_channel = 1;
  struct csinn_conv2d_params *params_45 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_45->group = 1;
  params_45->stride_height = 1;
  params_45->stride_width = 1;
  params_45->dilation_height = 1;
  params_45->dilation_width = 1;
  params_45->conv_extra.kernel_tm = NULL;
  params_45->conv_extra.conv_mode = CSINN_DIRECT;
  params_45->pad_top = 0;
  params_45->pad_left = 0;
  params_45->pad_down = 0;
  params_45->pad_right = 0;
  params_45->base.name = "conv2d_Conv_38_PART_0_59_fuse_bias_add_Conv_38_60";
  params_45->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_43, output_45, kernel_45, bias_45, params_45);
  /* ReLU6 (clip to [0, 6]): output_45 -> output_46. */
  struct csinn_tensor *output_46 = csinn_alloc_tensor(sess);
  output_46->name = "output_46";
  output_46->dtype = CSINN_DTYPE_FLOAT16;
  output_46->layout = CSINN_LAYOUT_NCHW;
  output_46->dim[0] = 1;
  output_46->dim[1] = 384;
  output_46->dim[2] = 14;
  output_46->dim[3] = 14;
  output_46->dim_count = 4;
  output_46->qinfo = (struct csinn_quant_info *)(params_base + 201248);
  output_46->quant_channel = 1;
  struct csinn_relu_params *params_46 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_46->n = 6;
  params_46->base.name = "clip_Clip_39_61";
  params_46->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_45, output_46, params_46);
  /* 3x3 depthwise conv (group == 384), stride 1, pad 1; spatial
   * size preserved at 14x14. */
  struct csinn_tensor *output_47 = csinn_alloc_tensor(sess);
  output_47->name = "output_47";
  output_47->dtype = CSINN_DTYPE_FLOAT16;
  output_47->layout = CSINN_LAYOUT_NCHW;
  output_47->dim[0] = 1;
  output_47->dim[1] = 384;
  output_47->dim[2] = 14;
  output_47->dim[3] = 14;
  output_47->dim_count = 4;
  output_47->qinfo = (struct csinn_quant_info *)(params_base + 201272);
  output_47->quant_channel = 1;
  struct csinn_tensor *kernel_47 = csinn_alloc_tensor(sess);
  kernel_47->name = "kernel_47";
  kernel_47->data = params_base + 201352;
  kernel_47->is_const = 1;
  kernel_47->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_47->dtype = CSINN_DTYPE_FLOAT16;
  kernel_47->layout = CSINN_LAYOUT_O1HW;
  kernel_47->dim[0] = 384;
  kernel_47->dim[1] = 1;
  kernel_47->dim[2] = 3;
  kernel_47->dim[3] = 3;
  kernel_47->dim_count = 4;
  kernel_47->qinfo = (struct csinn_quant_info *)(params_base + 201296);
  kernel_47->quant_channel = 1;
  struct csinn_tensor *bias_47 = csinn_alloc_tensor(sess);
  bias_47->name = "bias_47";
  bias_47->data = params_base + 208288;
  bias_47->is_const = 1;
  bias_47->dtype = CSINN_DTYPE_FLOAT16;
  bias_47->layout = CSINN_LAYOUT_O;
  bias_47->dim[0] = 384;
  bias_47->dim_count = 1;
  bias_47->qinfo = (struct csinn_quant_info *)(params_base + 208264);
  bias_47->quant_channel = 1;
  struct csinn_conv2d_params *params_47 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_47->group = 384;
  params_47->stride_height = 1;
  params_47->stride_width = 1;
  params_47->dilation_height = 1;
  params_47->dilation_width = 1;
  params_47->conv_extra.kernel_tm = NULL;
  params_47->conv_extra.conv_mode = CSINN_DIRECT;
  params_47->pad_top = 1;
  params_47->pad_left = 1;
  params_47->pad_down = 1;
  params_47->pad_right = 1;
  params_47->base.name = "conv2d_Conv_40_PART_0_62_fuse_bias_add_Conv_40_63";
  params_47->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_46, output_47, kernel_47, bias_47, params_47);
  /* ReLU6 (clip to [0, 6]): output_47 -> output_48. */
  struct csinn_tensor *output_48 = csinn_alloc_tensor(sess);
  output_48->name = "output_48";
  output_48->dtype = CSINN_DTYPE_FLOAT16;
  output_48->layout = CSINN_LAYOUT_NCHW;
  output_48->dim[0] = 1;
  output_48->dim[1] = 384;
  output_48->dim[2] = 14;
  output_48->dim[3] = 14;
  output_48->dim_count = 4;
  output_48->qinfo = (struct csinn_quant_info *)(params_base + 209056);
  output_48->quant_channel = 1;
  struct csinn_relu_params *params_48 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_48->n = 6;
  params_48->base.name = "clip_Clip_41_64";
  params_48->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_47, output_48, params_48);
  /* 1x1 projection conv, 384 -> 64 channels (no activation after),
   * then residual add with output_43 (this unit's skip input). */
  struct csinn_tensor *output_49 = csinn_alloc_tensor(sess);
  output_49->name = "output_49";
  output_49->dtype = CSINN_DTYPE_FLOAT16;
  output_49->layout = CSINN_LAYOUT_NCHW;
  output_49->dim[0] = 1;
  output_49->dim[1] = 64;
  output_49->dim[2] = 14;
  output_49->dim[3] = 14;
  output_49->dim_count = 4;
  output_49->qinfo = (struct csinn_quant_info *)(params_base + 209080);
  output_49->quant_channel = 1;
  struct csinn_tensor *kernel_49 = csinn_alloc_tensor(sess);
  kernel_49->name = "kernel_49";
  kernel_49->data = params_base + 209160;
  kernel_49->is_const = 1;
  kernel_49->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_49->dtype = CSINN_DTYPE_FLOAT16;
  kernel_49->layout = CSINN_LAYOUT_OIHW;
  kernel_49->dim[0] = 64;
  kernel_49->dim[1] = 384;
  kernel_49->dim[2] = 1;
  kernel_49->dim[3] = 1;
  kernel_49->dim_count = 4;
  kernel_49->qinfo = (struct csinn_quant_info *)(params_base + 209104);
  kernel_49->quant_channel = 1;
  struct csinn_tensor *bias_49 = csinn_alloc_tensor(sess);
  bias_49->name = "bias_49";
  bias_49->data = params_base + 258336;
  bias_49->is_const = 1;
  bias_49->dtype = CSINN_DTYPE_FLOAT16;
  bias_49->layout = CSINN_LAYOUT_O;
  bias_49->dim[0] = 64;
  bias_49->dim_count = 1;
  bias_49->qinfo = (struct csinn_quant_info *)(params_base + 258312);
  bias_49->quant_channel = 1;
  struct csinn_conv2d_params *params_49 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_49->group = 1;
  params_49->stride_height = 1;
  params_49->stride_width = 1;
  params_49->dilation_height = 1;
  params_49->dilation_width = 1;
  params_49->conv_extra.kernel_tm = NULL;
  params_49->conv_extra.conv_mode = CSINN_DIRECT;
  params_49->pad_top = 0;
  params_49->pad_left = 0;
  params_49->pad_down = 0;
  params_49->pad_right = 0;
  params_49->base.name = "conv2d_Conv_42_PART_0_65_fuse_bias_add_Conv_42_66";
  params_49->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_48, output_49, kernel_49, bias_49, params_49);
  /* Residual: output_50 = output_43 + output_49. */
  struct csinn_tensor *output_50 = csinn_alloc_tensor(sess);
  output_50->name = "output_50";
  output_50->dtype = CSINN_DTYPE_FLOAT16;
  output_50->layout = CSINN_LAYOUT_NCHW;
  output_50->dim[0] = 1;
  output_50->dim[1] = 64;
  output_50->dim[2] = 14;
  output_50->dim[3] = 14;
  output_50->dim_count = 4;
  output_50->qinfo = (struct csinn_quant_info *)(params_base + 258464);
  output_50->quant_channel = 1;
  struct csinn_diso_params *params_50 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
  params_50->base.name = "add_Add_43_67";
  params_50->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_add_init(output_43, output_49, output_50, params_50);
  /* Next unit: 1x1 expansion conv, 64 -> 384 channels at 14x14. */
  struct csinn_tensor *output_53 = csinn_alloc_tensor(sess);
  output_53->name = "output_53";
  output_53->dtype = CSINN_DTYPE_FLOAT16;
  output_53->layout = CSINN_LAYOUT_NCHW;
  output_53->dim[0] = 1;
  output_53->dim[1] = 384;
  output_53->dim[2] = 14;
  output_53->dim[3] = 14;
  output_53->dim_count = 4;
  output_53->qinfo = (struct csinn_quant_info *)(params_base + 258488);
  output_53->quant_channel = 1;
  struct csinn_tensor *kernel_53 = csinn_alloc_tensor(sess);
  kernel_53->name = "kernel_53";
  kernel_53->data = params_base + 258568;
  kernel_53->is_const = 1;
  kernel_53->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_53->dtype = CSINN_DTYPE_FLOAT16;
  kernel_53->layout = CSINN_LAYOUT_OIHW;
  kernel_53->dim[0] = 384;
  kernel_53->dim[1] = 64;
  kernel_53->dim[2] = 1;
  kernel_53->dim[3] = 1;
  kernel_53->dim_count = 4;
  kernel_53->qinfo = (struct csinn_quant_info *)(params_base + 258512);
  kernel_53->quant_channel = 1;
  struct csinn_tensor *bias_53 = csinn_alloc_tensor(sess);
  bias_53->name = "bias_53";
  bias_53->data = params_base + 307744;
  bias_53->is_const = 1;
  bias_53->dtype = CSINN_DTYPE_FLOAT16;
  bias_53->layout = CSINN_LAYOUT_O;
  bias_53->dim[0] = 384;
  bias_53->dim_count = 1;
  bias_53->qinfo = (struct csinn_quant_info *)(params_base + 307720);
  bias_53->quant_channel = 1;
  struct csinn_conv2d_params *params_53 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_53->group = 1;
  params_53->stride_height = 1;
  params_53->stride_width = 1;
  params_53->dilation_height = 1;
  params_53->dilation_width = 1;
  params_53->conv_extra.kernel_tm = NULL;
  params_53->conv_extra.conv_mode = CSINN_DIRECT;
  params_53->pad_top = 0;
  params_53->pad_left = 0;
  params_53->pad_down = 0;
  params_53->pad_right = 0;
  params_53->base.name = "conv2d_Conv_44_PART_0_68_fuse_bias_add_Conv_44_69";
  params_53->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_50, output_53, kernel_53, bias_53, params_53);
  /* ReLU6 (clip to [0, 6]): output_53 -> output_54. */
  struct csinn_tensor *output_54 = csinn_alloc_tensor(sess);
  output_54->name = "output_54";
  output_54->dtype = CSINN_DTYPE_FLOAT16;
  output_54->layout = CSINN_LAYOUT_NCHW;
  output_54->dim[0] = 1;
  output_54->dim[1] = 384;
  output_54->dim[2] = 14;
  output_54->dim[3] = 14;
  output_54->dim_count = 4;
  output_54->qinfo = (struct csinn_quant_info *)(params_base + 308512);
  output_54->quant_channel = 1;
  struct csinn_relu_params *params_54 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_54->n = 6;
  params_54->base.name = "clip_Clip_45_70";
  params_54->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_53, output_54, params_54);
  struct csinn_tensor *output_55 = csinn_alloc_tensor(sess);
  output_55->name = "output_55";
  output_55->dtype = CSINN_DTYPE_FLOAT16;
  output_55->layout = CSINN_LAYOUT_NCHW;
  output_55->dim[0] = 1;
  output_55->dim[1] = 384;
  output_55->dim[2] = 14;
  output_55->dim[3] = 14;
  output_55->dim_count = 4;
  output_55->qinfo = (struct csinn_quant_info *)(params_base + 308536);
  output_55->quant_channel = 1;
  struct csinn_tensor *kernel_55 = csinn_alloc_tensor(sess);
  kernel_55->name = "kernel_55";
  kernel_55->data = params_base + 308616;
  kernel_55->is_const = 1;
  kernel_55->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_55->dtype = CSINN_DTYPE_FLOAT16;
  kernel_55->layout = CSINN_LAYOUT_O1HW;
  kernel_55->dim[0] = 384;
  kernel_55->dim[1] = 1;
  kernel_55->dim[2] = 3;
  kernel_55->dim[3] = 3;
  kernel_55->dim_count = 4;
  kernel_55->qinfo = (struct csinn_quant_info *)(params_base + 308560);
  kernel_55->quant_channel = 1;
  struct csinn_tensor *bias_55 = csinn_alloc_tensor(sess);
  bias_55->name = "bias_55";
  bias_55->data = params_base + 315552;
  bias_55->is_const = 1;
  bias_55->dtype = CSINN_DTYPE_FLOAT16;
  bias_55->layout = CSINN_LAYOUT_O;
  bias_55->dim[0] = 384;
  bias_55->dim_count = 1;
  bias_55->qinfo = (struct csinn_quant_info *)(params_base + 315528);
  bias_55->quant_channel = 1;
  struct csinn_conv2d_params *params_55 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_55->group = 384;
  params_55->stride_height = 1;
  params_55->stride_width = 1;
  params_55->dilation_height = 1;
  params_55->dilation_width = 1;
  params_55->conv_extra.kernel_tm = NULL;
  params_55->conv_extra.conv_mode = CSINN_DIRECT;
  params_55->pad_top = 1;
  params_55->pad_left = 1;
  params_55->pad_down = 1;
  params_55->pad_right = 1;
  params_55->base.name = "conv2d_Conv_46_PART_0_71_fuse_bias_add_Conv_46_72";
  params_55->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_54, output_55, kernel_55, bias_55, params_55);
  struct csinn_tensor *output_56 = csinn_alloc_tensor(sess);
  output_56->name = "output_56";
  output_56->dtype = CSINN_DTYPE_FLOAT16;
  output_56->layout = CSINN_LAYOUT_NCHW;
  output_56->dim[0] = 1;
  output_56->dim[1] = 384;
  output_56->dim[2] = 14;
  output_56->dim[3] = 14;
  output_56->dim_count = 4;
  output_56->qinfo = (struct csinn_quant_info *)(params_base + 316320);
  output_56->quant_channel = 1;
  struct csinn_relu_params *params_56 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_56->n = 6;
  params_56->base.name = "clip_Clip_47_73";
  params_56->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_55, output_56, params_56);
  struct csinn_tensor *output_57 = csinn_alloc_tensor(sess);
  output_57->name = "output_57";
  output_57->dtype = CSINN_DTYPE_FLOAT16;
  output_57->layout = CSINN_LAYOUT_NCHW;
  output_57->dim[0] = 1;
  output_57->dim[1] = 64;
  output_57->dim[2] = 14;
  output_57->dim[3] = 14;
  output_57->dim_count = 4;
  output_57->qinfo = (struct csinn_quant_info *)(params_base + 316344);
  output_57->quant_channel = 1;
  struct csinn_tensor *kernel_57 = csinn_alloc_tensor(sess);
  kernel_57->name = "kernel_57";
  kernel_57->data = params_base + 316424;
  kernel_57->is_const = 1;
  kernel_57->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_57->dtype = CSINN_DTYPE_FLOAT16;
  kernel_57->layout = CSINN_LAYOUT_OIHW;
  kernel_57->dim[0] = 64;
  kernel_57->dim[1] = 384;
  kernel_57->dim[2] = 1;
  kernel_57->dim[3] = 1;
  kernel_57->dim_count = 4;
  kernel_57->qinfo = (struct csinn_quant_info *)(params_base + 316368);
  kernel_57->quant_channel = 1;
  struct csinn_tensor *bias_57 = csinn_alloc_tensor(sess);
  bias_57->name = "bias_57";
  bias_57->data = params_base + 365600;
  bias_57->is_const = 1;
  bias_57->dtype = CSINN_DTYPE_FLOAT16;
  bias_57->layout = CSINN_LAYOUT_O;
  bias_57->dim[0] = 64;
  bias_57->dim_count = 1;
  bias_57->qinfo = (struct csinn_quant_info *)(params_base + 365576);
  bias_57->quant_channel = 1;
  struct csinn_conv2d_params *params_57 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_57->group = 1;
  params_57->stride_height = 1;
  params_57->stride_width = 1;
  params_57->dilation_height = 1;
  params_57->dilation_width = 1;
  params_57->conv_extra.kernel_tm = NULL;
  params_57->conv_extra.conv_mode = CSINN_DIRECT;
  params_57->pad_top = 0;
  params_57->pad_left = 0;
  params_57->pad_down = 0;
  params_57->pad_right = 0;
  params_57->base.name = "conv2d_Conv_48_PART_0_74_fuse_bias_add_Conv_48_75";
  params_57->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_56, output_57, kernel_57, bias_57, params_57);
  struct csinn_tensor *output_58 = csinn_alloc_tensor(sess);
  output_58->name = "output_58";
  output_58->dtype = CSINN_DTYPE_FLOAT16;
  output_58->layout = CSINN_LAYOUT_NCHW;
  output_58->dim[0] = 1;
  output_58->dim[1] = 64;
  output_58->dim[2] = 14;
  output_58->dim[3] = 14;
  output_58->dim_count = 4;
  output_58->qinfo = (struct csinn_quant_info *)(params_base + 365728);
  output_58->quant_channel = 1;
  struct csinn_diso_params *params_58 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
  params_58->base.name = "add_Add_49_76";
  params_58->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_add_init(output_50, output_57, output_58, params_58);
  /* Next unit (mirrors the previous one): 1x1 expand 64 -> 384, ReLU6,
   * 3x3 depthwise, ReLU6, 1x1 project 384 -> 64, then a residual add.
   * NOTE(review): generated code — do not edit the params_base offsets. */
  struct csinn_tensor *output_61 = csinn_alloc_tensor(sess);
  output_61->name = "output_61";
  output_61->dtype = CSINN_DTYPE_FLOAT16;
  output_61->layout = CSINN_LAYOUT_NCHW;
  output_61->dim[0] = 1;
  output_61->dim[1] = 384;
  output_61->dim[2] = 14;
  output_61->dim[3] = 14;
  output_61->dim_count = 4;
  output_61->qinfo = (struct csinn_quant_info *)(params_base + 365752);
  output_61->quant_channel = 1;
  /* 1x1 expansion weights, OIHW 384x64. */
  struct csinn_tensor *kernel_61 = csinn_alloc_tensor(sess);
  kernel_61->name = "kernel_61";
  kernel_61->data = params_base + 365832;
  kernel_61->is_const = 1;
  kernel_61->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_61->dtype = CSINN_DTYPE_FLOAT16;
  kernel_61->layout = CSINN_LAYOUT_OIHW;
  kernel_61->dim[0] = 384;
  kernel_61->dim[1] = 64;
  kernel_61->dim[2] = 1;
  kernel_61->dim[3] = 1;
  kernel_61->dim_count = 4;
  kernel_61->qinfo = (struct csinn_quant_info *)(params_base + 365776);
  kernel_61->quant_channel = 1;
  struct csinn_tensor *bias_61 = csinn_alloc_tensor(sess);
  bias_61->name = "bias_61";
  bias_61->data = params_base + 415008;
  bias_61->is_const = 1;
  bias_61->dtype = CSINN_DTYPE_FLOAT16;
  bias_61->layout = CSINN_LAYOUT_O;
  bias_61->dim[0] = 384;
  bias_61->dim_count = 1;
  bias_61->qinfo = (struct csinn_quant_info *)(params_base + 414984);
  bias_61->quant_channel = 1;
  /* Pointwise expansion conv: reads output_58 (the previous residual sum). */
  struct csinn_conv2d_params *params_61 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_61->group = 1;
  params_61->stride_height = 1;
  params_61->stride_width = 1;
  params_61->dilation_height = 1;
  params_61->dilation_width = 1;
  params_61->conv_extra.kernel_tm = NULL;
  params_61->conv_extra.conv_mode = CSINN_DIRECT;
  params_61->pad_top = 0;
  params_61->pad_left = 0;
  params_61->pad_down = 0;
  params_61->pad_right = 0;
  params_61->base.name = "conv2d_Conv_50_PART_0_77_fuse_bias_add_Conv_50_78";
  params_61->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_58, output_61, kernel_61, bias_61, params_61);
  /* ReLU6 (n = 6 is the clip ceiling). */
  struct csinn_tensor *output_62 = csinn_alloc_tensor(sess);
  output_62->name = "output_62";
  output_62->dtype = CSINN_DTYPE_FLOAT16;
  output_62->layout = CSINN_LAYOUT_NCHW;
  output_62->dim[0] = 1;
  output_62->dim[1] = 384;
  output_62->dim[2] = 14;
  output_62->dim[3] = 14;
  output_62->dim_count = 4;
  output_62->qinfo = (struct csinn_quant_info *)(params_base + 415776);
  output_62->quant_channel = 1;
  struct csinn_relu_params *params_62 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_62->n = 6;
  params_62->base.name = "clip_Clip_51_79";
  params_62->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_61, output_62, params_62);
  /* 3x3 depthwise conv: kernel O1HW 384x1x3x3, group 384, pad 1. */
  struct csinn_tensor *output_63 = csinn_alloc_tensor(sess);
  output_63->name = "output_63";
  output_63->dtype = CSINN_DTYPE_FLOAT16;
  output_63->layout = CSINN_LAYOUT_NCHW;
  output_63->dim[0] = 1;
  output_63->dim[1] = 384;
  output_63->dim[2] = 14;
  output_63->dim[3] = 14;
  output_63->dim_count = 4;
  output_63->qinfo = (struct csinn_quant_info *)(params_base + 415800);
  output_63->quant_channel = 1;
  struct csinn_tensor *kernel_63 = csinn_alloc_tensor(sess);
  kernel_63->name = "kernel_63";
  kernel_63->data = params_base + 415880;
  kernel_63->is_const = 1;
  kernel_63->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_63->dtype = CSINN_DTYPE_FLOAT16;
  kernel_63->layout = CSINN_LAYOUT_O1HW;
  kernel_63->dim[0] = 384;
  kernel_63->dim[1] = 1;
  kernel_63->dim[2] = 3;
  kernel_63->dim[3] = 3;
  kernel_63->dim_count = 4;
  kernel_63->qinfo = (struct csinn_quant_info *)(params_base + 415824);
  kernel_63->quant_channel = 1;
  struct csinn_tensor *bias_63 = csinn_alloc_tensor(sess);
  bias_63->name = "bias_63";
  bias_63->data = params_base + 422816;
  bias_63->is_const = 1;
  bias_63->dtype = CSINN_DTYPE_FLOAT16;
  bias_63->layout = CSINN_LAYOUT_O;
  bias_63->dim[0] = 384;
  bias_63->dim_count = 1;
  bias_63->qinfo = (struct csinn_quant_info *)(params_base + 422792);
  bias_63->quant_channel = 1;
  struct csinn_conv2d_params *params_63 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_63->group = 384;
  params_63->stride_height = 1;
  params_63->stride_width = 1;
  params_63->dilation_height = 1;
  params_63->dilation_width = 1;
  params_63->conv_extra.kernel_tm = NULL;
  params_63->conv_extra.conv_mode = CSINN_DIRECT;
  params_63->pad_top = 1;
  params_63->pad_left = 1;
  params_63->pad_down = 1;
  params_63->pad_right = 1;
  params_63->base.name = "conv2d_Conv_52_PART_0_80_fuse_bias_add_Conv_52_81";
  params_63->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_62, output_63, kernel_63, bias_63, params_63);
  /* ReLU6 after the depthwise conv. */
  struct csinn_tensor *output_64 = csinn_alloc_tensor(sess);
  output_64->name = "output_64";
  output_64->dtype = CSINN_DTYPE_FLOAT16;
  output_64->layout = CSINN_LAYOUT_NCHW;
  output_64->dim[0] = 1;
  output_64->dim[1] = 384;
  output_64->dim[2] = 14;
  output_64->dim[3] = 14;
  output_64->dim_count = 4;
  output_64->qinfo = (struct csinn_quant_info *)(params_base + 423584);
  output_64->quant_channel = 1;
  struct csinn_relu_params *params_64 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_64->n = 6;
  params_64->base.name = "clip_Clip_53_82";
  params_64->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_63, output_64, params_64);
  /* Projection: 1x1 conv, group 1, 384 -> 64 channels. */
  struct csinn_tensor *output_65 = csinn_alloc_tensor(sess);
  output_65->name = "output_65";
  output_65->dtype = CSINN_DTYPE_FLOAT16;
  output_65->layout = CSINN_LAYOUT_NCHW;
  output_65->dim[0] = 1;
  output_65->dim[1] = 64;
  output_65->dim[2] = 14;
  output_65->dim[3] = 14;
  output_65->dim_count = 4;
  output_65->qinfo = (struct csinn_quant_info *)(params_base + 423608);
  output_65->quant_channel = 1;
  struct csinn_tensor *kernel_65 = csinn_alloc_tensor(sess);
  kernel_65->name = "kernel_65";
  kernel_65->data = params_base + 423688;
  kernel_65->is_const = 1;
  kernel_65->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_65->dtype = CSINN_DTYPE_FLOAT16;
  kernel_65->layout = CSINN_LAYOUT_OIHW;
  kernel_65->dim[0] = 64;
  kernel_65->dim[1] = 384;
  kernel_65->dim[2] = 1;
  kernel_65->dim[3] = 1;
  kernel_65->dim_count = 4;
  kernel_65->qinfo = (struct csinn_quant_info *)(params_base + 423632);
  kernel_65->quant_channel = 1;
  struct csinn_tensor *bias_65 = csinn_alloc_tensor(sess);
  bias_65->name = "bias_65";
  bias_65->data = params_base + 472864;
  bias_65->is_const = 1;
  bias_65->dtype = CSINN_DTYPE_FLOAT16;
  bias_65->layout = CSINN_LAYOUT_O;
  bias_65->dim[0] = 64;
  bias_65->dim_count = 1;
  bias_65->qinfo = (struct csinn_quant_info *)(params_base + 472840);
  bias_65->quant_channel = 1;
  struct csinn_conv2d_params *params_65 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_65->group = 1;
  params_65->stride_height = 1;
  params_65->stride_width = 1;
  params_65->dilation_height = 1;
  params_65->dilation_width = 1;
  params_65->conv_extra.kernel_tm = NULL;
  params_65->conv_extra.conv_mode = CSINN_DIRECT;
  params_65->pad_top = 0;
  params_65->pad_left = 0;
  params_65->pad_down = 0;
  params_65->pad_right = 0;
  params_65->base.name = "conv2d_Conv_54_PART_0_83_fuse_bias_add_Conv_54_84";
  params_65->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_64, output_65, kernel_65, bias_65, params_65);
  /* Residual connection: output_66 = output_58 + output_65. */
  struct csinn_tensor *output_66 = csinn_alloc_tensor(sess);
  output_66->name = "output_66";
  output_66->dtype = CSINN_DTYPE_FLOAT16;
  output_66->layout = CSINN_LAYOUT_NCHW;
  output_66->dim[0] = 1;
  output_66->dim[1] = 64;
  output_66->dim[2] = 14;
  output_66->dim[3] = 14;
  output_66->dim_count = 4;
  output_66->qinfo = (struct csinn_quant_info *)(params_base + 472992);
  output_66->quant_channel = 1;
  struct csinn_diso_params *params_66 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
  params_66->base.name = "add_Add_55_85";
  params_66->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_add_init(output_58, output_65, output_66, params_66);
  /* Transition unit: 1x1 expand 64 -> 384, ReLU6, 3x3 depthwise, ReLU6,
   * then 1x1 project 384 -> 96.  No residual add here — the output channel
   * count changes from 64 to 96, so the shapes would not match.
   * NOTE(review): generated code — do not edit the params_base offsets. */
  struct csinn_tensor *output_68 = csinn_alloc_tensor(sess);
  output_68->name = "output_68";
  output_68->dtype = CSINN_DTYPE_FLOAT16;
  output_68->layout = CSINN_LAYOUT_NCHW;
  output_68->dim[0] = 1;
  output_68->dim[1] = 384;
  output_68->dim[2] = 14;
  output_68->dim[3] = 14;
  output_68->dim_count = 4;
  output_68->qinfo = (struct csinn_quant_info *)(params_base + 473016);
  output_68->quant_channel = 1;
  /* 1x1 expansion weights, OIHW 384x64. */
  struct csinn_tensor *kernel_68 = csinn_alloc_tensor(sess);
  kernel_68->name = "kernel_68";
  kernel_68->data = params_base + 473096;
  kernel_68->is_const = 1;
  kernel_68->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_68->dtype = CSINN_DTYPE_FLOAT16;
  kernel_68->layout = CSINN_LAYOUT_OIHW;
  kernel_68->dim[0] = 384;
  kernel_68->dim[1] = 64;
  kernel_68->dim[2] = 1;
  kernel_68->dim[3] = 1;
  kernel_68->dim_count = 4;
  kernel_68->qinfo = (struct csinn_quant_info *)(params_base + 473040);
  kernel_68->quant_channel = 1;
  struct csinn_tensor *bias_68 = csinn_alloc_tensor(sess);
  bias_68->name = "bias_68";
  bias_68->data = params_base + 522272;
  bias_68->is_const = 1;
  bias_68->dtype = CSINN_DTYPE_FLOAT16;
  bias_68->layout = CSINN_LAYOUT_O;
  bias_68->dim[0] = 384;
  bias_68->dim_count = 1;
  bias_68->qinfo = (struct csinn_quant_info *)(params_base + 522248);
  bias_68->quant_channel = 1;
  /* Pointwise expansion conv: reads output_66 (previous residual sum). */
  struct csinn_conv2d_params *params_68 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_68->group = 1;
  params_68->stride_height = 1;
  params_68->stride_width = 1;
  params_68->dilation_height = 1;
  params_68->dilation_width = 1;
  params_68->conv_extra.kernel_tm = NULL;
  params_68->conv_extra.conv_mode = CSINN_DIRECT;
  params_68->pad_top = 0;
  params_68->pad_left = 0;
  params_68->pad_down = 0;
  params_68->pad_right = 0;
  params_68->base.name = "conv2d_Conv_56_PART_0_86_fuse_bias_add_Conv_56_87";
  params_68->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_66, output_68, kernel_68, bias_68, params_68);
  /* ReLU6 (n = 6 is the clip ceiling). */
  struct csinn_tensor *output_69 = csinn_alloc_tensor(sess);
  output_69->name = "output_69";
  output_69->dtype = CSINN_DTYPE_FLOAT16;
  output_69->layout = CSINN_LAYOUT_NCHW;
  output_69->dim[0] = 1;
  output_69->dim[1] = 384;
  output_69->dim[2] = 14;
  output_69->dim[3] = 14;
  output_69->dim_count = 4;
  output_69->qinfo = (struct csinn_quant_info *)(params_base + 523040);
  output_69->quant_channel = 1;
  struct csinn_relu_params *params_69 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_69->n = 6;
  params_69->base.name = "clip_Clip_57_88";
  params_69->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_68, output_69, params_69);
  /* 3x3 depthwise conv: kernel O1HW 384x1x3x3, group 384, pad 1. */
  struct csinn_tensor *output_70 = csinn_alloc_tensor(sess);
  output_70->name = "output_70";
  output_70->dtype = CSINN_DTYPE_FLOAT16;
  output_70->layout = CSINN_LAYOUT_NCHW;
  output_70->dim[0] = 1;
  output_70->dim[1] = 384;
  output_70->dim[2] = 14;
  output_70->dim[3] = 14;
  output_70->dim_count = 4;
  output_70->qinfo = (struct csinn_quant_info *)(params_base + 523064);
  output_70->quant_channel = 1;
  struct csinn_tensor *kernel_70 = csinn_alloc_tensor(sess);
  kernel_70->name = "kernel_70";
  kernel_70->data = params_base + 523144;
  kernel_70->is_const = 1;
  kernel_70->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_70->dtype = CSINN_DTYPE_FLOAT16;
  kernel_70->layout = CSINN_LAYOUT_O1HW;
  kernel_70->dim[0] = 384;
  kernel_70->dim[1] = 1;
  kernel_70->dim[2] = 3;
  kernel_70->dim[3] = 3;
  kernel_70->dim_count = 4;
  kernel_70->qinfo = (struct csinn_quant_info *)(params_base + 523088);
  kernel_70->quant_channel = 1;
  struct csinn_tensor *bias_70 = csinn_alloc_tensor(sess);
  bias_70->name = "bias_70";
  bias_70->data = params_base + 530080;
  bias_70->is_const = 1;
  bias_70->dtype = CSINN_DTYPE_FLOAT16;
  bias_70->layout = CSINN_LAYOUT_O;
  bias_70->dim[0] = 384;
  bias_70->dim_count = 1;
  bias_70->qinfo = (struct csinn_quant_info *)(params_base + 530056);
  bias_70->quant_channel = 1;
  struct csinn_conv2d_params *params_70 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_70->group = 384;
  params_70->stride_height = 1;
  params_70->stride_width = 1;
  params_70->dilation_height = 1;
  params_70->dilation_width = 1;
  params_70->conv_extra.kernel_tm = NULL;
  params_70->conv_extra.conv_mode = CSINN_DIRECT;
  params_70->pad_top = 1;
  params_70->pad_left = 1;
  params_70->pad_down = 1;
  params_70->pad_right = 1;
  params_70->base.name = "conv2d_Conv_58_PART_0_89_fuse_bias_add_Conv_58_90";
  params_70->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_69, output_70, kernel_70, bias_70, params_70);
  /* ReLU6 after the depthwise conv. */
  struct csinn_tensor *output_71 = csinn_alloc_tensor(sess);
  output_71->name = "output_71";
  output_71->dtype = CSINN_DTYPE_FLOAT16;
  output_71->layout = CSINN_LAYOUT_NCHW;
  output_71->dim[0] = 1;
  output_71->dim[1] = 384;
  output_71->dim[2] = 14;
  output_71->dim[3] = 14;
  output_71->dim_count = 4;
  output_71->qinfo = (struct csinn_quant_info *)(params_base + 530848);
  output_71->quant_channel = 1;
  struct csinn_relu_params *params_71 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_71->n = 6;
  params_71->base.name = "clip_Clip_59_91";
  params_71->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_70, output_71, params_71);
  /* Projection: 1x1 conv, group 1, 384 -> 96 channels (widens the trunk). */
  struct csinn_tensor *output_72 = csinn_alloc_tensor(sess);
  output_72->name = "output_72";
  output_72->dtype = CSINN_DTYPE_FLOAT16;
  output_72->layout = CSINN_LAYOUT_NCHW;
  output_72->dim[0] = 1;
  output_72->dim[1] = 96;
  output_72->dim[2] = 14;
  output_72->dim[3] = 14;
  output_72->dim_count = 4;
  output_72->qinfo = (struct csinn_quant_info *)(params_base + 530872);
  output_72->quant_channel = 1;
  struct csinn_tensor *kernel_72 = csinn_alloc_tensor(sess);
  kernel_72->name = "kernel_72";
  kernel_72->data = params_base + 530952;
  kernel_72->is_const = 1;
  kernel_72->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_72->dtype = CSINN_DTYPE_FLOAT16;
  kernel_72->layout = CSINN_LAYOUT_OIHW;
  kernel_72->dim[0] = 96;
  kernel_72->dim[1] = 384;
  kernel_72->dim[2] = 1;
  kernel_72->dim[3] = 1;
  kernel_72->dim_count = 4;
  kernel_72->qinfo = (struct csinn_quant_info *)(params_base + 530896);
  kernel_72->quant_channel = 1;
  struct csinn_tensor *bias_72 = csinn_alloc_tensor(sess);
  bias_72->name = "bias_72";
  bias_72->data = params_base + 604704;
  bias_72->is_const = 1;
  bias_72->dtype = CSINN_DTYPE_FLOAT16;
  bias_72->layout = CSINN_LAYOUT_O;
  bias_72->dim[0] = 96;
  bias_72->dim_count = 1;
  bias_72->qinfo = (struct csinn_quant_info *)(params_base + 604680);
  bias_72->quant_channel = 1;
  struct csinn_conv2d_params *params_72 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_72->group = 1;
  params_72->stride_height = 1;
  params_72->stride_width = 1;
  params_72->dilation_height = 1;
  params_72->dilation_width = 1;
  params_72->conv_extra.kernel_tm = NULL;
  params_72->conv_extra.conv_mode = CSINN_DIRECT;
  params_72->pad_top = 0;
  params_72->pad_left = 0;
  params_72->pad_down = 0;
  params_72->pad_right = 0;
  params_72->base.name = "conv2d_Conv_60_PART_0_92_fuse_bias_add_Conv_60_93";
  params_72->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_71, output_72, kernel_72, bias_72, params_72);
  /* 96-channel unit: 1x1 expand 96 -> 576, ReLU6, 3x3 depthwise (group 576),
   * ReLU6, 1x1 project 576 -> 96, residual add with output_72.
   * NOTE(review): generated code — do not edit the params_base offsets. */
  struct csinn_tensor *output_74 = csinn_alloc_tensor(sess);
  output_74->name = "output_74";
  output_74->dtype = CSINN_DTYPE_FLOAT16;
  output_74->layout = CSINN_LAYOUT_NCHW;
  output_74->dim[0] = 1;
  output_74->dim[1] = 576;
  output_74->dim[2] = 14;
  output_74->dim[3] = 14;
  output_74->dim_count = 4;
  output_74->qinfo = (struct csinn_quant_info *)(params_base + 604896);
  output_74->quant_channel = 1;
  /* 1x1 expansion weights, OIHW 576x96. */
  struct csinn_tensor *kernel_74 = csinn_alloc_tensor(sess);
  kernel_74->name = "kernel_74";
  kernel_74->data = params_base + 604968;
  kernel_74->is_const = 1;
  kernel_74->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_74->dtype = CSINN_DTYPE_FLOAT16;
  kernel_74->layout = CSINN_LAYOUT_OIHW;
  kernel_74->dim[0] = 576;
  kernel_74->dim[1] = 96;
  kernel_74->dim[2] = 1;
  kernel_74->dim[3] = 1;
  kernel_74->dim_count = 4;
  kernel_74->qinfo = (struct csinn_quant_info *)(params_base + 604920);
  kernel_74->quant_channel = 1;
  struct csinn_tensor *bias_74 = csinn_alloc_tensor(sess);
  bias_74->name = "bias_74";
  bias_74->data = params_base + 715584;
  bias_74->is_const = 1;
  bias_74->dtype = CSINN_DTYPE_FLOAT16;
  bias_74->layout = CSINN_LAYOUT_O;
  bias_74->dim[0] = 576;
  bias_74->dim_count = 1;
  bias_74->qinfo = (struct csinn_quant_info *)(params_base + 715560);
  bias_74->quant_channel = 1;
  /* Pointwise expansion conv: reads output_72 (the 96-channel projection). */
  struct csinn_conv2d_params *params_74 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_74->group = 1;
  params_74->stride_height = 1;
  params_74->stride_width = 1;
  params_74->dilation_height = 1;
  params_74->dilation_width = 1;
  params_74->conv_extra.kernel_tm = NULL;
  params_74->conv_extra.conv_mode = CSINN_DIRECT;
  params_74->pad_top = 0;
  params_74->pad_left = 0;
  params_74->pad_down = 0;
  params_74->pad_right = 0;
  params_74->base.name = "conv2d_Conv_61_PART_0_94_fuse_bias_add_Conv_61_95";
  params_74->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_72, output_74, kernel_74, bias_74, params_74);
  /* ReLU6 (n = 6 is the clip ceiling). */
  struct csinn_tensor *output_75 = csinn_alloc_tensor(sess);
  output_75->name = "output_75";
  output_75->dtype = CSINN_DTYPE_FLOAT16;
  output_75->layout = CSINN_LAYOUT_NCHW;
  output_75->dim[0] = 1;
  output_75->dim[1] = 576;
  output_75->dim[2] = 14;
  output_75->dim[3] = 14;
  output_75->dim_count = 4;
  output_75->qinfo = (struct csinn_quant_info *)(params_base + 716736);
  output_75->quant_channel = 1;
  struct csinn_relu_params *params_75 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_75->n = 6;
  params_75->base.name = "clip_Clip_62_96";
  params_75->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_74, output_75, params_75);
  /* 3x3 depthwise conv: kernel O1HW 576x1x3x3, group 576, pad 1. */
  struct csinn_tensor *output_76 = csinn_alloc_tensor(sess);
  output_76->name = "output_76";
  output_76->dtype = CSINN_DTYPE_FLOAT16;
  output_76->layout = CSINN_LAYOUT_NCHW;
  output_76->dim[0] = 1;
  output_76->dim[1] = 576;
  output_76->dim[2] = 14;
  output_76->dim[3] = 14;
  output_76->dim_count = 4;
  output_76->qinfo = (struct csinn_quant_info *)(params_base + 716760);
  output_76->quant_channel = 1;
  struct csinn_tensor *kernel_76 = csinn_alloc_tensor(sess);
  kernel_76->name = "kernel_76";
  kernel_76->data = params_base + 716840;
  kernel_76->is_const = 1;
  kernel_76->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_76->dtype = CSINN_DTYPE_FLOAT16;
  kernel_76->layout = CSINN_LAYOUT_O1HW;
  kernel_76->dim[0] = 576;
  kernel_76->dim[1] = 1;
  kernel_76->dim[2] = 3;
  kernel_76->dim[3] = 3;
  kernel_76->dim_count = 4;
  kernel_76->qinfo = (struct csinn_quant_info *)(params_base + 716784);
  kernel_76->quant_channel = 1;
  struct csinn_tensor *bias_76 = csinn_alloc_tensor(sess);
  bias_76->name = "bias_76";
  bias_76->data = params_base + 727232;
  bias_76->is_const = 1;
  bias_76->dtype = CSINN_DTYPE_FLOAT16;
  bias_76->layout = CSINN_LAYOUT_O;
  bias_76->dim[0] = 576;
  bias_76->dim_count = 1;
  bias_76->qinfo = (struct csinn_quant_info *)(params_base + 727208);
  bias_76->quant_channel = 1;
  struct csinn_conv2d_params *params_76 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_76->group = 576;
  params_76->stride_height = 1;
  params_76->stride_width = 1;
  params_76->dilation_height = 1;
  params_76->dilation_width = 1;
  params_76->conv_extra.kernel_tm = NULL;
  params_76->conv_extra.conv_mode = CSINN_DIRECT;
  params_76->pad_top = 1;
  params_76->pad_left = 1;
  params_76->pad_down = 1;
  params_76->pad_right = 1;
  params_76->base.name = "conv2d_Conv_63_PART_0_97_fuse_bias_add_Conv_63_98";
  params_76->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_75, output_76, kernel_76, bias_76, params_76);
  /* ReLU6 after the depthwise conv. */
  struct csinn_tensor *output_77 = csinn_alloc_tensor(sess);
  output_77->name = "output_77";
  output_77->dtype = CSINN_DTYPE_FLOAT16;
  output_77->layout = CSINN_LAYOUT_NCHW;
  output_77->dim[0] = 1;
  output_77->dim[1] = 576;
  output_77->dim[2] = 14;
  output_77->dim[3] = 14;
  output_77->dim_count = 4;
  output_77->qinfo = (struct csinn_quant_info *)(params_base + 728384);
  output_77->quant_channel = 1;
  struct csinn_relu_params *params_77 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_77->n = 6;
  params_77->base.name = "clip_Clip_64_99";
  params_77->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_76, output_77, params_77);
  /* Projection: 1x1 conv, group 1, 576 -> 96 channels. */
  struct csinn_tensor *output_78 = csinn_alloc_tensor(sess);
  output_78->name = "output_78";
  output_78->dtype = CSINN_DTYPE_FLOAT16;
  output_78->layout = CSINN_LAYOUT_NCHW;
  output_78->dim[0] = 1;
  output_78->dim[1] = 96;
  output_78->dim[2] = 14;
  output_78->dim[3] = 14;
  output_78->dim_count = 4;
  output_78->qinfo = (struct csinn_quant_info *)(params_base + 728408);
  output_78->quant_channel = 1;
  struct csinn_tensor *kernel_78 = csinn_alloc_tensor(sess);
  kernel_78->name = "kernel_78";
  kernel_78->data = params_base + 728488;
  kernel_78->is_const = 1;
  kernel_78->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_78->dtype = CSINN_DTYPE_FLOAT16;
  kernel_78->layout = CSINN_LAYOUT_OIHW;
  kernel_78->dim[0] = 96;
  kernel_78->dim[1] = 576;
  kernel_78->dim[2] = 1;
  kernel_78->dim[3] = 1;
  kernel_78->dim_count = 4;
  kernel_78->qinfo = (struct csinn_quant_info *)(params_base + 728432);
  kernel_78->quant_channel = 1;
  struct csinn_tensor *bias_78 = csinn_alloc_tensor(sess);
  bias_78->name = "bias_78";
  bias_78->data = params_base + 839104;
  bias_78->is_const = 1;
  bias_78->dtype = CSINN_DTYPE_FLOAT16;
  bias_78->layout = CSINN_LAYOUT_O;
  bias_78->dim[0] = 96;
  bias_78->dim_count = 1;
  bias_78->qinfo = (struct csinn_quant_info *)(params_base + 839080);
  bias_78->quant_channel = 1;
  struct csinn_conv2d_params *params_78 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_78->group = 1;
  params_78->stride_height = 1;
  params_78->stride_width = 1;
  params_78->dilation_height = 1;
  params_78->dilation_width = 1;
  params_78->conv_extra.kernel_tm = NULL;
  params_78->conv_extra.conv_mode = CSINN_DIRECT;
  params_78->pad_top = 0;
  params_78->pad_left = 0;
  params_78->pad_down = 0;
  params_78->pad_right = 0;
  params_78->base.name = "conv2d_Conv_65_PART_0_100_fuse_bias_add_Conv_65_101";
  params_78->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_77, output_78, kernel_78, bias_78, params_78);
  /* Residual connection: output_79 = output_72 + output_78 (1x96x14x14). */
  struct csinn_tensor *output_79 = csinn_alloc_tensor(sess);
  output_79->name = "output_79";
  output_79->dtype = CSINN_DTYPE_FLOAT16;
  output_79->layout = CSINN_LAYOUT_NCHW;
  output_79->dim[0] = 1;
  output_79->dim[1] = 96;
  output_79->dim[2] = 14;
  output_79->dim[3] = 14;
  output_79->dim_count = 4;
  output_79->qinfo = (struct csinn_quant_info *)(params_base + 839296);
  output_79->quant_channel = 1;
  struct csinn_diso_params *params_79 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
  params_79->base.name = "add_Add_66_102";
  params_79->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_add_init(output_72, output_78, output_79, params_79);
  struct csinn_tensor *output_82 = csinn_alloc_tensor(sess);
  output_82->name = "output_82";
  output_82->dtype = CSINN_DTYPE_FLOAT16;
  output_82->layout = CSINN_LAYOUT_NCHW;
  output_82->dim[0] = 1;
  output_82->dim[1] = 576;
  output_82->dim[2] = 14;
  output_82->dim[3] = 14;
  output_82->dim_count = 4;
  output_82->qinfo = (struct csinn_quant_info *)(params_base + 839320);
  output_82->quant_channel = 1;
  struct csinn_tensor *kernel_82 = csinn_alloc_tensor(sess);
  kernel_82->name = "kernel_82";
  kernel_82->data = params_base + 839400;
  kernel_82->is_const = 1;
  kernel_82->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_82->dtype = CSINN_DTYPE_FLOAT16;
  kernel_82->layout = CSINN_LAYOUT_OIHW;
  kernel_82->dim[0] = 576;
  kernel_82->dim[1] = 96;
  kernel_82->dim[2] = 1;
  kernel_82->dim[3] = 1;
  kernel_82->dim_count = 4;
  kernel_82->qinfo = (struct csinn_quant_info *)(params_base + 839344);
  kernel_82->quant_channel = 1;
  struct csinn_tensor *bias_82 = csinn_alloc_tensor(sess);
  bias_82->name = "bias_82";
  bias_82->data = params_base + 950016;
  bias_82->is_const = 1;
  bias_82->dtype = CSINN_DTYPE_FLOAT16;
  bias_82->layout = CSINN_LAYOUT_O;
  bias_82->dim[0] = 576;
  bias_82->dim_count = 1;
  bias_82->qinfo = (struct csinn_quant_info *)(params_base + 949992);
  bias_82->quant_channel = 1;
  /* Conv_67 (bias fused): standard convolution, group=1, stride 1x1, no padding,
     CSINN_DIRECT mode.  Input (output_79), kernel_82 and bias_82 are set up above
     this block; result goes to output_82. */
  struct csinn_conv2d_params *params_82 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_82->group = 1;
  params_82->stride_height = 1;
  params_82->stride_width = 1;
  params_82->dilation_height = 1;
  params_82->dilation_width = 1;
  params_82->conv_extra.kernel_tm = NULL;
  params_82->conv_extra.conv_mode = CSINN_DIRECT;
  params_82->pad_top = 0;
  params_82->pad_left = 0;
  params_82->pad_down = 0;
  params_82->pad_right = 0;
  params_82->base.name = "conv2d_Conv_67_PART_0_103_fuse_bias_add_Conv_67_104";
  params_82->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_79, output_82, kernel_82, bias_82, params_82);
  /* Clip_68: ReLU6 (n = 6, i.e. clamp to [0, 6]) output_82 -> output_83,
     fp16 NCHW 1x576x14x14.  qinfo for each tensor points into the params blob. */
  struct csinn_tensor *output_83 = csinn_alloc_tensor(sess);
  output_83->name = "output_83";
  output_83->dtype = CSINN_DTYPE_FLOAT16;
  output_83->layout = CSINN_LAYOUT_NCHW;
  output_83->dim[0] = 1;
  output_83->dim[1] = 576;
  output_83->dim[2] = 14;
  output_83->dim[3] = 14;
  output_83->dim_count = 4;
  output_83->qinfo = (struct csinn_quant_info *)(params_base + 951168);
  output_83->quant_channel = 1;
  struct csinn_relu_params *params_83 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_83->n = 6;
  params_83->base.name = "clip_Clip_68_105";
  params_83->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_82, output_83, params_83);
  /* Conv_69 (bias fused): depthwise 3x3 conv (group == channels == 576,
     kernel layout O1HW), stride 1, pad 1 on all sides: output_83 -> output_84
     (1x576x14x14).  Kernel/bias data are const blobs inside params_base. */
  struct csinn_tensor *output_84 = csinn_alloc_tensor(sess);
  output_84->name = "output_84";
  output_84->dtype = CSINN_DTYPE_FLOAT16;
  output_84->layout = CSINN_LAYOUT_NCHW;
  output_84->dim[0] = 1;
  output_84->dim[1] = 576;
  output_84->dim[2] = 14;
  output_84->dim[3] = 14;
  output_84->dim_count = 4;
  output_84->qinfo = (struct csinn_quant_info *)(params_base + 951192);
  output_84->quant_channel = 1;
  struct csinn_tensor *kernel_84 = csinn_alloc_tensor(sess);
  kernel_84->name = "kernel_84";
  kernel_84->data = params_base + 951272;
  kernel_84->is_const = 1;
  kernel_84->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_84->dtype = CSINN_DTYPE_FLOAT16;
  kernel_84->layout = CSINN_LAYOUT_O1HW;
  kernel_84->dim[0] = 576;
  kernel_84->dim[1] = 1;
  kernel_84->dim[2] = 3;
  kernel_84->dim[3] = 3;
  kernel_84->dim_count = 4;
  kernel_84->qinfo = (struct csinn_quant_info *)(params_base + 951216);
  kernel_84->quant_channel = 1;
  struct csinn_tensor *bias_84 = csinn_alloc_tensor(sess);
  bias_84->name = "bias_84";
  bias_84->data = params_base + 961664;
  bias_84->is_const = 1;
  bias_84->dtype = CSINN_DTYPE_FLOAT16;
  bias_84->layout = CSINN_LAYOUT_O;
  bias_84->dim[0] = 576;
  bias_84->dim_count = 1;
  bias_84->qinfo = (struct csinn_quant_info *)(params_base + 961640);
  bias_84->quant_channel = 1;
  struct csinn_conv2d_params *params_84 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_84->group = 576;
  params_84->stride_height = 1;
  params_84->stride_width = 1;
  params_84->dilation_height = 1;
  params_84->dilation_width = 1;
  params_84->conv_extra.kernel_tm = NULL;
  params_84->conv_extra.conv_mode = CSINN_DIRECT;
  params_84->pad_top = 1;
  params_84->pad_left = 1;
  params_84->pad_down = 1;
  params_84->pad_right = 1;
  params_84->base.name = "conv2d_Conv_69_PART_0_106_fuse_bias_add_Conv_69_107";
  params_84->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_83, output_84, kernel_84, bias_84, params_84);
  /* Clip_70: ReLU6 (clamp to [0, 6]) output_84 -> output_85 (1x576x14x14, fp16). */
  struct csinn_tensor *output_85 = csinn_alloc_tensor(sess);
  output_85->name = "output_85";
  output_85->dtype = CSINN_DTYPE_FLOAT16;
  output_85->layout = CSINN_LAYOUT_NCHW;
  output_85->dim[0] = 1;
  output_85->dim[1] = 576;
  output_85->dim[2] = 14;
  output_85->dim[3] = 14;
  output_85->dim_count = 4;
  output_85->qinfo = (struct csinn_quant_info *)(params_base + 962816);
  output_85->quant_channel = 1;
  struct csinn_relu_params *params_85 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_85->n = 6;
  params_85->base.name = "clip_Clip_70_108";
  params_85->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_84, output_85, params_85);
  /* Conv_71 (bias fused): 1x1 conv, group=1, stride 1, no padding, OIHW kernel
     160x... -- here 96x576x1x1: projects 576 channels down to 96
     (output_85 -> output_86, 1x96x14x14). */
  struct csinn_tensor *output_86 = csinn_alloc_tensor(sess);
  output_86->name = "output_86";
  output_86->dtype = CSINN_DTYPE_FLOAT16;
  output_86->layout = CSINN_LAYOUT_NCHW;
  output_86->dim[0] = 1;
  output_86->dim[1] = 96;
  output_86->dim[2] = 14;
  output_86->dim[3] = 14;
  output_86->dim_count = 4;
  output_86->qinfo = (struct csinn_quant_info *)(params_base + 962840);
  output_86->quant_channel = 1;
  struct csinn_tensor *kernel_86 = csinn_alloc_tensor(sess);
  kernel_86->name = "kernel_86";
  kernel_86->data = params_base + 962920;
  kernel_86->is_const = 1;
  kernel_86->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_86->dtype = CSINN_DTYPE_FLOAT16;
  kernel_86->layout = CSINN_LAYOUT_OIHW;
  kernel_86->dim[0] = 96;
  kernel_86->dim[1] = 576;
  kernel_86->dim[2] = 1;
  kernel_86->dim[3] = 1;
  kernel_86->dim_count = 4;
  kernel_86->qinfo = (struct csinn_quant_info *)(params_base + 962864);
  kernel_86->quant_channel = 1;
  struct csinn_tensor *bias_86 = csinn_alloc_tensor(sess);
  bias_86->name = "bias_86";
  bias_86->data = params_base + 1073536;
  bias_86->is_const = 1;
  bias_86->dtype = CSINN_DTYPE_FLOAT16;
  bias_86->layout = CSINN_LAYOUT_O;
  bias_86->dim[0] = 96;
  bias_86->dim_count = 1;
  bias_86->qinfo = (struct csinn_quant_info *)(params_base + 1073512);
  bias_86->quant_channel = 1;
  struct csinn_conv2d_params *params_86 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_86->group = 1;
  params_86->stride_height = 1;
  params_86->stride_width = 1;
  params_86->dilation_height = 1;
  params_86->dilation_width = 1;
  params_86->conv_extra.kernel_tm = NULL;
  params_86->conv_extra.conv_mode = CSINN_DIRECT;
  params_86->pad_top = 0;
  params_86->pad_left = 0;
  params_86->pad_down = 0;
  params_86->pad_right = 0;
  params_86->base.name = "conv2d_Conv_71_PART_0_109_fuse_bias_add_Conv_71_110";
  params_86->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_85, output_86, kernel_86, bias_86, params_86);
  /* Add_72: residual (skip-connection) elementwise add,
     output_79 + output_86 -> output_87 (1x96x14x14). */
  struct csinn_tensor *output_87 = csinn_alloc_tensor(sess);
  output_87->name = "output_87";
  output_87->dtype = CSINN_DTYPE_FLOAT16;
  output_87->layout = CSINN_LAYOUT_NCHW;
  output_87->dim[0] = 1;
  output_87->dim[1] = 96;
  output_87->dim[2] = 14;
  output_87->dim[3] = 14;
  output_87->dim_count = 4;
  output_87->qinfo = (struct csinn_quant_info *)(params_base + 1073728);
  output_87->quant_channel = 1;
  struct csinn_diso_params *params_87 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
  params_87->base.name = "add_Add_72_111";
  params_87->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_add_init(output_79, output_86, output_87, params_87);
  /* Conv_73 (bias fused): 1x1 conv, group=1, stride 1, no padding, kernel
     576x96x1x1: expands 96 channels to 576 (output_87 -> output_89, 1x576x14x14). */
  struct csinn_tensor *output_89 = csinn_alloc_tensor(sess);
  output_89->name = "output_89";
  output_89->dtype = CSINN_DTYPE_FLOAT16;
  output_89->layout = CSINN_LAYOUT_NCHW;
  output_89->dim[0] = 1;
  output_89->dim[1] = 576;
  output_89->dim[2] = 14;
  output_89->dim[3] = 14;
  output_89->dim_count = 4;
  output_89->qinfo = (struct csinn_quant_info *)(params_base + 1073752);
  output_89->quant_channel = 1;
  struct csinn_tensor *kernel_89 = csinn_alloc_tensor(sess);
  kernel_89->name = "kernel_89";
  kernel_89->data = params_base + 1073832;
  kernel_89->is_const = 1;
  kernel_89->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_89->dtype = CSINN_DTYPE_FLOAT16;
  kernel_89->layout = CSINN_LAYOUT_OIHW;
  kernel_89->dim[0] = 576;
  kernel_89->dim[1] = 96;
  kernel_89->dim[2] = 1;
  kernel_89->dim[3] = 1;
  kernel_89->dim_count = 4;
  kernel_89->qinfo = (struct csinn_quant_info *)(params_base + 1073776);
  kernel_89->quant_channel = 1;
  struct csinn_tensor *bias_89 = csinn_alloc_tensor(sess);
  bias_89->name = "bias_89";
  bias_89->data = params_base + 1184448;
  bias_89->is_const = 1;
  bias_89->dtype = CSINN_DTYPE_FLOAT16;
  bias_89->layout = CSINN_LAYOUT_O;
  bias_89->dim[0] = 576;
  bias_89->dim_count = 1;
  bias_89->qinfo = (struct csinn_quant_info *)(params_base + 1184424);
  bias_89->quant_channel = 1;
  struct csinn_conv2d_params *params_89 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_89->group = 1;
  params_89->stride_height = 1;
  params_89->stride_width = 1;
  params_89->dilation_height = 1;
  params_89->dilation_width = 1;
  params_89->conv_extra.kernel_tm = NULL;
  params_89->conv_extra.conv_mode = CSINN_DIRECT;
  params_89->pad_top = 0;
  params_89->pad_left = 0;
  params_89->pad_down = 0;
  params_89->pad_right = 0;
  params_89->base.name = "conv2d_Conv_73_PART_0_112_fuse_bias_add_Conv_73_113";
  params_89->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_87, output_89, kernel_89, bias_89, params_89);
  /* Clip_74: ReLU6 (clamp to [0, 6]) output_89 -> output_90 (1x576x14x14). */
  struct csinn_tensor *output_90 = csinn_alloc_tensor(sess);
  output_90->name = "output_90";
  output_90->dtype = CSINN_DTYPE_FLOAT16;
  output_90->layout = CSINN_LAYOUT_NCHW;
  output_90->dim[0] = 1;
  output_90->dim[1] = 576;
  output_90->dim[2] = 14;
  output_90->dim[3] = 14;
  output_90->dim_count = 4;
  output_90->qinfo = (struct csinn_quant_info *)(params_base + 1185600);
  output_90->quant_channel = 1;
  struct csinn_relu_params *params_90 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_90->n = 6;
  params_90->base.name = "clip_Clip_74_114";
  params_90->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_89, output_90, params_90);
  /* Conv_75 (bias fused): depthwise 3x3 conv (group == channels == 576),
     stride 2x2, pad 1: spatially downsamples 14x14 -> 7x7
     (output_90 -> output_91, 1x576x7x7). */
  struct csinn_tensor *output_91 = csinn_alloc_tensor(sess);
  output_91->name = "output_91";
  output_91->dtype = CSINN_DTYPE_FLOAT16;
  output_91->layout = CSINN_LAYOUT_NCHW;
  output_91->dim[0] = 1;
  output_91->dim[1] = 576;
  output_91->dim[2] = 7;
  output_91->dim[3] = 7;
  output_91->dim_count = 4;
  output_91->qinfo = (struct csinn_quant_info *)(params_base + 1185624);
  output_91->quant_channel = 1;
  struct csinn_tensor *kernel_91 = csinn_alloc_tensor(sess);
  kernel_91->name = "kernel_91";
  kernel_91->data = params_base + 1185704;
  kernel_91->is_const = 1;
  kernel_91->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_91->dtype = CSINN_DTYPE_FLOAT16;
  kernel_91->layout = CSINN_LAYOUT_O1HW;
  kernel_91->dim[0] = 576;
  kernel_91->dim[1] = 1;
  kernel_91->dim[2] = 3;
  kernel_91->dim[3] = 3;
  kernel_91->dim_count = 4;
  kernel_91->qinfo = (struct csinn_quant_info *)(params_base + 1185648);
  kernel_91->quant_channel = 1;
  struct csinn_tensor *bias_91 = csinn_alloc_tensor(sess);
  bias_91->name = "bias_91";
  bias_91->data = params_base + 1196096;
  bias_91->is_const = 1;
  bias_91->dtype = CSINN_DTYPE_FLOAT16;
  bias_91->layout = CSINN_LAYOUT_O;
  bias_91->dim[0] = 576;
  bias_91->dim_count = 1;
  bias_91->qinfo = (struct csinn_quant_info *)(params_base + 1196072);
  bias_91->quant_channel = 1;
  struct csinn_conv2d_params *params_91 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_91->group = 576;
  params_91->stride_height = 2;
  params_91->stride_width = 2;
  params_91->dilation_height = 1;
  params_91->dilation_width = 1;
  params_91->conv_extra.kernel_tm = NULL;
  params_91->conv_extra.conv_mode = CSINN_DIRECT;
  params_91->pad_top = 1;
  params_91->pad_left = 1;
  params_91->pad_down = 1;
  params_91->pad_right = 1;
  params_91->base.name = "conv2d_Conv_75_PART_0_115_fuse_bias_add_Conv_75_116";
  params_91->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_90, output_91, kernel_91, bias_91, params_91);
  /* Clip_76: ReLU6 (clamp to [0, 6]) output_91 -> output_92 (1x576x7x7). */
  struct csinn_tensor *output_92 = csinn_alloc_tensor(sess);
  output_92->name = "output_92";
  output_92->dtype = CSINN_DTYPE_FLOAT16;
  output_92->layout = CSINN_LAYOUT_NCHW;
  output_92->dim[0] = 1;
  output_92->dim[1] = 576;
  output_92->dim[2] = 7;
  output_92->dim[3] = 7;
  output_92->dim_count = 4;
  output_92->qinfo = (struct csinn_quant_info *)(params_base + 1197248);
  output_92->quant_channel = 1;
  struct csinn_relu_params *params_92 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_92->n = 6;
  params_92->base.name = "clip_Clip_76_117";
  params_92->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_91, output_92, params_92);
  /* Conv_77 (bias fused): 1x1 conv, group=1, kernel 160x576x1x1: projects
     576 channels down to 160 (output_92 -> output_93, 1x160x7x7). */
  struct csinn_tensor *output_93 = csinn_alloc_tensor(sess);
  output_93->name = "output_93";
  output_93->dtype = CSINN_DTYPE_FLOAT16;
  output_93->layout = CSINN_LAYOUT_NCHW;
  output_93->dim[0] = 1;
  output_93->dim[1] = 160;
  output_93->dim[2] = 7;
  output_93->dim[3] = 7;
  output_93->dim_count = 4;
  output_93->qinfo = (struct csinn_quant_info *)(params_base + 1197272);
  output_93->quant_channel = 1;
  struct csinn_tensor *kernel_93 = csinn_alloc_tensor(sess);
  kernel_93->name = "kernel_93";
  kernel_93->data = params_base + 1197352;
  kernel_93->is_const = 1;
  kernel_93->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_93->dtype = CSINN_DTYPE_FLOAT16;
  kernel_93->layout = CSINN_LAYOUT_OIHW;
  kernel_93->dim[0] = 160;
  kernel_93->dim[1] = 576;
  kernel_93->dim[2] = 1;
  kernel_93->dim[3] = 1;
  kernel_93->dim_count = 4;
  kernel_93->qinfo = (struct csinn_quant_info *)(params_base + 1197296);
  kernel_93->quant_channel = 1;
  struct csinn_tensor *bias_93 = csinn_alloc_tensor(sess);
  bias_93->name = "bias_93";
  bias_93->data = params_base + 1381696;
  bias_93->is_const = 1;
  bias_93->dtype = CSINN_DTYPE_FLOAT16;
  bias_93->layout = CSINN_LAYOUT_O;
  bias_93->dim[0] = 160;
  bias_93->dim_count = 1;
  bias_93->qinfo = (struct csinn_quant_info *)(params_base + 1381672);
  bias_93->quant_channel = 1;
  struct csinn_conv2d_params *params_93 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_93->group = 1;
  params_93->stride_height = 1;
  params_93->stride_width = 1;
  params_93->dilation_height = 1;
  params_93->dilation_width = 1;
  params_93->conv_extra.kernel_tm = NULL;
  params_93->conv_extra.conv_mode = CSINN_DIRECT;
  params_93->pad_top = 0;
  params_93->pad_left = 0;
  params_93->pad_down = 0;
  params_93->pad_right = 0;
  params_93->base.name = "conv2d_Conv_77_PART_0_118_fuse_bias_add_Conv_77_119";
  params_93->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_92, output_93, kernel_93, bias_93, params_93);
  /* Conv_78 (bias fused): 1x1 conv, group=1, kernel 960x160x1x1: expands
     160 channels to 960 (output_93 -> output_95, 1x960x7x7).  Note: output_93
     is also consumed later by the residual Add_83. */
  struct csinn_tensor *output_95 = csinn_alloc_tensor(sess);
  output_95->name = "output_95";
  output_95->dtype = CSINN_DTYPE_FLOAT16;
  output_95->layout = CSINN_LAYOUT_NCHW;
  output_95->dim[0] = 1;
  output_95->dim[1] = 960;
  output_95->dim[2] = 7;
  output_95->dim[3] = 7;
  output_95->dim_count = 4;
  output_95->qinfo = (struct csinn_quant_info *)(params_base + 1382016);
  output_95->quant_channel = 1;
  struct csinn_tensor *kernel_95 = csinn_alloc_tensor(sess);
  kernel_95->name = "kernel_95";
  kernel_95->data = params_base + 1382088;
  kernel_95->is_const = 1;
  kernel_95->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_95->dtype = CSINN_DTYPE_FLOAT16;
  kernel_95->layout = CSINN_LAYOUT_OIHW;
  kernel_95->dim[0] = 960;
  kernel_95->dim[1] = 160;
  kernel_95->dim[2] = 1;
  kernel_95->dim[3] = 1;
  kernel_95->dim_count = 4;
  kernel_95->qinfo = (struct csinn_quant_info *)(params_base + 1382040);
  kernel_95->quant_channel = 1;
  struct csinn_tensor *bias_95 = csinn_alloc_tensor(sess);
  bias_95->name = "bias_95";
  bias_95->data = params_base + 1689312;
  bias_95->is_const = 1;
  bias_95->dtype = CSINN_DTYPE_FLOAT16;
  bias_95->layout = CSINN_LAYOUT_O;
  bias_95->dim[0] = 960;
  bias_95->dim_count = 1;
  bias_95->qinfo = (struct csinn_quant_info *)(params_base + 1689288);
  bias_95->quant_channel = 1;
  struct csinn_conv2d_params *params_95 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_95->group = 1;
  params_95->stride_height = 1;
  params_95->stride_width = 1;
  params_95->dilation_height = 1;
  params_95->dilation_width = 1;
  params_95->conv_extra.kernel_tm = NULL;
  params_95->conv_extra.conv_mode = CSINN_DIRECT;
  params_95->pad_top = 0;
  params_95->pad_left = 0;
  params_95->pad_down = 0;
  params_95->pad_right = 0;
  params_95->base.name = "conv2d_Conv_78_PART_0_120_fuse_bias_add_Conv_78_121";
  params_95->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_93, output_95, kernel_95, bias_95, params_95);
  /* Clip_79: ReLU6 (clamp to [0, 6]) output_95 -> output_96 (1x960x7x7). */
  struct csinn_tensor *output_96 = csinn_alloc_tensor(sess);
  output_96->name = "output_96";
  output_96->dtype = CSINN_DTYPE_FLOAT16;
  output_96->layout = CSINN_LAYOUT_NCHW;
  output_96->dim[0] = 1;
  output_96->dim[1] = 960;
  output_96->dim[2] = 7;
  output_96->dim[3] = 7;
  output_96->dim_count = 4;
  output_96->qinfo = (struct csinn_quant_info *)(params_base + 1691232);
  output_96->quant_channel = 1;
  struct csinn_relu_params *params_96 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_96->n = 6;
  params_96->base.name = "clip_Clip_79_122";
  params_96->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_95, output_96, params_96);
  /* Conv_80 (bias fused): depthwise 3x3 conv (group == channels == 960),
     stride 1, pad 1: output_96 -> output_97 (1x960x7x7). */
  struct csinn_tensor *output_97 = csinn_alloc_tensor(sess);
  output_97->name = "output_97";
  output_97->dtype = CSINN_DTYPE_FLOAT16;
  output_97->layout = CSINN_LAYOUT_NCHW;
  output_97->dim[0] = 1;
  output_97->dim[1] = 960;
  output_97->dim[2] = 7;
  output_97->dim[3] = 7;
  output_97->dim_count = 4;
  output_97->qinfo = (struct csinn_quant_info *)(params_base + 1691256);
  output_97->quant_channel = 1;
  struct csinn_tensor *kernel_97 = csinn_alloc_tensor(sess);
  kernel_97->name = "kernel_97";
  kernel_97->data = params_base + 1691336;
  kernel_97->is_const = 1;
  kernel_97->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_97->dtype = CSINN_DTYPE_FLOAT16;
  kernel_97->layout = CSINN_LAYOUT_O1HW;
  kernel_97->dim[0] = 960;
  kernel_97->dim[1] = 1;
  kernel_97->dim[2] = 3;
  kernel_97->dim[3] = 3;
  kernel_97->dim_count = 4;
  kernel_97->qinfo = (struct csinn_quant_info *)(params_base + 1691280);
  kernel_97->quant_channel = 1;
  struct csinn_tensor *bias_97 = csinn_alloc_tensor(sess);
  bias_97->name = "bias_97";
  bias_97->data = params_base + 1708640;
  bias_97->is_const = 1;
  bias_97->dtype = CSINN_DTYPE_FLOAT16;
  bias_97->layout = CSINN_LAYOUT_O;
  bias_97->dim[0] = 960;
  bias_97->dim_count = 1;
  bias_97->qinfo = (struct csinn_quant_info *)(params_base + 1708616);
  bias_97->quant_channel = 1;
  struct csinn_conv2d_params *params_97 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_97->group = 960;
  params_97->stride_height = 1;
  params_97->stride_width = 1;
  params_97->dilation_height = 1;
  params_97->dilation_width = 1;
  params_97->conv_extra.kernel_tm = NULL;
  params_97->conv_extra.conv_mode = CSINN_DIRECT;
  params_97->pad_top = 1;
  params_97->pad_left = 1;
  params_97->pad_down = 1;
  params_97->pad_right = 1;
  params_97->base.name = "conv2d_Conv_80_PART_0_123_fuse_bias_add_Conv_80_124";
  params_97->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_96, output_97, kernel_97, bias_97, params_97);
  /* Clip_81: ReLU6 (clamp to [0, 6]) output_97 -> output_98 (1x960x7x7). */
  struct csinn_tensor *output_98 = csinn_alloc_tensor(sess);
  output_98->name = "output_98";
  output_98->dtype = CSINN_DTYPE_FLOAT16;
  output_98->layout = CSINN_LAYOUT_NCHW;
  output_98->dim[0] = 1;
  output_98->dim[1] = 960;
  output_98->dim[2] = 7;
  output_98->dim[3] = 7;
  output_98->dim_count = 4;
  output_98->qinfo = (struct csinn_quant_info *)(params_base + 1710560);
  output_98->quant_channel = 1;
  struct csinn_relu_params *params_98 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_98->n = 6;
  params_98->base.name = "clip_Clip_81_125";
  params_98->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_97, output_98, params_98);
  /* Conv_82 (bias fused): 1x1 conv, group=1, kernel 160x960x1x1: projects
     960 channels down to 160 (output_98 -> output_99, 1x160x7x7). */
  struct csinn_tensor *output_99 = csinn_alloc_tensor(sess);
  output_99->name = "output_99";
  output_99->dtype = CSINN_DTYPE_FLOAT16;
  output_99->layout = CSINN_LAYOUT_NCHW;
  output_99->dim[0] = 1;
  output_99->dim[1] = 160;
  output_99->dim[2] = 7;
  output_99->dim[3] = 7;
  output_99->dim_count = 4;
  output_99->qinfo = (struct csinn_quant_info *)(params_base + 1710584);
  output_99->quant_channel = 1;
  struct csinn_tensor *kernel_99 = csinn_alloc_tensor(sess);
  kernel_99->name = "kernel_99";
  kernel_99->data = params_base + 1710664;
  kernel_99->is_const = 1;
  kernel_99->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_99->dtype = CSINN_DTYPE_FLOAT16;
  kernel_99->layout = CSINN_LAYOUT_OIHW;
  kernel_99->dim[0] = 160;
  kernel_99->dim[1] = 960;
  kernel_99->dim[2] = 1;
  kernel_99->dim[3] = 1;
  kernel_99->dim_count = 4;
  kernel_99->qinfo = (struct csinn_quant_info *)(params_base + 1710608);
  kernel_99->quant_channel = 1;
  struct csinn_tensor *bias_99 = csinn_alloc_tensor(sess);
  bias_99->name = "bias_99";
  bias_99->data = params_base + 2017888;
  bias_99->is_const = 1;
  bias_99->dtype = CSINN_DTYPE_FLOAT16;
  bias_99->layout = CSINN_LAYOUT_O;
  bias_99->dim[0] = 160;
  bias_99->dim_count = 1;
  bias_99->qinfo = (struct csinn_quant_info *)(params_base + 2017864);
  bias_99->quant_channel = 1;
  struct csinn_conv2d_params *params_99 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_99->group = 1;
  params_99->stride_height = 1;
  params_99->stride_width = 1;
  params_99->dilation_height = 1;
  params_99->dilation_width = 1;
  params_99->conv_extra.kernel_tm = NULL;
  params_99->conv_extra.conv_mode = CSINN_DIRECT;
  params_99->pad_top = 0;
  params_99->pad_left = 0;
  params_99->pad_down = 0;
  params_99->pad_right = 0;
  params_99->base.name = "conv2d_Conv_82_PART_0_126_fuse_bias_add_Conv_82_127";
  params_99->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_98, output_99, kernel_99, bias_99, params_99);
  /* Add_83: residual elementwise add, output_93 + output_99 -> output_100
     (1x160x7x7). */
  struct csinn_tensor *output_100 = csinn_alloc_tensor(sess);
  output_100->name = "output_100";
  output_100->dtype = CSINN_DTYPE_FLOAT16;
  output_100->layout = CSINN_LAYOUT_NCHW;
  output_100->dim[0] = 1;
  output_100->dim[1] = 160;
  output_100->dim[2] = 7;
  output_100->dim[3] = 7;
  output_100->dim_count = 4;
  output_100->qinfo = (struct csinn_quant_info *)(params_base + 2018208);
  output_100->quant_channel = 1;
  struct csinn_diso_params *params_100 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
  params_100->base.name = "add_Add_83_128";
  params_100->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_add_init(output_93, output_99, output_100, params_100);
  /* Conv_84 (bias fused): 1x1 conv, group=1, kernel 960x160x1x1: expands
     160 channels to 960 (output_100 -> output_103, 1x960x7x7). */
  struct csinn_tensor *output_103 = csinn_alloc_tensor(sess);
  output_103->name = "output_103";
  output_103->dtype = CSINN_DTYPE_FLOAT16;
  output_103->layout = CSINN_LAYOUT_NCHW;
  output_103->dim[0] = 1;
  output_103->dim[1] = 960;
  output_103->dim[2] = 7;
  output_103->dim[3] = 7;
  output_103->dim_count = 4;
  output_103->qinfo = (struct csinn_quant_info *)(params_base + 2018232);
  output_103->quant_channel = 1;
  struct csinn_tensor *kernel_103 = csinn_alloc_tensor(sess);
  kernel_103->name = "kernel_103";
  kernel_103->data = params_base + 2018312;
  kernel_103->is_const = 1;
  kernel_103->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_103->dtype = CSINN_DTYPE_FLOAT16;
  kernel_103->layout = CSINN_LAYOUT_OIHW;
  kernel_103->dim[0] = 960;
  kernel_103->dim[1] = 160;
  kernel_103->dim[2] = 1;
  kernel_103->dim[3] = 1;
  kernel_103->dim_count = 4;
  kernel_103->qinfo = (struct csinn_quant_info *)(params_base + 2018256);
  kernel_103->quant_channel = 1;
  struct csinn_tensor *bias_103 = csinn_alloc_tensor(sess);
  bias_103->name = "bias_103";
  bias_103->data = params_base + 2325536;
  bias_103->is_const = 1;
  bias_103->dtype = CSINN_DTYPE_FLOAT16;
  bias_103->layout = CSINN_LAYOUT_O;
  bias_103->dim[0] = 960;
  bias_103->dim_count = 1;
  bias_103->qinfo = (struct csinn_quant_info *)(params_base + 2325512);
  bias_103->quant_channel = 1;
  struct csinn_conv2d_params *params_103 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_103->group = 1;
  params_103->stride_height = 1;
  params_103->stride_width = 1;
  params_103->dilation_height = 1;
  params_103->dilation_width = 1;
  params_103->conv_extra.kernel_tm = NULL;
  params_103->conv_extra.conv_mode = CSINN_DIRECT;
  params_103->pad_top = 0;
  params_103->pad_left = 0;
  params_103->pad_down = 0;
  params_103->pad_right = 0;
  params_103->base.name = "conv2d_Conv_84_PART_0_129_fuse_bias_add_Conv_84_130";
  params_103->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_100, output_103, kernel_103, bias_103, params_103);
  /* Clip_85: ReLU6 (clamp to [0, 6]) output_103 -> output_104 (1x960x7x7). */
  struct csinn_tensor *output_104 = csinn_alloc_tensor(sess);
  output_104->name = "output_104";
  output_104->dtype = CSINN_DTYPE_FLOAT16;
  output_104->layout = CSINN_LAYOUT_NCHW;
  output_104->dim[0] = 1;
  output_104->dim[1] = 960;
  output_104->dim[2] = 7;
  output_104->dim[3] = 7;
  output_104->dim_count = 4;
  output_104->qinfo = (struct csinn_quant_info *)(params_base + 2327456);
  output_104->quant_channel = 1;
  struct csinn_relu_params *params_104 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_104->n = 6;
  params_104->base.name = "clip_Clip_85_131";
  params_104->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_103, output_104, params_104);
  /* Conv_86 (bias fused): depthwise 3x3 conv (group == channels == 960),
     stride 1, pad 1: output_104 -> output_105 (1x960x7x7). */
  struct csinn_tensor *output_105 = csinn_alloc_tensor(sess);
  output_105->name = "output_105";
  output_105->dtype = CSINN_DTYPE_FLOAT16;
  output_105->layout = CSINN_LAYOUT_NCHW;
  output_105->dim[0] = 1;
  output_105->dim[1] = 960;
  output_105->dim[2] = 7;
  output_105->dim[3] = 7;
  output_105->dim_count = 4;
  output_105->qinfo = (struct csinn_quant_info *)(params_base + 2327480);
  output_105->quant_channel = 1;
  struct csinn_tensor *kernel_105 = csinn_alloc_tensor(sess);
  kernel_105->name = "kernel_105";
  kernel_105->data = params_base + 2327560;
  kernel_105->is_const = 1;
  kernel_105->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_105->dtype = CSINN_DTYPE_FLOAT16;
  kernel_105->layout = CSINN_LAYOUT_O1HW;
  kernel_105->dim[0] = 960;
  kernel_105->dim[1] = 1;
  kernel_105->dim[2] = 3;
  kernel_105->dim[3] = 3;
  kernel_105->dim_count = 4;
  kernel_105->qinfo = (struct csinn_quant_info *)(params_base + 2327504);
  kernel_105->quant_channel = 1;
  struct csinn_tensor *bias_105 = csinn_alloc_tensor(sess);
  bias_105->name = "bias_105";
  bias_105->data = params_base + 2344864;
  bias_105->is_const = 1;
  bias_105->dtype = CSINN_DTYPE_FLOAT16;
  bias_105->layout = CSINN_LAYOUT_O;
  bias_105->dim[0] = 960;
  bias_105->dim_count = 1;
  bias_105->qinfo = (struct csinn_quant_info *)(params_base + 2344840);
  bias_105->quant_channel = 1;
  struct csinn_conv2d_params *params_105 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_105->group = 960;
  params_105->stride_height = 1;
  params_105->stride_width = 1;
  params_105->dilation_height = 1;
  params_105->dilation_width = 1;
  params_105->conv_extra.kernel_tm = NULL;
  params_105->conv_extra.conv_mode = CSINN_DIRECT;
  params_105->pad_top = 1;
  params_105->pad_left = 1;
  params_105->pad_down = 1;
  params_105->pad_right = 1;
  params_105->base.name = "conv2d_Conv_86_PART_0_132_fuse_bias_add_Conv_86_133";
  params_105->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_104, output_105, kernel_105, bias_105, params_105);
  /* Clip_87: ReLU6 (clamp to [0, 6]) output_105 -> output_106 (1x960x7x7). */
  struct csinn_tensor *output_106 = csinn_alloc_tensor(sess);
  output_106->name = "output_106";
  output_106->dtype = CSINN_DTYPE_FLOAT16;
  output_106->layout = CSINN_LAYOUT_NCHW;
  output_106->dim[0] = 1;
  output_106->dim[1] = 960;
  output_106->dim[2] = 7;
  output_106->dim[3] = 7;
  output_106->dim_count = 4;
  output_106->qinfo = (struct csinn_quant_info *)(params_base + 2346784);
  output_106->quant_channel = 1;
  struct csinn_relu_params *params_106 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_106->n = 6;
  params_106->base.name = "clip_Clip_87_134";
  params_106->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_105, output_106, params_106);
  /* Conv_88 (bias fused): 1x1 conv, group=1, kernel 160x960x1x1: projects
     960 channels down to 160 (output_106 -> output_107, 1x160x7x7). */
  struct csinn_tensor *output_107 = csinn_alloc_tensor(sess);
  output_107->name = "output_107";
  output_107->dtype = CSINN_DTYPE_FLOAT16;
  output_107->layout = CSINN_LAYOUT_NCHW;
  output_107->dim[0] = 1;
  output_107->dim[1] = 160;
  output_107->dim[2] = 7;
  output_107->dim[3] = 7;
  output_107->dim_count = 4;
  output_107->qinfo = (struct csinn_quant_info *)(params_base + 2346808);
  output_107->quant_channel = 1;
  struct csinn_tensor *kernel_107 = csinn_alloc_tensor(sess);
  kernel_107->name = "kernel_107";
  kernel_107->data = params_base + 2346888;
  kernel_107->is_const = 1;
  kernel_107->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_107->dtype = CSINN_DTYPE_FLOAT16;
  kernel_107->layout = CSINN_LAYOUT_OIHW;
  kernel_107->dim[0] = 160;
  kernel_107->dim[1] = 960;
  kernel_107->dim[2] = 1;
  kernel_107->dim[3] = 1;
  kernel_107->dim_count = 4;
  kernel_107->qinfo = (struct csinn_quant_info *)(params_base + 2346832);
  kernel_107->quant_channel = 1;
  struct csinn_tensor *bias_107 = csinn_alloc_tensor(sess);
  bias_107->name = "bias_107";
  bias_107->data = params_base + 2654112;
  bias_107->is_const = 1;
  bias_107->dtype = CSINN_DTYPE_FLOAT16;
  bias_107->layout = CSINN_LAYOUT_O;
  bias_107->dim[0] = 160;
  bias_107->dim_count = 1;
  bias_107->qinfo = (struct csinn_quant_info *)(params_base + 2654088);
  bias_107->quant_channel = 1;
  struct csinn_conv2d_params *params_107 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_107->group = 1;
  params_107->stride_height = 1;
  params_107->stride_width = 1;
  params_107->dilation_height = 1;
  params_107->dilation_width = 1;
  params_107->conv_extra.kernel_tm = NULL;
  params_107->conv_extra.conv_mode = CSINN_DIRECT;
  params_107->pad_top = 0;
  params_107->pad_left = 0;
  params_107->pad_down = 0;
  params_107->pad_right = 0;
  params_107->base.name = "conv2d_Conv_88_PART_0_135_fuse_bias_add_Conv_88_136";
  params_107->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_106, output_107, kernel_107, bias_107, params_107);
  /* Add_89: residual elementwise add, output_100 + output_107 -> output_108
     (1x160x7x7). */
  struct csinn_tensor *output_108 = csinn_alloc_tensor(sess);
  output_108->name = "output_108";
  output_108->dtype = CSINN_DTYPE_FLOAT16;
  output_108->layout = CSINN_LAYOUT_NCHW;
  output_108->dim[0] = 1;
  output_108->dim[1] = 160;
  output_108->dim[2] = 7;
  output_108->dim[3] = 7;
  output_108->dim_count = 4;
  output_108->qinfo = (struct csinn_quant_info *)(params_base + 2654432);
  output_108->quant_channel = 1;
  struct csinn_diso_params *params_108 = csinn_alloc_params(sizeof(struct csinn_diso_params), sess);
  params_108->base.name = "add_Add_89_137";
  params_108->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_add_init(output_100, output_107, output_108, params_108);
  /* Conv_90 (bias fused): 1x1 conv, group=1, kernel 960x160x1x1: expands
     160 channels to 960 (output_108 -> output_110, 1x960x7x7). */
  struct csinn_tensor *output_110 = csinn_alloc_tensor(sess);
  output_110->name = "output_110";
  output_110->dtype = CSINN_DTYPE_FLOAT16;
  output_110->layout = CSINN_LAYOUT_NCHW;
  output_110->dim[0] = 1;
  output_110->dim[1] = 960;
  output_110->dim[2] = 7;
  output_110->dim[3] = 7;
  output_110->dim_count = 4;
  output_110->qinfo = (struct csinn_quant_info *)(params_base + 2654456);
  output_110->quant_channel = 1;
  struct csinn_tensor *kernel_110 = csinn_alloc_tensor(sess);
  kernel_110->name = "kernel_110";
  kernel_110->data = params_base + 2654536;
  kernel_110->is_const = 1;
  kernel_110->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_110->dtype = CSINN_DTYPE_FLOAT16;
  kernel_110->layout = CSINN_LAYOUT_OIHW;
  kernel_110->dim[0] = 960;
  kernel_110->dim[1] = 160;
  kernel_110->dim[2] = 1;
  kernel_110->dim[3] = 1;
  kernel_110->dim_count = 4;
  kernel_110->qinfo = (struct csinn_quant_info *)(params_base + 2654480);
  kernel_110->quant_channel = 1;
  struct csinn_tensor *bias_110 = csinn_alloc_tensor(sess);
  bias_110->name = "bias_110";
  bias_110->data = params_base + 2961760;
  bias_110->is_const = 1;
  bias_110->dtype = CSINN_DTYPE_FLOAT16;
  bias_110->layout = CSINN_LAYOUT_O;
  bias_110->dim[0] = 960;
  bias_110->dim_count = 1;
  bias_110->qinfo = (struct csinn_quant_info *)(params_base + 2961736);
  bias_110->quant_channel = 1;
  struct csinn_conv2d_params *params_110 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_110->group = 1;
  params_110->stride_height = 1;
  params_110->stride_width = 1;
  params_110->dilation_height = 1;
  params_110->dilation_width = 1;
  params_110->conv_extra.kernel_tm = NULL;
  params_110->conv_extra.conv_mode = CSINN_DIRECT;
  params_110->pad_top = 0;
  params_110->pad_left = 0;
  params_110->pad_down = 0;
  params_110->pad_right = 0;
  params_110->base.name = "conv2d_Conv_90_PART_0_138_fuse_bias_add_Conv_90_139";
  params_110->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_108, output_110, kernel_110, bias_110, params_110);
  /* Clip_91: ReLU6 (clamp to [0, 6]) output_110 -> output_111 (1x960x7x7). */
  struct csinn_tensor *output_111 = csinn_alloc_tensor(sess);
  output_111->name = "output_111";
  output_111->dtype = CSINN_DTYPE_FLOAT16;
  output_111->layout = CSINN_LAYOUT_NCHW;
  output_111->dim[0] = 1;
  output_111->dim[1] = 960;
  output_111->dim[2] = 7;
  output_111->dim[3] = 7;
  output_111->dim_count = 4;
  output_111->qinfo = (struct csinn_quant_info *)(params_base + 2963680);
  output_111->quant_channel = 1;
  struct csinn_relu_params *params_111 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_111->n = 6;
  params_111->base.name = "clip_Clip_91_140";
  params_111->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_110, output_111, params_111);
  /* Conv_92 (bias fused): depthwise 3x3 conv (group == channels == 960),
     stride 1, pad 1: output_111 -> output_112 (1x960x7x7). */
  struct csinn_tensor *output_112 = csinn_alloc_tensor(sess);
  output_112->name = "output_112";
  output_112->dtype = CSINN_DTYPE_FLOAT16;
  output_112->layout = CSINN_LAYOUT_NCHW;
  output_112->dim[0] = 1;
  output_112->dim[1] = 960;
  output_112->dim[2] = 7;
  output_112->dim[3] = 7;
  output_112->dim_count = 4;
  output_112->qinfo = (struct csinn_quant_info *)(params_base + 2963704);
  output_112->quant_channel = 1;
  struct csinn_tensor *kernel_112 = csinn_alloc_tensor(sess);
  kernel_112->name = "kernel_112";
  kernel_112->data = params_base + 2963784;
  kernel_112->is_const = 1;
  kernel_112->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_112->dtype = CSINN_DTYPE_FLOAT16;
  kernel_112->layout = CSINN_LAYOUT_O1HW;
  kernel_112->dim[0] = 960;
  kernel_112->dim[1] = 1;
  kernel_112->dim[2] = 3;
  kernel_112->dim[3] = 3;
  kernel_112->dim_count = 4;
  kernel_112->qinfo = (struct csinn_quant_info *)(params_base + 2963728);
  kernel_112->quant_channel = 1;
  struct csinn_tensor *bias_112 = csinn_alloc_tensor(sess);
  bias_112->name = "bias_112";
  bias_112->data = params_base + 2981088;
  bias_112->is_const = 1;
  bias_112->dtype = CSINN_DTYPE_FLOAT16;
  bias_112->layout = CSINN_LAYOUT_O;
  bias_112->dim[0] = 960;
  bias_112->dim_count = 1;
  bias_112->qinfo = (struct csinn_quant_info *)(params_base + 2981064);
  bias_112->quant_channel = 1;
  struct csinn_conv2d_params *params_112 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_112->group = 960;
  params_112->stride_height = 1;
  params_112->stride_width = 1;
  params_112->dilation_height = 1;
  params_112->dilation_width = 1;
  params_112->conv_extra.kernel_tm = NULL;
  params_112->conv_extra.conv_mode = CSINN_DIRECT;
  params_112->pad_top = 1;
  params_112->pad_left = 1;
  params_112->pad_down = 1;
  params_112->pad_right = 1;
  params_112->base.name = "conv2d_Conv_92_PART_0_141_fuse_bias_add_Conv_92_142";
  params_112->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_111, output_112, kernel_112, bias_112, params_112);
  /* Clip_93: ReLU6 (clamp to [0, 6]) output_112 -> output_113 (1x960x7x7). */
  struct csinn_tensor *output_113 = csinn_alloc_tensor(sess);
  output_113->name = "output_113";
  output_113->dtype = CSINN_DTYPE_FLOAT16;
  output_113->layout = CSINN_LAYOUT_NCHW;
  output_113->dim[0] = 1;
  output_113->dim[1] = 960;
  output_113->dim[2] = 7;
  output_113->dim[3] = 7;
  output_113->dim_count = 4;
  output_113->qinfo = (struct csinn_quant_info *)(params_base + 2983008);
  output_113->quant_channel = 1;
  struct csinn_relu_params *params_113 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_113->n = 6;
  params_113->base.name = "clip_Clip_93_143";
  params_113->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_112, output_113, params_113);
  struct csinn_tensor *output_114 = csinn_alloc_tensor(sess);
  output_114->name = "output_114";
  output_114->dtype = CSINN_DTYPE_FLOAT16;
  output_114->layout = CSINN_LAYOUT_NCHW;
  output_114->dim[0] = 1;
  output_114->dim[1] = 320;
  output_114->dim[2] = 7;
  output_114->dim[3] = 7;
  output_114->dim_count = 4;
  output_114->qinfo = (struct csinn_quant_info *)(params_base + 2983032);
  output_114->quant_channel = 1;
  struct csinn_tensor *kernel_114 = csinn_alloc_tensor(sess);
  kernel_114->name = "kernel_114";
  kernel_114->data = params_base + 2983112;
  kernel_114->is_const = 1;
  kernel_114->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_114->dtype = CSINN_DTYPE_FLOAT16;
  kernel_114->layout = CSINN_LAYOUT_OIHW;
  kernel_114->dim[0] = 320;
  kernel_114->dim[1] = 960;
  kernel_114->dim[2] = 1;
  kernel_114->dim[3] = 1;
  kernel_114->dim_count = 4;
  kernel_114->qinfo = (struct csinn_quant_info *)(params_base + 2983056);
  kernel_114->quant_channel = 1;
  struct csinn_tensor *bias_114 = csinn_alloc_tensor(sess);
  bias_114->name = "bias_114";
  bias_114->data = params_base + 3597536;
  bias_114->is_const = 1;
  bias_114->dtype = CSINN_DTYPE_FLOAT16;
  bias_114->layout = CSINN_LAYOUT_O;
  bias_114->dim[0] = 320;
  bias_114->dim_count = 1;
  bias_114->qinfo = (struct csinn_quant_info *)(params_base + 3597512);
  bias_114->quant_channel = 1;
  struct csinn_conv2d_params *params_114 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_114->group = 1;
  params_114->stride_height = 1;
  params_114->stride_width = 1;
  params_114->dilation_height = 1;
  params_114->dilation_width = 1;
  params_114->conv_extra.kernel_tm = NULL;
  params_114->conv_extra.conv_mode = CSINN_DIRECT;
  params_114->pad_top = 0;
  params_114->pad_left = 0;
  params_114->pad_down = 0;
  params_114->pad_right = 0;
  params_114->base.name = "conv2d_Conv_94_PART_0_144_fuse_bias_add_Conv_94_145";
  params_114->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_113, output_114, kernel_114, bias_114, params_114);
  struct csinn_tensor *output_115 = csinn_alloc_tensor(sess);
  output_115->name = "output_115";
  output_115->dtype = CSINN_DTYPE_FLOAT16;
  output_115->layout = CSINN_LAYOUT_NCHW;
  output_115->dim[0] = 1;
  output_115->dim[1] = 1280;
  output_115->dim[2] = 7;
  output_115->dim[3] = 7;
  output_115->dim_count = 4;
  output_115->qinfo = (struct csinn_quant_info *)(params_base + 3598176);
  output_115->quant_channel = 1;
  struct csinn_tensor *kernel_115 = csinn_alloc_tensor(sess);
  kernel_115->name = "kernel_115";
  kernel_115->data = params_base + 3598248;
  kernel_115->is_const = 1;
  kernel_115->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_115->dtype = CSINN_DTYPE_FLOAT16;
  kernel_115->layout = CSINN_LAYOUT_OIHW;
  kernel_115->dim[0] = 1280;
  kernel_115->dim[1] = 320;
  kernel_115->dim[2] = 1;
  kernel_115->dim[3] = 1;
  kernel_115->dim_count = 4;
  kernel_115->qinfo = (struct csinn_quant_info *)(params_base + 3598200);
  kernel_115->quant_channel = 1;
  struct csinn_tensor *bias_115 = csinn_alloc_tensor(sess);
  bias_115->name = "bias_115";
  bias_115->data = params_base + 4417472;
  bias_115->is_const = 1;
  bias_115->dtype = CSINN_DTYPE_FLOAT16;
  bias_115->layout = CSINN_LAYOUT_O;
  bias_115->dim[0] = 1280;
  bias_115->dim_count = 1;
  bias_115->qinfo = (struct csinn_quant_info *)(params_base + 4417448);
  bias_115->quant_channel = 1;
  struct csinn_conv2d_params *params_115 = csinn_alloc_params(sizeof(struct csinn_conv2d_params), sess);
  params_115->group = 1;
  params_115->stride_height = 1;
  params_115->stride_width = 1;
  params_115->dilation_height = 1;
  params_115->dilation_width = 1;
  params_115->conv_extra.kernel_tm = NULL;
  params_115->conv_extra.conv_mode = CSINN_DIRECT;
  params_115->pad_top = 0;
  params_115->pad_left = 0;
  params_115->pad_down = 0;
  params_115->pad_right = 0;
  params_115->base.name = "conv2d_Conv_95_PART_0_146_fuse_bias_add_Conv_95_147";
  params_115->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_conv2d_init(output_114, output_115, kernel_115, bias_115, params_115);
  struct csinn_tensor *output_116 = csinn_alloc_tensor(sess);
  output_116->name = "output_116";
  output_116->dtype = CSINN_DTYPE_FLOAT16;
  output_116->layout = CSINN_LAYOUT_NCHW;
  output_116->dim[0] = 1;
  output_116->dim[1] = 1280;
  output_116->dim[2] = 7;
  output_116->dim[3] = 7;
  output_116->dim_count = 4;
  output_116->qinfo = (struct csinn_quant_info *)(params_base + 4420032);
  output_116->quant_channel = 1;
  struct csinn_relu_params *params_116 = csinn_alloc_params(sizeof(struct csinn_relu_params), sess);
  params_116->n = 6;
  params_116->base.name = "clip_Clip_96_148";
  params_116->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_relu6_init(output_115, output_116, params_116);
  struct csinn_tensor *output_117 = csinn_alloc_tensor(sess);
  output_117->name = "output_117";
  output_117->dtype = CSINN_DTYPE_FLOAT16;
  output_117->layout = CSINN_LAYOUT_NCHW;
  output_117->dim[0] = 1;
  output_117->dim[1] = 1280;
  output_117->dim[2] = 1;
  output_117->dim[3] = 1;
  output_117->dim_count = 4;
  output_117->qinfo = (struct csinn_quant_info *)(params_base + 4420056);
  output_117->quant_channel = 1;
  struct csinn_pool_params *params_117 = csinn_alloc_params(sizeof(struct csinn_pool_params), sess);
  params_117->base.name = "global_avg_pool2d_GlobalAveragePool_97_149";
  params_117->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_global_avgpool2d_init(output_116, output_117, params_117);
  int32_t *shape_118 = malloc(2 * 4);
  shape_118[0] = 1;
  shape_118[1] = -1;
  struct csinn_tensor *output_118 = csinn_alloc_tensor(sess);
  output_118->name = "output_118";
  output_118->dtype = CSINN_DTYPE_FLOAT16;
  output_118->layout = CSINN_LAYOUT_NC;
  output_118->dim[0] = 1;
  output_118->dim[1] = 1280;
  output_118->dim_count = 2;
  output_118->qinfo = (struct csinn_quant_info *)(params_base + 4420080);
  output_118->quant_channel = 1;
  struct csinn_reshape_params *params_118 = csinn_alloc_params(sizeof(struct csinn_reshape_params), sess);
  params_118->shape = shape_118;
  params_118->shape_num = 2;
  params_118->base.name = "reshape_Reshape_103_150";
  params_118->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_reshape_init(output_117, output_118, params_118);
  struct csinn_tensor *output_119 = csinn_alloc_tensor(sess);
  output_119->name = "dense_Gemm_104_PART_0_151_fuse_add_output@@Gemm_104_152_119";
  output_119->dtype = CSINN_DTYPE_FLOAT16;
  output_119->layout = CSINN_LAYOUT_NC;
  output_119->dim[0] = 1;
  output_119->dim[1] = 1000;
  output_119->dim_count = 2;
  output_119->qinfo = (struct csinn_quant_info *)(params_base + 4420104);
  output_119->quant_channel = 1;
  struct csinn_tensor *kernel_119 = csinn_alloc_tensor(sess);
  kernel_119->name = "kernel_119";
  kernel_119->data = params_base + 4420168;
  kernel_119->is_const = 1;
  kernel_119->mtype = CSINN_MEM_TYPE_CPU_ALIGNED;
  kernel_119->dtype = CSINN_DTYPE_FLOAT16;
  kernel_119->layout = CSINN_LAYOUT_OI;
  kernel_119->dim[0] = 1000;
  kernel_119->dim[1] = 1280;
  kernel_119->dim_count = 2;
  kernel_119->qinfo = (struct csinn_quant_info *)(params_base + 4420128);
  kernel_119->quant_channel = 1;
  struct csinn_tensor *bias_119 = csinn_alloc_tensor(sess);
  bias_119->name = "bias_119";
  bias_119->data = params_base + 6980192;
  bias_119->is_const = 1;
  bias_119->dtype = CSINN_DTYPE_FLOAT16;
  bias_119->layout = CSINN_LAYOUT_O;
  bias_119->dim[0] = 1000;
  bias_119->dim_count = 1;
  bias_119->qinfo = (struct csinn_quant_info *)(params_base + 6980168);
  bias_119->quant_channel = 1;
  struct csinn_fc_params *params_119 = csinn_alloc_params(sizeof(struct csinn_fc_params), sess);
  params_119->units = 1000;
  params_119->base.name = "dense_Gemm_104_PART_0_151_fuse_add_output@@Gemm_104_152";
  params_119->base.quant_type = CSINN_QUANT_FLOAT16;
  csinn_fullyconnected_init(output_118, output_119, kernel_119, bias_119, params_119);
  csinn_set_tensor_entry(input, sess);
  csinn_set_input(0, input, sess);

  csinn_conv2d(input, output_0, kernel_0, bias_0, params_0);
  csinn_relu6(output_0, output_1, params_1);
  csinn_conv2d(output_1, output_2, kernel_2, bias_2, params_2);
  csinn_relu6(output_2, output_3, params_3);
  csinn_conv2d(output_3, output_4, kernel_4, bias_4, params_4);
  csinn_conv2d(output_4, output_5, kernel_5, bias_5, params_5);
  csinn_relu6(output_5, output_6, params_6);
  csinn_conv2d(output_6, output_7, kernel_7, bias_7, params_7);
  csinn_relu6(output_7, output_8, params_8);
  csinn_conv2d(output_8, output_9, kernel_9, bias_9, params_9);
  csinn_conv2d(output_9, output_11, kernel_11, bias_11, params_11);
  csinn_relu6(output_11, output_12, params_12);
  csinn_conv2d(output_12, output_13, kernel_13, bias_13, params_13);
  csinn_relu6(output_13, output_14, params_14);
  csinn_conv2d(output_14, output_15, kernel_15, bias_15, params_15);
  csinn_add(output_9, output_15, output_16, params_16);
  csinn_conv2d(output_16, output_18, kernel_18, bias_18, params_18);
  csinn_relu6(output_18, output_19, params_19);
  csinn_conv2d(output_19, output_20, kernel_20, bias_20, params_20);
  csinn_relu6(output_20, output_21, params_21);
  csinn_conv2d(output_21, output_22, kernel_22, bias_22, params_22);
  csinn_conv2d(output_22, output_24, kernel_24, bias_24, params_24);
  csinn_relu6(output_24, output_25, params_25);
  csinn_conv2d(output_25, output_26, kernel_26, bias_26, params_26);
  csinn_relu6(output_26, output_27, params_27);
  csinn_conv2d(output_27, output_28, kernel_28, bias_28, params_28);
  csinn_add(output_22, output_28, output_29, params_29);
  csinn_conv2d(output_29, output_32, kernel_32, bias_32, params_32);
  csinn_relu6(output_32, output_33, params_33);
  csinn_conv2d(output_33, output_34, kernel_34, bias_34, params_34);
  csinn_relu6(output_34, output_35, params_35);
  csinn_conv2d(output_35, output_36, kernel_36, bias_36, params_36);
  csinn_add(output_29, output_36, output_37, params_37);
  csinn_conv2d(output_37, output_39, kernel_39, bias_39, params_39);
  csinn_relu6(output_39, output_40, params_40);
  csinn_conv2d(output_40, output_41, kernel_41, bias_41, params_41);
  csinn_relu6(output_41, output_42, params_42);
  csinn_conv2d(output_42, output_43, kernel_43, bias_43, params_43);
  csinn_conv2d(output_43, output_45, kernel_45, bias_45, params_45);
  csinn_relu6(output_45, output_46, params_46);
  csinn_conv2d(output_46, output_47, kernel_47, bias_47, params_47);
  csinn_relu6(output_47, output_48, params_48);
  csinn_conv2d(output_48, output_49, kernel_49, bias_49, params_49);
  csinn_add(output_43, output_49, output_50, params_50);
  csinn_conv2d(output_50, output_53, kernel_53, bias_53, params_53);
  csinn_relu6(output_53, output_54, params_54);
  csinn_conv2d(output_54, output_55, kernel_55, bias_55, params_55);
  csinn_relu6(output_55, output_56, params_56);
  csinn_conv2d(output_56, output_57, kernel_57, bias_57, params_57);
  csinn_add(output_50, output_57, output_58, params_58);
  csinn_conv2d(output_58, output_61, kernel_61, bias_61, params_61);
  csinn_relu6(output_61, output_62, params_62);
  csinn_conv2d(output_62, output_63, kernel_63, bias_63, params_63);
  csinn_relu6(output_63, output_64, params_64);
  csinn_conv2d(output_64, output_65, kernel_65, bias_65, params_65);
  csinn_add(output_58, output_65, output_66, params_66);
  csinn_conv2d(output_66, output_68, kernel_68, bias_68, params_68);
  csinn_relu6(output_68, output_69, params_69);
  csinn_conv2d(output_69, output_70, kernel_70, bias_70, params_70);
  csinn_relu6(output_70, output_71, params_71);
  csinn_conv2d(output_71, output_72, kernel_72, bias_72, params_72);
  csinn_conv2d(output_72, output_74, kernel_74, bias_74, params_74);
  csinn_relu6(output_74, output_75, params_75);
  csinn_conv2d(output_75, output_76, kernel_76, bias_76, params_76);
  csinn_relu6(output_76, output_77, params_77);
  csinn_conv2d(output_77, output_78, kernel_78, bias_78, params_78);
  csinn_add(output_72, output_78, output_79, params_79);
  csinn_conv2d(output_79, output_82, kernel_82, bias_82, params_82);
  csinn_relu6(output_82, output_83, params_83);
  csinn_conv2d(output_83, output_84, kernel_84, bias_84, params_84);
  csinn_relu6(output_84, output_85, params_85);
  csinn_conv2d(output_85, output_86, kernel_86, bias_86, params_86);
  csinn_add(output_79, output_86, output_87, params_87);
  csinn_conv2d(output_87, output_89, kernel_89, bias_89, params_89);
  csinn_relu6(output_89, output_90, params_90);
  csinn_conv2d(output_90, output_91, kernel_91, bias_91, params_91);
  csinn_relu6(output_91, output_92, params_92);
  csinn_conv2d(output_92, output_93, kernel_93, bias_93, params_93);
  csinn_conv2d(output_93, output_95, kernel_95, bias_95, params_95);
  csinn_relu6(output_95, output_96, params_96);
  csinn_conv2d(output_96, output_97, kernel_97, bias_97, params_97);
  csinn_relu6(output_97, output_98, params_98);
  csinn_conv2d(output_98, output_99, kernel_99, bias_99, params_99);
  csinn_add(output_93, output_99, output_100, params_100);
  csinn_conv2d(output_100, output_103, kernel_103, bias_103, params_103);
  csinn_relu6(output_103, output_104, params_104);
  csinn_conv2d(output_104, output_105, kernel_105, bias_105, params_105);
  csinn_relu6(output_105, output_106, params_106);
  csinn_conv2d(output_106, output_107, kernel_107, bias_107, params_107);
  csinn_add(output_100, output_107, output_108, params_108);
  csinn_conv2d(output_108, output_110, kernel_110, bias_110, params_110);
  csinn_relu6(output_110, output_111, params_111);
  csinn_conv2d(output_111, output_112, kernel_112, bias_112, params_112);
  csinn_relu6(output_112, output_113, params_113);
  csinn_conv2d(output_113, output_114, kernel_114, bias_114, params_114);
  csinn_conv2d(output_114, output_115, kernel_115, bias_115, params_115);
  csinn_relu6(output_115, output_116, params_116);
  csinn_global_avgpool2d(output_116, output_117, params_117);
  csinn_reshape(output_117, output_118, params_118);
  csinn_fullyconnected(output_118, output_119, kernel_119, bias_119, params_119);
  csinn_set_output(0, output_119, sess);

  csinn_session_setup(sess);
  return sess;
}
/* Run one inference pass on an already-built session.
 *
 * input_tensors: array holding the session's input tensors; this model was
 *                built with exactly one input (see csinn_set_input_number in
 *                csinn_()), so only element [0] is consumed here.
 * sess:          opaque handle returned by csinn_() (a struct csinn_session *).
 *
 * The call first rebinds input slot 0 to the caller-supplied tensor, then
 * executes the compiled graph; outputs are fetched by the caller via the
 * csinn_get_output API afterwards.
 */
void csinn_update_input_and_run(struct csinn_tensor **input_tensors , void *sess) {
  /* Rebind the single model input, then execute the graph. */
  csinn_update_input(0, input_tensors[0], sess);
  csinn_session_run(sess);
}
