diff --git "a/openai_whisper-large-v2/AudioEncoder.mlmodelc/model.mil" "b/openai_whisper-large-v2/AudioEncoder.mlmodelc/model.mil" new file mode 100644--- /dev/null +++ "b/openai_whisper-large-v2/AudioEncoder.mlmodelc/model.mil" @@ -0,0 +1,3086 @@ +program(1.0) +[buildInfo = dict, tensor>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.2.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.1"}})] +{ + func main(tensor melspectrogram_features) { + tensor var_90 = const()[name = tensor("op_90"), val = tensor([1, 1])]; + tensor var_96 = const()[name = tensor("op_96"), val = tensor([1, 1])]; + tensor var_101 = const()[name = tensor("op_101"), val = tensor(1)]; + tensor var_106_pad_type_0 = const()[name = tensor("op_106_pad_type_0"), val = tensor("custom")]; + tensor var_106_pad_0 = const()[name = tensor("op_106_pad_0"), val = tensor([0, 0, 1, 1])]; + tensor var_81_to_fp16 = const()[name = tensor("op_81_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(64)))]; + tensor var_87_to_fp16 = const()[name = tensor("op_87_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(614528)))]; + tensor var_106_cast_fp16 = conv(bias = var_87_to_fp16, dilations = var_96, groups = var_101, pad = var_106_pad_0, pad_type = var_106_pad_type_0, strides = var_90, weight = var_81_to_fp16, x = melspectrogram_features)[name = tensor("op_106_cast_fp16")]; + tensor hidden_states_1_mode_0 = const()[name = tensor("hidden_states_1_mode_0"), val = tensor("EXACT")]; + tensor hidden_states_1_cast_fp16 = gelu(mode = hidden_states_1_mode_0, x = var_106_cast_fp16)[name = tensor("hidden_states_1_cast_fp16")]; + tensor var_130 = const()[name = tensor("op_130"), val = tensor([2, 2])]; + tensor var_136 = const()[name = tensor("op_136"), val = tensor([1, 1])]; + tensor var_141 = const()[name = tensor("op_141"), val = tensor(1)]; + tensor var_146_pad_type_0 = const()[name = tensor("op_146_pad_type_0"), val = tensor("custom")]; + tensor var_146_pad_0 = const()[name = tensor("op_146_pad_0"), val = tensor([0, 0, 1, 1])]; + tensor var_121_to_fp16 = const()[name = tensor("op_121_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(617152)))]; + tensor var_127_to_fp16 = const()[name = tensor("op_127_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(10447616)))]; + tensor var_146_cast_fp16 = conv(bias = var_127_to_fp16, dilations = var_136, groups = var_141, pad = var_146_pad_0, pad_type = var_146_pad_type_0, strides = var_130, weight = var_121_to_fp16, x = hidden_states_1_cast_fp16)[name = tensor("op_146_cast_fp16")]; + tensor hidden_states_3_mode_0 = const()[name = tensor("hidden_states_3_mode_0"), val = tensor("EXACT")]; + tensor hidden_states_3_cast_fp16 = gelu(mode = hidden_states_3_mode_0, x = var_146_cast_fp16)[name = tensor("hidden_states_3_cast_fp16")]; + tensor var_164_to_fp16 = const()[name = tensor("op_164_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(10450240)))]; + tensor inputs_1_cast_fp16 = add(x = hidden_states_3_cast_fp16, y = var_164_to_fp16)[name = tensor("inputs_1_cast_fp16")]; + tensor var_178 = const()[name = tensor("op_178"), val = tensor(3)]; + tensor var_180 = const()[name = tensor("op_180"), val = tensor(1)]; + tensor var_181 = const()[name = tensor("op_181"), val = tensor(true)]; + tensor var_191 = 
const()[name = tensor("op_191"), val = tensor([1])]; + tensor channels_mean_1_cast_fp16 = reduce_mean(axes = var_191, keep_dims = var_181, x = inputs_1_cast_fp16)[name = tensor("channels_mean_1_cast_fp16")]; + tensor zero_mean_1_cast_fp16 = sub(x = inputs_1_cast_fp16, y = channels_mean_1_cast_fp16)[name = tensor("zero_mean_1_cast_fp16")]; + tensor zero_mean_sq_1_cast_fp16 = mul(x = zero_mean_1_cast_fp16, y = zero_mean_1_cast_fp16)[name = tensor("zero_mean_sq_1_cast_fp16")]; + tensor var_195 = const()[name = tensor("op_195"), val = tensor([1])]; + tensor var_196_cast_fp16 = reduce_mean(axes = var_195, keep_dims = var_181, x = zero_mean_sq_1_cast_fp16)[name = tensor("op_196_cast_fp16")]; + tensor var_197_to_fp16 = const()[name = tensor("op_197_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_198_cast_fp16 = add(x = var_196_cast_fp16, y = var_197_to_fp16)[name = tensor("op_198_cast_fp16")]; + tensor denom_1_epsilon_0_to_fp16 = const()[name = tensor("denom_1_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_1_cast_fp16 = rsqrt(epsilon = denom_1_epsilon_0_to_fp16, x = var_198_cast_fp16)[name = tensor("denom_1_cast_fp16")]; + tensor out_1_cast_fp16 = mul(x = zero_mean_1_cast_fp16, y = denom_1_cast_fp16)[name = tensor("out_1_cast_fp16")]; + tensor obj_1_mean_0_to_fp16 = const()[name = tensor("obj_1_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14290304)))]; + tensor obj_1_variance_0_to_fp16 = const()[name = tensor("obj_1_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14292928)))]; + tensor obj_1_gamma_0_to_fp16 = const()[name = tensor("obj_1_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14295552)))]; + tensor obj_1_beta_0_to_fp16 = const()[name = tensor("obj_1_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14298176)))]; + tensor obj_1_epsilon_0_to_fp16 = const()[name = tensor("obj_1_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_1_cast_fp16 = batch_norm(beta = obj_1_beta_0_to_fp16, epsilon = obj_1_epsilon_0_to_fp16, gamma = obj_1_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_1_cast_fp16)[name = tensor("obj_1_cast_fp16")]; + tensor var_213 = const()[name = tensor("op_213"), val = tensor([1, 1])]; + tensor var_215 = const()[name = tensor("op_215"), val = tensor([1, 1])]; + tensor query_1_pad_type_0 = const()[name = tensor("query_1_pad_type_0"), val = tensor("custom")]; + tensor query_1_pad_0 = const()[name = tensor("query_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_0_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_0_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14300800)))]; + tensor layers_0_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_0_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(17577664)))]; + tensor query_1_cast_fp16 = conv(bias = layers_0_self_attn_q_proj_bias_to_fp16, dilations = var_215, groups = var_180, pad = query_1_pad_0, pad_type = query_1_pad_type_0, strides = var_213, weight = layers_0_self_attn_q_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor("query_1_cast_fp16")]; + tensor var_219 = const()[name = tensor("op_219"), val = tensor([1, 1])]; + tensor var_221 = const()[name = 
tensor("op_221"), val = tensor([1, 1])]; + tensor key_1_pad_type_0 = const()[name = tensor("key_1_pad_type_0"), val = tensor("custom")]; + tensor key_1_pad_0 = const()[name = tensor("key_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_0_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_0_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(17580288)))]; + tensor key_1_cast_fp16 = conv(dilations = var_221, groups = var_180, pad = key_1_pad_0, pad_type = key_1_pad_type_0, strides = var_219, weight = layers_0_self_attn_k_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor("key_1_cast_fp16")]; + tensor var_226 = const()[name = tensor("op_226"), val = tensor([1, 1])]; + tensor var_228 = const()[name = tensor("op_228"), val = tensor([1, 1])]; + tensor value_1_pad_type_0 = const()[name = tensor("value_1_pad_type_0"), val = tensor("custom")]; + tensor value_1_pad_0 = const()[name = tensor("value_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_0_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_0_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20857152)))]; + tensor layers_0_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_0_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(24134016)))]; + tensor value_1_cast_fp16 = conv(bias = layers_0_self_attn_v_proj_bias_to_fp16, dilations = var_228, groups = var_180, pad = value_1_pad_0, pad_type = value_1_pad_type_0, strides = var_226, weight = layers_0_self_attn_v_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor("value_1_cast_fp16")]; + tensor var_232 = const()[name = tensor("op_232"), val = tensor([1, 20, 64, -1])]; + tensor var_233_cast_fp16 = reshape(shape = var_232, x = query_1_cast_fp16)[name = tensor("op_233_cast_fp16")]; + tensor var_234_to_fp16 = const()[name = tensor("op_234_to_fp16"), val = tensor(0x1p-3)]; + tensor var_235_cast_fp16 = mul(x = var_233_cast_fp16, y = var_234_to_fp16)[name = tensor("op_235_cast_fp16")]; + tensor var_236 = const()[name = tensor("op_236"), val = tensor([1, 20, 64, -1])]; + tensor var_237_cast_fp16 = reshape(shape = var_236, x = key_1_cast_fp16)[name = tensor("op_237_cast_fp16")]; + tensor mh_w_1_transpose_x_0 = const()[name = tensor("mh_w_1_transpose_x_0"), val = tensor(true)]; + tensor mh_w_1_transpose_y_0 = const()[name = tensor("mh_w_1_transpose_y_0"), val = tensor(false)]; + tensor mh_w_1_cast_fp16 = matmul(transpose_x = mh_w_1_transpose_x_0, transpose_y = mh_w_1_transpose_y_0, x = var_235_cast_fp16, y = var_237_cast_fp16)[name = tensor("mh_w_1_cast_fp16")]; + tensor var_240_cast_fp16 = softmax(axis = var_178, x = mh_w_1_cast_fp16)[name = tensor("op_240_cast_fp16")]; + tensor var_241 = const()[name = tensor("op_241"), val = tensor([1, 20, 64, -1])]; + tensor var_242_cast_fp16 = reshape(shape = var_241, x = value_1_cast_fp16)[name = tensor("op_242_cast_fp16")]; + tensor attn_1_transpose_x_0 = const()[name = tensor("attn_1_transpose_x_0"), val = tensor(false)]; + tensor attn_1_transpose_y_0 = const()[name = tensor("attn_1_transpose_y_0"), val = tensor(true)]; + tensor attn_1_cast_fp16 = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = var_242_cast_fp16, y = var_240_cast_fp16)[name = tensor("attn_1_cast_fp16")]; + tensor var_245 = const()[name = tensor("op_245"), val = tensor([1, 1280, 1, -1])]; + tensor 
input_1_cast_fp16 = reshape(shape = var_245, x = attn_1_cast_fp16)[name = tensor("input_1_cast_fp16")]; + tensor var_249 = const()[name = tensor("op_249"), val = tensor([1, 1])]; + tensor var_251 = const()[name = tensor("op_251"), val = tensor([1, 1])]; + tensor obj_3_pad_type_0 = const()[name = tensor("obj_3_pad_type_0"), val = tensor("custom")]; + tensor obj_3_pad_0 = const()[name = tensor("obj_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_0_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_0_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(24136640)))]; + tensor layers_0_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_0_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(27413504)))]; + tensor obj_3_cast_fp16 = conv(bias = layers_0_self_attn_o_proj_bias_to_fp16, dilations = var_251, groups = var_180, pad = obj_3_pad_0, pad_type = obj_3_pad_type_0, strides = var_249, weight = layers_0_self_attn_o_proj_weight_to_fp16, x = input_1_cast_fp16)[name = tensor("obj_3_cast_fp16")]; + tensor inputs_3_cast_fp16 = add(x = inputs_1_cast_fp16, y = obj_3_cast_fp16)[name = tensor("inputs_3_cast_fp16")]; + tensor var_257 = const()[name = tensor("op_257"), val = tensor([1])]; + tensor channels_mean_3_cast_fp16 = reduce_mean(axes = var_257, keep_dims = var_181, x = inputs_3_cast_fp16)[name = tensor("channels_mean_3_cast_fp16")]; + tensor zero_mean_3_cast_fp16 = sub(x = inputs_3_cast_fp16, y = channels_mean_3_cast_fp16)[name = tensor("zero_mean_3_cast_fp16")]; + tensor zero_mean_sq_3_cast_fp16 = mul(x = zero_mean_3_cast_fp16, y = zero_mean_3_cast_fp16)[name = tensor("zero_mean_sq_3_cast_fp16")]; + tensor var_261 = const()[name = tensor("op_261"), val = tensor([1])]; + tensor var_262_cast_fp16 = reduce_mean(axes = var_261, keep_dims = var_181, x = zero_mean_sq_3_cast_fp16)[name = tensor("op_262_cast_fp16")]; + tensor var_263_to_fp16 = const()[name = tensor("op_263_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_264_cast_fp16 = add(x = var_262_cast_fp16, y = var_263_to_fp16)[name = tensor("op_264_cast_fp16")]; + tensor denom_3_epsilon_0_to_fp16 = const()[name = tensor("denom_3_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_3_cast_fp16 = rsqrt(epsilon = denom_3_epsilon_0_to_fp16, x = var_264_cast_fp16)[name = tensor("denom_3_cast_fp16")]; + tensor out_3_cast_fp16 = mul(x = zero_mean_3_cast_fp16, y = denom_3_cast_fp16)[name = tensor("out_3_cast_fp16")]; + tensor input_3_gamma_0_to_fp16 = const()[name = tensor("input_3_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(27416128)))]; + tensor input_3_beta_0_to_fp16 = const()[name = tensor("input_3_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(27418752)))]; + tensor input_3_epsilon_0_to_fp16 = const()[name = tensor("input_3_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_3_cast_fp16 = batch_norm(beta = input_3_beta_0_to_fp16, epsilon = input_3_epsilon_0_to_fp16, gamma = input_3_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_3_cast_fp16)[name = tensor("input_3_cast_fp16")]; + tensor var_275 = const()[name = tensor("op_275"), val = tensor([1, 1])]; + tensor var_277 = const()[name = tensor("op_277"), val = tensor([1, 1])]; + tensor input_5_pad_type_0 = const()[name = tensor("input_5_pad_type_0"), val = 
tensor("custom")]; + tensor input_5_pad_0 = const()[name = tensor("input_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_0_fc1_weight_to_fp16 = const()[name = tensor("layers_0_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(27421376)))]; + tensor layers_0_fc1_bias_to_fp16 = const()[name = tensor("layers_0_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(40528640)))]; + tensor input_5_cast_fp16 = conv(bias = layers_0_fc1_bias_to_fp16, dilations = var_277, groups = var_180, pad = input_5_pad_0, pad_type = input_5_pad_type_0, strides = var_275, weight = layers_0_fc1_weight_to_fp16, x = input_3_cast_fp16)[name = tensor("input_5_cast_fp16")]; + tensor input_7_mode_0 = const()[name = tensor("input_7_mode_0"), val = tensor("EXACT")]; + tensor input_7_cast_fp16 = gelu(mode = input_7_mode_0, x = input_5_cast_fp16)[name = tensor("input_7_cast_fp16")]; + tensor var_283 = const()[name = tensor("op_283"), val = tensor([1, 1])]; + tensor var_285 = const()[name = tensor("op_285"), val = tensor([1, 1])]; + tensor hidden_states_5_pad_type_0 = const()[name = tensor("hidden_states_5_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_5_pad_0 = const()[name = tensor("hidden_states_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_0_fc2_weight_to_fp16 = const()[name = tensor("layers_0_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(40538944)))]; + tensor layers_0_fc2_bias_to_fp16 = const()[name = tensor("layers_0_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(53646208)))]; + tensor hidden_states_5_cast_fp16 = conv(bias = layers_0_fc2_bias_to_fp16, dilations = var_285, groups = var_180, pad = hidden_states_5_pad_0, pad_type = hidden_states_5_pad_type_0, strides = var_283, weight = layers_0_fc2_weight_to_fp16, x = input_7_cast_fp16)[name = tensor("hidden_states_5_cast_fp16")]; + tensor inputs_5_cast_fp16 = add(x = inputs_3_cast_fp16, y = hidden_states_5_cast_fp16)[name = tensor("inputs_5_cast_fp16")]; + tensor var_296 = const()[name = tensor("op_296"), val = tensor(3)]; + tensor var_298 = const()[name = tensor("op_298"), val = tensor(1)]; + tensor var_299 = const()[name = tensor("op_299"), val = tensor(true)]; + tensor var_309 = const()[name = tensor("op_309"), val = tensor([1])]; + tensor channels_mean_5_cast_fp16 = reduce_mean(axes = var_309, keep_dims = var_299, x = inputs_5_cast_fp16)[name = tensor("channels_mean_5_cast_fp16")]; + tensor zero_mean_5_cast_fp16 = sub(x = inputs_5_cast_fp16, y = channels_mean_5_cast_fp16)[name = tensor("zero_mean_5_cast_fp16")]; + tensor zero_mean_sq_5_cast_fp16 = mul(x = zero_mean_5_cast_fp16, y = zero_mean_5_cast_fp16)[name = tensor("zero_mean_sq_5_cast_fp16")]; + tensor var_313 = const()[name = tensor("op_313"), val = tensor([1])]; + tensor var_314_cast_fp16 = reduce_mean(axes = var_313, keep_dims = var_299, x = zero_mean_sq_5_cast_fp16)[name = tensor("op_314_cast_fp16")]; + tensor var_315_to_fp16 = const()[name = tensor("op_315_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_316_cast_fp16 = add(x = var_314_cast_fp16, y = var_315_to_fp16)[name = tensor("op_316_cast_fp16")]; + tensor denom_5_epsilon_0_to_fp16 = const()[name = tensor("denom_5_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_5_cast_fp16 = rsqrt(epsilon = denom_5_epsilon_0_to_fp16, x = var_316_cast_fp16)[name = tensor("denom_5_cast_fp16")]; 
+ tensor out_5_cast_fp16 = mul(x = zero_mean_5_cast_fp16, y = denom_5_cast_fp16)[name = tensor("out_5_cast_fp16")]; + tensor obj_5_gamma_0_to_fp16 = const()[name = tensor("obj_5_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(53648832)))]; + tensor obj_5_beta_0_to_fp16 = const()[name = tensor("obj_5_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(53651456)))]; + tensor obj_5_epsilon_0_to_fp16 = const()[name = tensor("obj_5_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_5_cast_fp16 = batch_norm(beta = obj_5_beta_0_to_fp16, epsilon = obj_5_epsilon_0_to_fp16, gamma = obj_5_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_5_cast_fp16)[name = tensor("obj_5_cast_fp16")]; + tensor var_331 = const()[name = tensor("op_331"), val = tensor([1, 1])]; + tensor var_333 = const()[name = tensor("op_333"), val = tensor([1, 1])]; + tensor query_3_pad_type_0 = const()[name = tensor("query_3_pad_type_0"), val = tensor("custom")]; + tensor query_3_pad_0 = const()[name = tensor("query_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_1_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_1_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(53654080)))]; + tensor layers_1_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_1_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(56930944)))]; + tensor query_3_cast_fp16 = conv(bias = layers_1_self_attn_q_proj_bias_to_fp16, dilations = var_333, groups = var_298, pad = query_3_pad_0, pad_type = query_3_pad_type_0, strides = var_331, weight = layers_1_self_attn_q_proj_weight_to_fp16, x = obj_5_cast_fp16)[name = tensor("query_3_cast_fp16")]; + tensor var_337 = const()[name = tensor("op_337"), val = tensor([1, 1])]; + tensor var_339 = const()[name = tensor("op_339"), val = tensor([1, 1])]; + tensor key_3_pad_type_0 = const()[name = tensor("key_3_pad_type_0"), val = tensor("custom")]; + tensor key_3_pad_0 = const()[name = tensor("key_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_1_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_1_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(56933568)))]; + tensor key_3_cast_fp16 = conv(dilations = var_339, groups = var_298, pad = key_3_pad_0, pad_type = key_3_pad_type_0, strides = var_337, weight = layers_1_self_attn_k_proj_weight_to_fp16, x = obj_5_cast_fp16)[name = tensor("key_3_cast_fp16")]; + tensor var_344 = const()[name = tensor("op_344"), val = tensor([1, 1])]; + tensor var_346 = const()[name = tensor("op_346"), val = tensor([1, 1])]; + tensor value_3_pad_type_0 = const()[name = tensor("value_3_pad_type_0"), val = tensor("custom")]; + tensor value_3_pad_0 = const()[name = tensor("value_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_1_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_1_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60210432)))]; + tensor layers_1_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_1_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63487296)))]; + tensor value_3_cast_fp16 = conv(bias = 
layers_1_self_attn_v_proj_bias_to_fp16, dilations = var_346, groups = var_298, pad = value_3_pad_0, pad_type = value_3_pad_type_0, strides = var_344, weight = layers_1_self_attn_v_proj_weight_to_fp16, x = obj_5_cast_fp16)[name = tensor("value_3_cast_fp16")]; + tensor var_350 = const()[name = tensor("op_350"), val = tensor([1, 20, 64, -1])]; + tensor var_351_cast_fp16 = reshape(shape = var_350, x = query_3_cast_fp16)[name = tensor("op_351_cast_fp16")]; + tensor var_352_to_fp16 = const()[name = tensor("op_352_to_fp16"), val = tensor(0x1p-3)]; + tensor var_353_cast_fp16 = mul(x = var_351_cast_fp16, y = var_352_to_fp16)[name = tensor("op_353_cast_fp16")]; + tensor var_354 = const()[name = tensor("op_354"), val = tensor([1, 20, 64, -1])]; + tensor var_355_cast_fp16 = reshape(shape = var_354, x = key_3_cast_fp16)[name = tensor("op_355_cast_fp16")]; + tensor mh_w_3_transpose_x_0 = const()[name = tensor("mh_w_3_transpose_x_0"), val = tensor(true)]; + tensor mh_w_3_transpose_y_0 = const()[name = tensor("mh_w_3_transpose_y_0"), val = tensor(false)]; + tensor mh_w_3_cast_fp16 = matmul(transpose_x = mh_w_3_transpose_x_0, transpose_y = mh_w_3_transpose_y_0, x = var_353_cast_fp16, y = var_355_cast_fp16)[name = tensor("mh_w_3_cast_fp16")]; + tensor var_358_cast_fp16 = softmax(axis = var_296, x = mh_w_3_cast_fp16)[name = tensor("op_358_cast_fp16")]; + tensor var_359 = const()[name = tensor("op_359"), val = tensor([1, 20, 64, -1])]; + tensor var_360_cast_fp16 = reshape(shape = var_359, x = value_3_cast_fp16)[name = tensor("op_360_cast_fp16")]; + tensor attn_3_transpose_x_0 = const()[name = tensor("attn_3_transpose_x_0"), val = tensor(false)]; + tensor attn_3_transpose_y_0 = const()[name = tensor("attn_3_transpose_y_0"), val = tensor(true)]; + tensor attn_3_cast_fp16 = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = var_360_cast_fp16, y = var_358_cast_fp16)[name = tensor("attn_3_cast_fp16")]; + tensor var_363 = const()[name = tensor("op_363"), val = tensor([1, 1280, 1, -1])]; + tensor input_9_cast_fp16 = reshape(shape = var_363, x = attn_3_cast_fp16)[name = tensor("input_9_cast_fp16")]; + tensor var_367 = const()[name = tensor("op_367"), val = tensor([1, 1])]; + tensor var_369 = const()[name = tensor("op_369"), val = tensor([1, 1])]; + tensor obj_7_pad_type_0 = const()[name = tensor("obj_7_pad_type_0"), val = tensor("custom")]; + tensor obj_7_pad_0 = const()[name = tensor("obj_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_1_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_1_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63489920)))]; + tensor layers_1_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_1_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(66766784)))]; + tensor obj_7_cast_fp16 = conv(bias = layers_1_self_attn_o_proj_bias_to_fp16, dilations = var_369, groups = var_298, pad = obj_7_pad_0, pad_type = obj_7_pad_type_0, strides = var_367, weight = layers_1_self_attn_o_proj_weight_to_fp16, x = input_9_cast_fp16)[name = tensor("obj_7_cast_fp16")]; + tensor inputs_7_cast_fp16 = add(x = inputs_5_cast_fp16, y = obj_7_cast_fp16)[name = tensor("inputs_7_cast_fp16")]; + tensor var_375 = const()[name = tensor("op_375"), val = tensor([1])]; + tensor channels_mean_7_cast_fp16 = reduce_mean(axes = var_375, keep_dims = var_299, x = inputs_7_cast_fp16)[name = tensor("channels_mean_7_cast_fp16")]; + 
tensor zero_mean_7_cast_fp16 = sub(x = inputs_7_cast_fp16, y = channels_mean_7_cast_fp16)[name = tensor("zero_mean_7_cast_fp16")]; + tensor zero_mean_sq_7_cast_fp16 = mul(x = zero_mean_7_cast_fp16, y = zero_mean_7_cast_fp16)[name = tensor("zero_mean_sq_7_cast_fp16")]; + tensor var_379 = const()[name = tensor("op_379"), val = tensor([1])]; + tensor var_380_cast_fp16 = reduce_mean(axes = var_379, keep_dims = var_299, x = zero_mean_sq_7_cast_fp16)[name = tensor("op_380_cast_fp16")]; + tensor var_381_to_fp16 = const()[name = tensor("op_381_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_382_cast_fp16 = add(x = var_380_cast_fp16, y = var_381_to_fp16)[name = tensor("op_382_cast_fp16")]; + tensor denom_7_epsilon_0_to_fp16 = const()[name = tensor("denom_7_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_7_cast_fp16 = rsqrt(epsilon = denom_7_epsilon_0_to_fp16, x = var_382_cast_fp16)[name = tensor("denom_7_cast_fp16")]; + tensor out_7_cast_fp16 = mul(x = zero_mean_7_cast_fp16, y = denom_7_cast_fp16)[name = tensor("out_7_cast_fp16")]; + tensor input_11_gamma_0_to_fp16 = const()[name = tensor("input_11_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(66769408)))]; + tensor input_11_beta_0_to_fp16 = const()[name = tensor("input_11_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(66772032)))]; + tensor input_11_epsilon_0_to_fp16 = const()[name = tensor("input_11_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_11_cast_fp16 = batch_norm(beta = input_11_beta_0_to_fp16, epsilon = input_11_epsilon_0_to_fp16, gamma = input_11_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_7_cast_fp16)[name = tensor("input_11_cast_fp16")]; + tensor var_393 = const()[name = tensor("op_393"), val = tensor([1, 1])]; + tensor var_395 = const()[name = tensor("op_395"), val = tensor([1, 1])]; + tensor input_13_pad_type_0 = const()[name = tensor("input_13_pad_type_0"), val = tensor("custom")]; + tensor input_13_pad_0 = const()[name = tensor("input_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_1_fc1_weight_to_fp16 = const()[name = tensor("layers_1_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(66774656)))]; + tensor layers_1_fc1_bias_to_fp16 = const()[name = tensor("layers_1_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79881920)))]; + tensor input_13_cast_fp16 = conv(bias = layers_1_fc1_bias_to_fp16, dilations = var_395, groups = var_298, pad = input_13_pad_0, pad_type = input_13_pad_type_0, strides = var_393, weight = layers_1_fc1_weight_to_fp16, x = input_11_cast_fp16)[name = tensor("input_13_cast_fp16")]; + tensor input_15_mode_0 = const()[name = tensor("input_15_mode_0"), val = tensor("EXACT")]; + tensor input_15_cast_fp16 = gelu(mode = input_15_mode_0, x = input_13_cast_fp16)[name = tensor("input_15_cast_fp16")]; + tensor var_401 = const()[name = tensor("op_401"), val = tensor([1, 1])]; + tensor var_403 = const()[name = tensor("op_403"), val = tensor([1, 1])]; + tensor hidden_states_7_pad_type_0 = const()[name = tensor("hidden_states_7_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_7_pad_0 = const()[name = tensor("hidden_states_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_1_fc2_weight_to_fp16 = const()[name = tensor("layers_1_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(79892224)))]; + tensor layers_1_fc2_bias_to_fp16 = const()[name = tensor("layers_1_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(92999488)))]; + tensor hidden_states_7_cast_fp16 = conv(bias = layers_1_fc2_bias_to_fp16, dilations = var_403, groups = var_298, pad = hidden_states_7_pad_0, pad_type = hidden_states_7_pad_type_0, strides = var_401, weight = layers_1_fc2_weight_to_fp16, x = input_15_cast_fp16)[name = tensor("hidden_states_7_cast_fp16")]; + tensor inputs_9_cast_fp16 = add(x = inputs_7_cast_fp16, y = hidden_states_7_cast_fp16)[name = tensor("inputs_9_cast_fp16")]; + tensor var_414 = const()[name = tensor("op_414"), val = tensor(3)]; + tensor var_416 = const()[name = tensor("op_416"), val = tensor(1)]; + tensor var_417 = const()[name = tensor("op_417"), val = tensor(true)]; + tensor var_427 = const()[name = tensor("op_427"), val = tensor([1])]; + tensor channels_mean_9_cast_fp16 = reduce_mean(axes = var_427, keep_dims = var_417, x = inputs_9_cast_fp16)[name = tensor("channels_mean_9_cast_fp16")]; + tensor zero_mean_9_cast_fp16 = sub(x = inputs_9_cast_fp16, y = channels_mean_9_cast_fp16)[name = tensor("zero_mean_9_cast_fp16")]; + tensor zero_mean_sq_9_cast_fp16 = mul(x = zero_mean_9_cast_fp16, y = zero_mean_9_cast_fp16)[name = tensor("zero_mean_sq_9_cast_fp16")]; + tensor var_431 = const()[name = tensor("op_431"), val = tensor([1])]; + tensor var_432_cast_fp16 = reduce_mean(axes = var_431, keep_dims = var_417, x = zero_mean_sq_9_cast_fp16)[name = tensor("op_432_cast_fp16")]; + tensor var_433_to_fp16 = const()[name = tensor("op_433_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_434_cast_fp16 = add(x = var_432_cast_fp16, y = var_433_to_fp16)[name = tensor("op_434_cast_fp16")]; + tensor denom_9_epsilon_0_to_fp16 = const()[name = tensor("denom_9_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_9_cast_fp16 = rsqrt(epsilon = denom_9_epsilon_0_to_fp16, x = var_434_cast_fp16)[name = tensor("denom_9_cast_fp16")]; + tensor out_9_cast_fp16 = mul(x = zero_mean_9_cast_fp16, y = denom_9_cast_fp16)[name = tensor("out_9_cast_fp16")]; + tensor obj_9_gamma_0_to_fp16 = const()[name = tensor("obj_9_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(93002112)))]; + tensor obj_9_beta_0_to_fp16 = const()[name = tensor("obj_9_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(93004736)))]; + tensor obj_9_epsilon_0_to_fp16 = const()[name = tensor("obj_9_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_9_cast_fp16 = batch_norm(beta = obj_9_beta_0_to_fp16, epsilon = obj_9_epsilon_0_to_fp16, gamma = obj_9_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_9_cast_fp16)[name = tensor("obj_9_cast_fp16")]; + tensor var_449 = const()[name = tensor("op_449"), val = tensor([1, 1])]; + tensor var_451 = const()[name = tensor("op_451"), val = tensor([1, 1])]; + tensor query_5_pad_type_0 = const()[name = tensor("query_5_pad_type_0"), val = tensor("custom")]; + tensor query_5_pad_0 = const()[name = tensor("query_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_2_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_2_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(93007360)))]; + tensor layers_2_self_attn_q_proj_bias_to_fp16 = const()[name = 
tensor("layers_2_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(96284224)))]; + tensor query_5_cast_fp16 = conv(bias = layers_2_self_attn_q_proj_bias_to_fp16, dilations = var_451, groups = var_416, pad = query_5_pad_0, pad_type = query_5_pad_type_0, strides = var_449, weight = layers_2_self_attn_q_proj_weight_to_fp16, x = obj_9_cast_fp16)[name = tensor("query_5_cast_fp16")]; + tensor var_455 = const()[name = tensor("op_455"), val = tensor([1, 1])]; + tensor var_457 = const()[name = tensor("op_457"), val = tensor([1, 1])]; + tensor key_5_pad_type_0 = const()[name = tensor("key_5_pad_type_0"), val = tensor("custom")]; + tensor key_5_pad_0 = const()[name = tensor("key_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_2_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_2_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(96286848)))]; + tensor key_5_cast_fp16 = conv(dilations = var_457, groups = var_416, pad = key_5_pad_0, pad_type = key_5_pad_type_0, strides = var_455, weight = layers_2_self_attn_k_proj_weight_to_fp16, x = obj_9_cast_fp16)[name = tensor("key_5_cast_fp16")]; + tensor var_462 = const()[name = tensor("op_462"), val = tensor([1, 1])]; + tensor var_464 = const()[name = tensor("op_464"), val = tensor([1, 1])]; + tensor value_5_pad_type_0 = const()[name = tensor("value_5_pad_type_0"), val = tensor("custom")]; + tensor value_5_pad_0 = const()[name = tensor("value_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_2_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_2_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(99563712)))]; + tensor layers_2_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_2_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(102840576)))]; + tensor value_5_cast_fp16 = conv(bias = layers_2_self_attn_v_proj_bias_to_fp16, dilations = var_464, groups = var_416, pad = value_5_pad_0, pad_type = value_5_pad_type_0, strides = var_462, weight = layers_2_self_attn_v_proj_weight_to_fp16, x = obj_9_cast_fp16)[name = tensor("value_5_cast_fp16")]; + tensor var_468 = const()[name = tensor("op_468"), val = tensor([1, 20, 64, -1])]; + tensor var_469_cast_fp16 = reshape(shape = var_468, x = query_5_cast_fp16)[name = tensor("op_469_cast_fp16")]; + tensor var_470_to_fp16 = const()[name = tensor("op_470_to_fp16"), val = tensor(0x1p-3)]; + tensor var_471_cast_fp16 = mul(x = var_469_cast_fp16, y = var_470_to_fp16)[name = tensor("op_471_cast_fp16")]; + tensor var_472 = const()[name = tensor("op_472"), val = tensor([1, 20, 64, -1])]; + tensor var_473_cast_fp16 = reshape(shape = var_472, x = key_5_cast_fp16)[name = tensor("op_473_cast_fp16")]; + tensor mh_w_5_transpose_x_0 = const()[name = tensor("mh_w_5_transpose_x_0"), val = tensor(true)]; + tensor mh_w_5_transpose_y_0 = const()[name = tensor("mh_w_5_transpose_y_0"), val = tensor(false)]; + tensor mh_w_5_cast_fp16 = matmul(transpose_x = mh_w_5_transpose_x_0, transpose_y = mh_w_5_transpose_y_0, x = var_471_cast_fp16, y = var_473_cast_fp16)[name = tensor("mh_w_5_cast_fp16")]; + tensor var_476_cast_fp16 = softmax(axis = var_414, x = mh_w_5_cast_fp16)[name = tensor("op_476_cast_fp16")]; + tensor var_477 = const()[name = tensor("op_477"), val = tensor([1, 20, 64, -1])]; + tensor var_478_cast_fp16 = 
reshape(shape = var_477, x = value_5_cast_fp16)[name = tensor("op_478_cast_fp16")]; + tensor attn_5_transpose_x_0 = const()[name = tensor("attn_5_transpose_x_0"), val = tensor(false)]; + tensor attn_5_transpose_y_0 = const()[name = tensor("attn_5_transpose_y_0"), val = tensor(true)]; + tensor attn_5_cast_fp16 = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = var_478_cast_fp16, y = var_476_cast_fp16)[name = tensor("attn_5_cast_fp16")]; + tensor var_481 = const()[name = tensor("op_481"), val = tensor([1, 1280, 1, -1])]; + tensor input_17_cast_fp16 = reshape(shape = var_481, x = attn_5_cast_fp16)[name = tensor("input_17_cast_fp16")]; + tensor var_485 = const()[name = tensor("op_485"), val = tensor([1, 1])]; + tensor var_487 = const()[name = tensor("op_487"), val = tensor([1, 1])]; + tensor obj_11_pad_type_0 = const()[name = tensor("obj_11_pad_type_0"), val = tensor("custom")]; + tensor obj_11_pad_0 = const()[name = tensor("obj_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_2_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_2_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(102843200)))]; + tensor layers_2_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_2_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(106120064)))]; + tensor obj_11_cast_fp16 = conv(bias = layers_2_self_attn_o_proj_bias_to_fp16, dilations = var_487, groups = var_416, pad = obj_11_pad_0, pad_type = obj_11_pad_type_0, strides = var_485, weight = layers_2_self_attn_o_proj_weight_to_fp16, x = input_17_cast_fp16)[name = tensor("obj_11_cast_fp16")]; + tensor inputs_11_cast_fp16 = add(x = inputs_9_cast_fp16, y = obj_11_cast_fp16)[name = tensor("inputs_11_cast_fp16")]; + tensor var_493 = const()[name = tensor("op_493"), val = tensor([1])]; + tensor channels_mean_11_cast_fp16 = reduce_mean(axes = var_493, keep_dims = var_417, x = inputs_11_cast_fp16)[name = tensor("channels_mean_11_cast_fp16")]; + tensor zero_mean_11_cast_fp16 = sub(x = inputs_11_cast_fp16, y = channels_mean_11_cast_fp16)[name = tensor("zero_mean_11_cast_fp16")]; + tensor zero_mean_sq_11_cast_fp16 = mul(x = zero_mean_11_cast_fp16, y = zero_mean_11_cast_fp16)[name = tensor("zero_mean_sq_11_cast_fp16")]; + tensor var_497 = const()[name = tensor("op_497"), val = tensor([1])]; + tensor var_498_cast_fp16 = reduce_mean(axes = var_497, keep_dims = var_417, x = zero_mean_sq_11_cast_fp16)[name = tensor("op_498_cast_fp16")]; + tensor var_499_to_fp16 = const()[name = tensor("op_499_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_500_cast_fp16 = add(x = var_498_cast_fp16, y = var_499_to_fp16)[name = tensor("op_500_cast_fp16")]; + tensor denom_11_epsilon_0_to_fp16 = const()[name = tensor("denom_11_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_11_cast_fp16 = rsqrt(epsilon = denom_11_epsilon_0_to_fp16, x = var_500_cast_fp16)[name = tensor("denom_11_cast_fp16")]; + tensor out_11_cast_fp16 = mul(x = zero_mean_11_cast_fp16, y = denom_11_cast_fp16)[name = tensor("out_11_cast_fp16")]; + tensor input_19_gamma_0_to_fp16 = const()[name = tensor("input_19_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(106122688)))]; + tensor input_19_beta_0_to_fp16 = const()[name = tensor("input_19_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(106125312)))]; + 
tensor input_19_epsilon_0_to_fp16 = const()[name = tensor("input_19_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_19_cast_fp16 = batch_norm(beta = input_19_beta_0_to_fp16, epsilon = input_19_epsilon_0_to_fp16, gamma = input_19_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_11_cast_fp16)[name = tensor("input_19_cast_fp16")]; + tensor var_511 = const()[name = tensor("op_511"), val = tensor([1, 1])]; + tensor var_513 = const()[name = tensor("op_513"), val = tensor([1, 1])]; + tensor input_21_pad_type_0 = const()[name = tensor("input_21_pad_type_0"), val = tensor("custom")]; + tensor input_21_pad_0 = const()[name = tensor("input_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_2_fc1_weight_to_fp16 = const()[name = tensor("layers_2_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(106127936)))]; + tensor layers_2_fc1_bias_to_fp16 = const()[name = tensor("layers_2_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(119235200)))]; + tensor input_21_cast_fp16 = conv(bias = layers_2_fc1_bias_to_fp16, dilations = var_513, groups = var_416, pad = input_21_pad_0, pad_type = input_21_pad_type_0, strides = var_511, weight = layers_2_fc1_weight_to_fp16, x = input_19_cast_fp16)[name = tensor("input_21_cast_fp16")]; + tensor input_23_mode_0 = const()[name = tensor("input_23_mode_0"), val = tensor("EXACT")]; + tensor input_23_cast_fp16 = gelu(mode = input_23_mode_0, x = input_21_cast_fp16)[name = tensor("input_23_cast_fp16")]; + tensor var_519 = const()[name = tensor("op_519"), val = tensor([1, 1])]; + tensor var_521 = const()[name = tensor("op_521"), val = tensor([1, 1])]; + tensor hidden_states_9_pad_type_0 = const()[name = tensor("hidden_states_9_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_9_pad_0 = const()[name = tensor("hidden_states_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_2_fc2_weight_to_fp16 = const()[name = tensor("layers_2_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(119245504)))]; + tensor layers_2_fc2_bias_to_fp16 = const()[name = tensor("layers_2_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(132352768)))]; + tensor hidden_states_9_cast_fp16 = conv(bias = layers_2_fc2_bias_to_fp16, dilations = var_521, groups = var_416, pad = hidden_states_9_pad_0, pad_type = hidden_states_9_pad_type_0, strides = var_519, weight = layers_2_fc2_weight_to_fp16, x = input_23_cast_fp16)[name = tensor("hidden_states_9_cast_fp16")]; + tensor inputs_13_cast_fp16 = add(x = inputs_11_cast_fp16, y = hidden_states_9_cast_fp16)[name = tensor("inputs_13_cast_fp16")]; + tensor var_532 = const()[name = tensor("op_532"), val = tensor(3)]; + tensor var_534 = const()[name = tensor("op_534"), val = tensor(1)]; + tensor var_535 = const()[name = tensor("op_535"), val = tensor(true)]; + tensor var_545 = const()[name = tensor("op_545"), val = tensor([1])]; + tensor channels_mean_13_cast_fp16 = reduce_mean(axes = var_545, keep_dims = var_535, x = inputs_13_cast_fp16)[name = tensor("channels_mean_13_cast_fp16")]; + tensor zero_mean_13_cast_fp16 = sub(x = inputs_13_cast_fp16, y = channels_mean_13_cast_fp16)[name = tensor("zero_mean_13_cast_fp16")]; + tensor zero_mean_sq_13_cast_fp16 = mul(x = zero_mean_13_cast_fp16, y = zero_mean_13_cast_fp16)[name = tensor("zero_mean_sq_13_cast_fp16")]; + tensor var_549 
= const()[name = tensor("op_549"), val = tensor([1])]; + tensor var_550_cast_fp16 = reduce_mean(axes = var_549, keep_dims = var_535, x = zero_mean_sq_13_cast_fp16)[name = tensor("op_550_cast_fp16")]; + tensor var_551_to_fp16 = const()[name = tensor("op_551_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_552_cast_fp16 = add(x = var_550_cast_fp16, y = var_551_to_fp16)[name = tensor("op_552_cast_fp16")]; + tensor denom_13_epsilon_0_to_fp16 = const()[name = tensor("denom_13_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_13_cast_fp16 = rsqrt(epsilon = denom_13_epsilon_0_to_fp16, x = var_552_cast_fp16)[name = tensor("denom_13_cast_fp16")]; + tensor out_13_cast_fp16 = mul(x = zero_mean_13_cast_fp16, y = denom_13_cast_fp16)[name = tensor("out_13_cast_fp16")]; + tensor obj_13_gamma_0_to_fp16 = const()[name = tensor("obj_13_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(132355392)))]; + tensor obj_13_beta_0_to_fp16 = const()[name = tensor("obj_13_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(132358016)))]; + tensor obj_13_epsilon_0_to_fp16 = const()[name = tensor("obj_13_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_13_cast_fp16 = batch_norm(beta = obj_13_beta_0_to_fp16, epsilon = obj_13_epsilon_0_to_fp16, gamma = obj_13_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_13_cast_fp16)[name = tensor("obj_13_cast_fp16")]; + tensor var_567 = const()[name = tensor("op_567"), val = tensor([1, 1])]; + tensor var_569 = const()[name = tensor("op_569"), val = tensor([1, 1])]; + tensor query_7_pad_type_0 = const()[name = tensor("query_7_pad_type_0"), val = tensor("custom")]; + tensor query_7_pad_0 = const()[name = tensor("query_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_3_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_3_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(132360640)))]; + tensor layers_3_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_3_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(135637504)))]; + tensor query_7_cast_fp16 = conv(bias = layers_3_self_attn_q_proj_bias_to_fp16, dilations = var_569, groups = var_534, pad = query_7_pad_0, pad_type = query_7_pad_type_0, strides = var_567, weight = layers_3_self_attn_q_proj_weight_to_fp16, x = obj_13_cast_fp16)[name = tensor("query_7_cast_fp16")]; + tensor var_573 = const()[name = tensor("op_573"), val = tensor([1, 1])]; + tensor var_575 = const()[name = tensor("op_575"), val = tensor([1, 1])]; + tensor key_7_pad_type_0 = const()[name = tensor("key_7_pad_type_0"), val = tensor("custom")]; + tensor key_7_pad_0 = const()[name = tensor("key_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_3_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_3_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(135640128)))]; + tensor key_7_cast_fp16 = conv(dilations = var_575, groups = var_534, pad = key_7_pad_0, pad_type = key_7_pad_type_0, strides = var_573, weight = layers_3_self_attn_k_proj_weight_to_fp16, x = obj_13_cast_fp16)[name = tensor("key_7_cast_fp16")]; + tensor var_580 = const()[name = tensor("op_580"), val = tensor([1, 1])]; + tensor var_582 = const()[name = tensor("op_582"), val = tensor([1, 1])]; 
+ tensor value_7_pad_type_0 = const()[name = tensor("value_7_pad_type_0"), val = tensor("custom")]; + tensor value_7_pad_0 = const()[name = tensor("value_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_3_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_3_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(138916992)))]; + tensor layers_3_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_3_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(142193856)))]; + tensor value_7_cast_fp16 = conv(bias = layers_3_self_attn_v_proj_bias_to_fp16, dilations = var_582, groups = var_534, pad = value_7_pad_0, pad_type = value_7_pad_type_0, strides = var_580, weight = layers_3_self_attn_v_proj_weight_to_fp16, x = obj_13_cast_fp16)[name = tensor("value_7_cast_fp16")]; + tensor var_586 = const()[name = tensor("op_586"), val = tensor([1, 20, 64, -1])]; + tensor var_587_cast_fp16 = reshape(shape = var_586, x = query_7_cast_fp16)[name = tensor("op_587_cast_fp16")]; + tensor var_588_to_fp16 = const()[name = tensor("op_588_to_fp16"), val = tensor(0x1p-3)]; + tensor var_589_cast_fp16 = mul(x = var_587_cast_fp16, y = var_588_to_fp16)[name = tensor("op_589_cast_fp16")]; + tensor var_590 = const()[name = tensor("op_590"), val = tensor([1, 20, 64, -1])]; + tensor var_591_cast_fp16 = reshape(shape = var_590, x = key_7_cast_fp16)[name = tensor("op_591_cast_fp16")]; + tensor mh_w_7_transpose_x_0 = const()[name = tensor("mh_w_7_transpose_x_0"), val = tensor(true)]; + tensor mh_w_7_transpose_y_0 = const()[name = tensor("mh_w_7_transpose_y_0"), val = tensor(false)]; + tensor mh_w_7_cast_fp16 = matmul(transpose_x = mh_w_7_transpose_x_0, transpose_y = mh_w_7_transpose_y_0, x = var_589_cast_fp16, y = var_591_cast_fp16)[name = tensor("mh_w_7_cast_fp16")]; + tensor var_594_cast_fp16 = softmax(axis = var_532, x = mh_w_7_cast_fp16)[name = tensor("op_594_cast_fp16")]; + tensor var_595 = const()[name = tensor("op_595"), val = tensor([1, 20, 64, -1])]; + tensor var_596_cast_fp16 = reshape(shape = var_595, x = value_7_cast_fp16)[name = tensor("op_596_cast_fp16")]; + tensor attn_7_transpose_x_0 = const()[name = tensor("attn_7_transpose_x_0"), val = tensor(false)]; + tensor attn_7_transpose_y_0 = const()[name = tensor("attn_7_transpose_y_0"), val = tensor(true)]; + tensor attn_7_cast_fp16 = matmul(transpose_x = attn_7_transpose_x_0, transpose_y = attn_7_transpose_y_0, x = var_596_cast_fp16, y = var_594_cast_fp16)[name = tensor("attn_7_cast_fp16")]; + tensor var_599 = const()[name = tensor("op_599"), val = tensor([1, 1280, 1, -1])]; + tensor input_25_cast_fp16 = reshape(shape = var_599, x = attn_7_cast_fp16)[name = tensor("input_25_cast_fp16")]; + tensor var_603 = const()[name = tensor("op_603"), val = tensor([1, 1])]; + tensor var_605 = const()[name = tensor("op_605"), val = tensor([1, 1])]; + tensor obj_15_pad_type_0 = const()[name = tensor("obj_15_pad_type_0"), val = tensor("custom")]; + tensor obj_15_pad_0 = const()[name = tensor("obj_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_3_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_3_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(142196480)))]; + tensor layers_3_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_3_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(145473344)))]; + tensor obj_15_cast_fp16 = conv(bias = layers_3_self_attn_o_proj_bias_to_fp16, dilations = var_605, groups = var_534, pad = obj_15_pad_0, pad_type = obj_15_pad_type_0, strides = var_603, weight = layers_3_self_attn_o_proj_weight_to_fp16, x = input_25_cast_fp16)[name = tensor("obj_15_cast_fp16")]; + tensor inputs_15_cast_fp16 = add(x = inputs_13_cast_fp16, y = obj_15_cast_fp16)[name = tensor("inputs_15_cast_fp16")]; + tensor var_611 = const()[name = tensor("op_611"), val = tensor([1])]; + tensor channels_mean_15_cast_fp16 = reduce_mean(axes = var_611, keep_dims = var_535, x = inputs_15_cast_fp16)[name = tensor("channels_mean_15_cast_fp16")]; + tensor zero_mean_15_cast_fp16 = sub(x = inputs_15_cast_fp16, y = channels_mean_15_cast_fp16)[name = tensor("zero_mean_15_cast_fp16")]; + tensor zero_mean_sq_15_cast_fp16 = mul(x = zero_mean_15_cast_fp16, y = zero_mean_15_cast_fp16)[name = tensor("zero_mean_sq_15_cast_fp16")]; + tensor var_615 = const()[name = tensor("op_615"), val = tensor([1])]; + tensor var_616_cast_fp16 = reduce_mean(axes = var_615, keep_dims = var_535, x = zero_mean_sq_15_cast_fp16)[name = tensor("op_616_cast_fp16")]; + tensor var_617_to_fp16 = const()[name = tensor("op_617_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_618_cast_fp16 = add(x = var_616_cast_fp16, y = var_617_to_fp16)[name = tensor("op_618_cast_fp16")]; + tensor denom_15_epsilon_0_to_fp16 = const()[name = tensor("denom_15_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_15_cast_fp16 = rsqrt(epsilon = denom_15_epsilon_0_to_fp16, x = var_618_cast_fp16)[name = tensor("denom_15_cast_fp16")]; + tensor out_15_cast_fp16 = mul(x = zero_mean_15_cast_fp16, y = denom_15_cast_fp16)[name = tensor("out_15_cast_fp16")]; + tensor input_27_gamma_0_to_fp16 = const()[name = tensor("input_27_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(145475968)))]; + tensor input_27_beta_0_to_fp16 = const()[name = tensor("input_27_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(145478592)))]; + tensor input_27_epsilon_0_to_fp16 = const()[name = tensor("input_27_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_27_cast_fp16 = batch_norm(beta = input_27_beta_0_to_fp16, epsilon = input_27_epsilon_0_to_fp16, gamma = input_27_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_15_cast_fp16)[name = tensor("input_27_cast_fp16")]; + tensor var_629 = const()[name = tensor("op_629"), val = tensor([1, 1])]; + tensor var_631 = const()[name = tensor("op_631"), val = tensor([1, 1])]; + tensor input_29_pad_type_0 = const()[name = tensor("input_29_pad_type_0"), val = tensor("custom")]; + tensor input_29_pad_0 = const()[name = tensor("input_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_3_fc1_weight_to_fp16 = const()[name = tensor("layers_3_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(145481216)))]; + tensor layers_3_fc1_bias_to_fp16 = const()[name = tensor("layers_3_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(158588480)))]; + tensor input_29_cast_fp16 = conv(bias = layers_3_fc1_bias_to_fp16, dilations = var_631, groups = var_534, pad = input_29_pad_0, pad_type = input_29_pad_type_0, strides = var_629, weight = layers_3_fc1_weight_to_fp16, x = input_27_cast_fp16)[name = 
tensor("input_29_cast_fp16")]; + tensor input_31_mode_0 = const()[name = tensor("input_31_mode_0"), val = tensor("EXACT")]; + tensor input_31_cast_fp16 = gelu(mode = input_31_mode_0, x = input_29_cast_fp16)[name = tensor("input_31_cast_fp16")]; + tensor var_637 = const()[name = tensor("op_637"), val = tensor([1, 1])]; + tensor var_639 = const()[name = tensor("op_639"), val = tensor([1, 1])]; + tensor hidden_states_11_pad_type_0 = const()[name = tensor("hidden_states_11_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_11_pad_0 = const()[name = tensor("hidden_states_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_3_fc2_weight_to_fp16 = const()[name = tensor("layers_3_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(158598784)))]; + tensor layers_3_fc2_bias_to_fp16 = const()[name = tensor("layers_3_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(171706048)))]; + tensor hidden_states_11_cast_fp16 = conv(bias = layers_3_fc2_bias_to_fp16, dilations = var_639, groups = var_534, pad = hidden_states_11_pad_0, pad_type = hidden_states_11_pad_type_0, strides = var_637, weight = layers_3_fc2_weight_to_fp16, x = input_31_cast_fp16)[name = tensor("hidden_states_11_cast_fp16")]; + tensor inputs_17_cast_fp16 = add(x = inputs_15_cast_fp16, y = hidden_states_11_cast_fp16)[name = tensor("inputs_17_cast_fp16")]; + tensor var_650 = const()[name = tensor("op_650"), val = tensor(3)]; + tensor var_652 = const()[name = tensor("op_652"), val = tensor(1)]; + tensor var_653 = const()[name = tensor("op_653"), val = tensor(true)]; + tensor var_663 = const()[name = tensor("op_663"), val = tensor([1])]; + tensor channels_mean_17_cast_fp16 = reduce_mean(axes = var_663, keep_dims = var_653, x = inputs_17_cast_fp16)[name = tensor("channels_mean_17_cast_fp16")]; + tensor zero_mean_17_cast_fp16 = sub(x = inputs_17_cast_fp16, y = channels_mean_17_cast_fp16)[name = tensor("zero_mean_17_cast_fp16")]; + tensor zero_mean_sq_17_cast_fp16 = mul(x = zero_mean_17_cast_fp16, y = zero_mean_17_cast_fp16)[name = tensor("zero_mean_sq_17_cast_fp16")]; + tensor var_667 = const()[name = tensor("op_667"), val = tensor([1])]; + tensor var_668_cast_fp16 = reduce_mean(axes = var_667, keep_dims = var_653, x = zero_mean_sq_17_cast_fp16)[name = tensor("op_668_cast_fp16")]; + tensor var_669_to_fp16 = const()[name = tensor("op_669_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_670_cast_fp16 = add(x = var_668_cast_fp16, y = var_669_to_fp16)[name = tensor("op_670_cast_fp16")]; + tensor denom_17_epsilon_0_to_fp16 = const()[name = tensor("denom_17_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_17_cast_fp16 = rsqrt(epsilon = denom_17_epsilon_0_to_fp16, x = var_670_cast_fp16)[name = tensor("denom_17_cast_fp16")]; + tensor out_17_cast_fp16 = mul(x = zero_mean_17_cast_fp16, y = denom_17_cast_fp16)[name = tensor("out_17_cast_fp16")]; + tensor obj_17_gamma_0_to_fp16 = const()[name = tensor("obj_17_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(171708672)))]; + tensor obj_17_beta_0_to_fp16 = const()[name = tensor("obj_17_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(171711296)))]; + tensor obj_17_epsilon_0_to_fp16 = const()[name = tensor("obj_17_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_17_cast_fp16 = batch_norm(beta = obj_17_beta_0_to_fp16, epsilon = 
obj_17_epsilon_0_to_fp16, gamma = obj_17_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_17_cast_fp16)[name = tensor("obj_17_cast_fp16")]; + tensor var_685 = const()[name = tensor("op_685"), val = tensor([1, 1])]; + tensor var_687 = const()[name = tensor("op_687"), val = tensor([1, 1])]; + tensor query_9_pad_type_0 = const()[name = tensor("query_9_pad_type_0"), val = tensor("custom")]; + tensor query_9_pad_0 = const()[name = tensor("query_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_4_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_4_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(171713920)))]; + tensor layers_4_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_4_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(174990784)))]; + tensor query_9_cast_fp16 = conv(bias = layers_4_self_attn_q_proj_bias_to_fp16, dilations = var_687, groups = var_652, pad = query_9_pad_0, pad_type = query_9_pad_type_0, strides = var_685, weight = layers_4_self_attn_q_proj_weight_to_fp16, x = obj_17_cast_fp16)[name = tensor("query_9_cast_fp16")]; + tensor var_691 = const()[name = tensor("op_691"), val = tensor([1, 1])]; + tensor var_693 = const()[name = tensor("op_693"), val = tensor([1, 1])]; + tensor key_9_pad_type_0 = const()[name = tensor("key_9_pad_type_0"), val = tensor("custom")]; + tensor key_9_pad_0 = const()[name = tensor("key_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_4_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_4_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(174993408)))]; + tensor key_9_cast_fp16 = conv(dilations = var_693, groups = var_652, pad = key_9_pad_0, pad_type = key_9_pad_type_0, strides = var_691, weight = layers_4_self_attn_k_proj_weight_to_fp16, x = obj_17_cast_fp16)[name = tensor("key_9_cast_fp16")]; + tensor var_698 = const()[name = tensor("op_698"), val = tensor([1, 1])]; + tensor var_700 = const()[name = tensor("op_700"), val = tensor([1, 1])]; + tensor value_9_pad_type_0 = const()[name = tensor("value_9_pad_type_0"), val = tensor("custom")]; + tensor value_9_pad_0 = const()[name = tensor("value_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_4_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_4_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(178270272)))]; + tensor layers_4_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_4_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181547136)))]; + tensor value_9_cast_fp16 = conv(bias = layers_4_self_attn_v_proj_bias_to_fp16, dilations = var_700, groups = var_652, pad = value_9_pad_0, pad_type = value_9_pad_type_0, strides = var_698, weight = layers_4_self_attn_v_proj_weight_to_fp16, x = obj_17_cast_fp16)[name = tensor("value_9_cast_fp16")]; + tensor var_704 = const()[name = tensor("op_704"), val = tensor([1, 20, 64, -1])]; + tensor var_705_cast_fp16 = reshape(shape = var_704, x = query_9_cast_fp16)[name = tensor("op_705_cast_fp16")]; + tensor var_706_to_fp16 = const()[name = tensor("op_706_to_fp16"), val = tensor(0x1p-3)]; + tensor var_707_cast_fp16 = mul(x = var_705_cast_fp16, y = var_706_to_fp16)[name = tensor("op_707_cast_fp16")]; + 
tensor var_708 = const()[name = tensor("op_708"), val = tensor([1, 20, 64, -1])]; + tensor var_709_cast_fp16 = reshape(shape = var_708, x = key_9_cast_fp16)[name = tensor("op_709_cast_fp16")]; + tensor mh_w_9_transpose_x_0 = const()[name = tensor("mh_w_9_transpose_x_0"), val = tensor(true)]; + tensor mh_w_9_transpose_y_0 = const()[name = tensor("mh_w_9_transpose_y_0"), val = tensor(false)]; + tensor mh_w_9_cast_fp16 = matmul(transpose_x = mh_w_9_transpose_x_0, transpose_y = mh_w_9_transpose_y_0, x = var_707_cast_fp16, y = var_709_cast_fp16)[name = tensor("mh_w_9_cast_fp16")]; + tensor var_712_cast_fp16 = softmax(axis = var_650, x = mh_w_9_cast_fp16)[name = tensor("op_712_cast_fp16")]; + tensor var_713 = const()[name = tensor("op_713"), val = tensor([1, 20, 64, -1])]; + tensor var_714_cast_fp16 = reshape(shape = var_713, x = value_9_cast_fp16)[name = tensor("op_714_cast_fp16")]; + tensor attn_9_transpose_x_0 = const()[name = tensor("attn_9_transpose_x_0"), val = tensor(false)]; + tensor attn_9_transpose_y_0 = const()[name = tensor("attn_9_transpose_y_0"), val = tensor(true)]; + tensor attn_9_cast_fp16 = matmul(transpose_x = attn_9_transpose_x_0, transpose_y = attn_9_transpose_y_0, x = var_714_cast_fp16, y = var_712_cast_fp16)[name = tensor("attn_9_cast_fp16")]; + tensor var_717 = const()[name = tensor("op_717"), val = tensor([1, 1280, 1, -1])]; + tensor input_33_cast_fp16 = reshape(shape = var_717, x = attn_9_cast_fp16)[name = tensor("input_33_cast_fp16")]; + tensor var_721 = const()[name = tensor("op_721"), val = tensor([1, 1])]; + tensor var_723 = const()[name = tensor("op_723"), val = tensor([1, 1])]; + tensor obj_19_pad_type_0 = const()[name = tensor("obj_19_pad_type_0"), val = tensor("custom")]; + tensor obj_19_pad_0 = const()[name = tensor("obj_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_4_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_4_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181549760)))]; + tensor layers_4_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_4_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184826624)))]; + tensor obj_19_cast_fp16 = conv(bias = layers_4_self_attn_o_proj_bias_to_fp16, dilations = var_723, groups = var_652, pad = obj_19_pad_0, pad_type = obj_19_pad_type_0, strides = var_721, weight = layers_4_self_attn_o_proj_weight_to_fp16, x = input_33_cast_fp16)[name = tensor("obj_19_cast_fp16")]; + tensor inputs_19_cast_fp16 = add(x = inputs_17_cast_fp16, y = obj_19_cast_fp16)[name = tensor("inputs_19_cast_fp16")]; + tensor var_729 = const()[name = tensor("op_729"), val = tensor([1])]; + tensor channels_mean_19_cast_fp16 = reduce_mean(axes = var_729, keep_dims = var_653, x = inputs_19_cast_fp16)[name = tensor("channels_mean_19_cast_fp16")]; + tensor zero_mean_19_cast_fp16 = sub(x = inputs_19_cast_fp16, y = channels_mean_19_cast_fp16)[name = tensor("zero_mean_19_cast_fp16")]; + tensor zero_mean_sq_19_cast_fp16 = mul(x = zero_mean_19_cast_fp16, y = zero_mean_19_cast_fp16)[name = tensor("zero_mean_sq_19_cast_fp16")]; + tensor var_733 = const()[name = tensor("op_733"), val = tensor([1])]; + tensor var_734_cast_fp16 = reduce_mean(axes = var_733, keep_dims = var_653, x = zero_mean_sq_19_cast_fp16)[name = tensor("op_734_cast_fp16")]; + tensor var_735_to_fp16 = const()[name = tensor("op_735_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_736_cast_fp16 = add(x = var_734_cast_fp16, 
y = var_735_to_fp16)[name = tensor("op_736_cast_fp16")]; + tensor denom_19_epsilon_0_to_fp16 = const()[name = tensor("denom_19_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_19_cast_fp16 = rsqrt(epsilon = denom_19_epsilon_0_to_fp16, x = var_736_cast_fp16)[name = tensor("denom_19_cast_fp16")]; + tensor out_19_cast_fp16 = mul(x = zero_mean_19_cast_fp16, y = denom_19_cast_fp16)[name = tensor("out_19_cast_fp16")]; + tensor input_35_gamma_0_to_fp16 = const()[name = tensor("input_35_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184829248)))]; + tensor input_35_beta_0_to_fp16 = const()[name = tensor("input_35_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184831872)))]; + tensor input_35_epsilon_0_to_fp16 = const()[name = tensor("input_35_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_35_cast_fp16 = batch_norm(beta = input_35_beta_0_to_fp16, epsilon = input_35_epsilon_0_to_fp16, gamma = input_35_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_19_cast_fp16)[name = tensor("input_35_cast_fp16")]; + tensor var_747 = const()[name = tensor("op_747"), val = tensor([1, 1])]; + tensor var_749 = const()[name = tensor("op_749"), val = tensor([1, 1])]; + tensor input_37_pad_type_0 = const()[name = tensor("input_37_pad_type_0"), val = tensor("custom")]; + tensor input_37_pad_0 = const()[name = tensor("input_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_4_fc1_weight_to_fp16 = const()[name = tensor("layers_4_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184834496)))]; + tensor layers_4_fc1_bias_to_fp16 = const()[name = tensor("layers_4_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197941760)))]; + tensor input_37_cast_fp16 = conv(bias = layers_4_fc1_bias_to_fp16, dilations = var_749, groups = var_652, pad = input_37_pad_0, pad_type = input_37_pad_type_0, strides = var_747, weight = layers_4_fc1_weight_to_fp16, x = input_35_cast_fp16)[name = tensor("input_37_cast_fp16")]; + tensor input_39_mode_0 = const()[name = tensor("input_39_mode_0"), val = tensor("EXACT")]; + tensor input_39_cast_fp16 = gelu(mode = input_39_mode_0, x = input_37_cast_fp16)[name = tensor("input_39_cast_fp16")]; + tensor var_755 = const()[name = tensor("op_755"), val = tensor([1, 1])]; + tensor var_757 = const()[name = tensor("op_757"), val = tensor([1, 1])]; + tensor hidden_states_13_pad_type_0 = const()[name = tensor("hidden_states_13_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_13_pad_0 = const()[name = tensor("hidden_states_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_4_fc2_weight_to_fp16 = const()[name = tensor("layers_4_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197952064)))]; + tensor layers_4_fc2_bias_to_fp16 = const()[name = tensor("layers_4_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(211059328)))]; + tensor hidden_states_13_cast_fp16 = conv(bias = layers_4_fc2_bias_to_fp16, dilations = var_757, groups = var_652, pad = hidden_states_13_pad_0, pad_type = hidden_states_13_pad_type_0, strides = var_755, weight = layers_4_fc2_weight_to_fp16, x = input_39_cast_fp16)[name = tensor("hidden_states_13_cast_fp16")]; + tensor inputs_21_cast_fp16 = add(x = 
inputs_19_cast_fp16, y = hidden_states_13_cast_fp16)[name = tensor("inputs_21_cast_fp16")]; + tensor var_768 = const()[name = tensor("op_768"), val = tensor(3)]; + tensor var_770 = const()[name = tensor("op_770"), val = tensor(1)]; + tensor var_771 = const()[name = tensor("op_771"), val = tensor(true)]; + tensor var_781 = const()[name = tensor("op_781"), val = tensor([1])]; + tensor channels_mean_21_cast_fp16 = reduce_mean(axes = var_781, keep_dims = var_771, x = inputs_21_cast_fp16)[name = tensor("channels_mean_21_cast_fp16")]; + tensor zero_mean_21_cast_fp16 = sub(x = inputs_21_cast_fp16, y = channels_mean_21_cast_fp16)[name = tensor("zero_mean_21_cast_fp16")]; + tensor zero_mean_sq_21_cast_fp16 = mul(x = zero_mean_21_cast_fp16, y = zero_mean_21_cast_fp16)[name = tensor("zero_mean_sq_21_cast_fp16")]; + tensor var_785 = const()[name = tensor("op_785"), val = tensor([1])]; + tensor var_786_cast_fp16 = reduce_mean(axes = var_785, keep_dims = var_771, x = zero_mean_sq_21_cast_fp16)[name = tensor("op_786_cast_fp16")]; + tensor var_787_to_fp16 = const()[name = tensor("op_787_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_788_cast_fp16 = add(x = var_786_cast_fp16, y = var_787_to_fp16)[name = tensor("op_788_cast_fp16")]; + tensor denom_21_epsilon_0_to_fp16 = const()[name = tensor("denom_21_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_21_cast_fp16 = rsqrt(epsilon = denom_21_epsilon_0_to_fp16, x = var_788_cast_fp16)[name = tensor("denom_21_cast_fp16")]; + tensor out_21_cast_fp16 = mul(x = zero_mean_21_cast_fp16, y = denom_21_cast_fp16)[name = tensor("out_21_cast_fp16")]; + tensor obj_21_gamma_0_to_fp16 = const()[name = tensor("obj_21_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(211061952)))]; + tensor obj_21_beta_0_to_fp16 = const()[name = tensor("obj_21_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(211064576)))]; + tensor obj_21_epsilon_0_to_fp16 = const()[name = tensor("obj_21_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_21_cast_fp16 = batch_norm(beta = obj_21_beta_0_to_fp16, epsilon = obj_21_epsilon_0_to_fp16, gamma = obj_21_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_21_cast_fp16)[name = tensor("obj_21_cast_fp16")]; + tensor var_803 = const()[name = tensor("op_803"), val = tensor([1, 1])]; + tensor var_805 = const()[name = tensor("op_805"), val = tensor([1, 1])]; + tensor query_11_pad_type_0 = const()[name = tensor("query_11_pad_type_0"), val = tensor("custom")]; + tensor query_11_pad_0 = const()[name = tensor("query_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_5_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_5_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(211067200)))]; + tensor layers_5_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_5_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(214344064)))]; + tensor query_11_cast_fp16 = conv(bias = layers_5_self_attn_q_proj_bias_to_fp16, dilations = var_805, groups = var_770, pad = query_11_pad_0, pad_type = query_11_pad_type_0, strides = var_803, weight = layers_5_self_attn_q_proj_weight_to_fp16, x = obj_21_cast_fp16)[name = tensor("query_11_cast_fp16")]; + tensor var_809 = const()[name = tensor("op_809"), val = tensor([1, 1])]; + tensor var_811 = const()[name = 
tensor("op_811"), val = tensor([1, 1])]; + tensor key_11_pad_type_0 = const()[name = tensor("key_11_pad_type_0"), val = tensor("custom")]; + tensor key_11_pad_0 = const()[name = tensor("key_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_5_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_5_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(214346688)))]; + tensor key_11_cast_fp16 = conv(dilations = var_811, groups = var_770, pad = key_11_pad_0, pad_type = key_11_pad_type_0, strides = var_809, weight = layers_5_self_attn_k_proj_weight_to_fp16, x = obj_21_cast_fp16)[name = tensor("key_11_cast_fp16")]; + tensor var_816 = const()[name = tensor("op_816"), val = tensor([1, 1])]; + tensor var_818 = const()[name = tensor("op_818"), val = tensor([1, 1])]; + tensor value_11_pad_type_0 = const()[name = tensor("value_11_pad_type_0"), val = tensor("custom")]; + tensor value_11_pad_0 = const()[name = tensor("value_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_5_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_5_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(217623552)))]; + tensor layers_5_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_5_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(220900416)))]; + tensor value_11_cast_fp16 = conv(bias = layers_5_self_attn_v_proj_bias_to_fp16, dilations = var_818, groups = var_770, pad = value_11_pad_0, pad_type = value_11_pad_type_0, strides = var_816, weight = layers_5_self_attn_v_proj_weight_to_fp16, x = obj_21_cast_fp16)[name = tensor("value_11_cast_fp16")]; + tensor var_822 = const()[name = tensor("op_822"), val = tensor([1, 20, 64, -1])]; + tensor var_823_cast_fp16 = reshape(shape = var_822, x = query_11_cast_fp16)[name = tensor("op_823_cast_fp16")]; + tensor var_824_to_fp16 = const()[name = tensor("op_824_to_fp16"), val = tensor(0x1p-3)]; + tensor var_825_cast_fp16 = mul(x = var_823_cast_fp16, y = var_824_to_fp16)[name = tensor("op_825_cast_fp16")]; + tensor var_826 = const()[name = tensor("op_826"), val = tensor([1, 20, 64, -1])]; + tensor var_827_cast_fp16 = reshape(shape = var_826, x = key_11_cast_fp16)[name = tensor("op_827_cast_fp16")]; + tensor mh_w_11_transpose_x_0 = const()[name = tensor("mh_w_11_transpose_x_0"), val = tensor(true)]; + tensor mh_w_11_transpose_y_0 = const()[name = tensor("mh_w_11_transpose_y_0"), val = tensor(false)]; + tensor mh_w_11_cast_fp16 = matmul(transpose_x = mh_w_11_transpose_x_0, transpose_y = mh_w_11_transpose_y_0, x = var_825_cast_fp16, y = var_827_cast_fp16)[name = tensor("mh_w_11_cast_fp16")]; + tensor var_830_cast_fp16 = softmax(axis = var_768, x = mh_w_11_cast_fp16)[name = tensor("op_830_cast_fp16")]; + tensor var_831 = const()[name = tensor("op_831"), val = tensor([1, 20, 64, -1])]; + tensor var_832_cast_fp16 = reshape(shape = var_831, x = value_11_cast_fp16)[name = tensor("op_832_cast_fp16")]; + tensor attn_11_transpose_x_0 = const()[name = tensor("attn_11_transpose_x_0"), val = tensor(false)]; + tensor attn_11_transpose_y_0 = const()[name = tensor("attn_11_transpose_y_0"), val = tensor(true)]; + tensor attn_11_cast_fp16 = matmul(transpose_x = attn_11_transpose_x_0, transpose_y = attn_11_transpose_y_0, x = var_832_cast_fp16, y = var_830_cast_fp16)[name = tensor("attn_11_cast_fp16")]; + tensor var_835 = const()[name = tensor("op_835"), val = 
tensor([1, 1280, 1, -1])]; + tensor input_41_cast_fp16 = reshape(shape = var_835, x = attn_11_cast_fp16)[name = tensor("input_41_cast_fp16")]; + tensor var_839 = const()[name = tensor("op_839"), val = tensor([1, 1])]; + tensor var_841 = const()[name = tensor("op_841"), val = tensor([1, 1])]; + tensor obj_23_pad_type_0 = const()[name = tensor("obj_23_pad_type_0"), val = tensor("custom")]; + tensor obj_23_pad_0 = const()[name = tensor("obj_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_5_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_5_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(220903040)))]; + tensor layers_5_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_5_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(224179904)))]; + tensor obj_23_cast_fp16 = conv(bias = layers_5_self_attn_o_proj_bias_to_fp16, dilations = var_841, groups = var_770, pad = obj_23_pad_0, pad_type = obj_23_pad_type_0, strides = var_839, weight = layers_5_self_attn_o_proj_weight_to_fp16, x = input_41_cast_fp16)[name = tensor("obj_23_cast_fp16")]; + tensor inputs_23_cast_fp16 = add(x = inputs_21_cast_fp16, y = obj_23_cast_fp16)[name = tensor("inputs_23_cast_fp16")]; + tensor var_847 = const()[name = tensor("op_847"), val = tensor([1])]; + tensor channels_mean_23_cast_fp16 = reduce_mean(axes = var_847, keep_dims = var_771, x = inputs_23_cast_fp16)[name = tensor("channels_mean_23_cast_fp16")]; + tensor zero_mean_23_cast_fp16 = sub(x = inputs_23_cast_fp16, y = channels_mean_23_cast_fp16)[name = tensor("zero_mean_23_cast_fp16")]; + tensor zero_mean_sq_23_cast_fp16 = mul(x = zero_mean_23_cast_fp16, y = zero_mean_23_cast_fp16)[name = tensor("zero_mean_sq_23_cast_fp16")]; + tensor var_851 = const()[name = tensor("op_851"), val = tensor([1])]; + tensor var_852_cast_fp16 = reduce_mean(axes = var_851, keep_dims = var_771, x = zero_mean_sq_23_cast_fp16)[name = tensor("op_852_cast_fp16")]; + tensor var_853_to_fp16 = const()[name = tensor("op_853_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_854_cast_fp16 = add(x = var_852_cast_fp16, y = var_853_to_fp16)[name = tensor("op_854_cast_fp16")]; + tensor denom_23_epsilon_0_to_fp16 = const()[name = tensor("denom_23_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_23_cast_fp16 = rsqrt(epsilon = denom_23_epsilon_0_to_fp16, x = var_854_cast_fp16)[name = tensor("denom_23_cast_fp16")]; + tensor out_23_cast_fp16 = mul(x = zero_mean_23_cast_fp16, y = denom_23_cast_fp16)[name = tensor("out_23_cast_fp16")]; + tensor input_43_gamma_0_to_fp16 = const()[name = tensor("input_43_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(224182528)))]; + tensor input_43_beta_0_to_fp16 = const()[name = tensor("input_43_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(224185152)))]; + tensor input_43_epsilon_0_to_fp16 = const()[name = tensor("input_43_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_43_cast_fp16 = batch_norm(beta = input_43_beta_0_to_fp16, epsilon = input_43_epsilon_0_to_fp16, gamma = input_43_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_23_cast_fp16)[name = tensor("input_43_cast_fp16")]; + tensor var_865 = const()[name = tensor("op_865"), val = tensor([1, 1])]; + tensor var_867 = const()[name = tensor("op_867"), val = tensor([1, 
1])]; + tensor input_45_pad_type_0 = const()[name = tensor("input_45_pad_type_0"), val = tensor("custom")]; + tensor input_45_pad_0 = const()[name = tensor("input_45_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_5_fc1_weight_to_fp16 = const()[name = tensor("layers_5_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(224187776)))]; + tensor layers_5_fc1_bias_to_fp16 = const()[name = tensor("layers_5_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(237295040)))]; + tensor input_45_cast_fp16 = conv(bias = layers_5_fc1_bias_to_fp16, dilations = var_867, groups = var_770, pad = input_45_pad_0, pad_type = input_45_pad_type_0, strides = var_865, weight = layers_5_fc1_weight_to_fp16, x = input_43_cast_fp16)[name = tensor("input_45_cast_fp16")]; + tensor input_47_mode_0 = const()[name = tensor("input_47_mode_0"), val = tensor("EXACT")]; + tensor input_47_cast_fp16 = gelu(mode = input_47_mode_0, x = input_45_cast_fp16)[name = tensor("input_47_cast_fp16")]; + tensor var_873 = const()[name = tensor("op_873"), val = tensor([1, 1])]; + tensor var_875 = const()[name = tensor("op_875"), val = tensor([1, 1])]; + tensor hidden_states_15_pad_type_0 = const()[name = tensor("hidden_states_15_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_15_pad_0 = const()[name = tensor("hidden_states_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_5_fc2_weight_to_fp16 = const()[name = tensor("layers_5_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(237305344)))]; + tensor layers_5_fc2_bias_to_fp16 = const()[name = tensor("layers_5_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(250412608)))]; + tensor hidden_states_15_cast_fp16 = conv(bias = layers_5_fc2_bias_to_fp16, dilations = var_875, groups = var_770, pad = hidden_states_15_pad_0, pad_type = hidden_states_15_pad_type_0, strides = var_873, weight = layers_5_fc2_weight_to_fp16, x = input_47_cast_fp16)[name = tensor("hidden_states_15_cast_fp16")]; + tensor inputs_25_cast_fp16 = add(x = inputs_23_cast_fp16, y = hidden_states_15_cast_fp16)[name = tensor("inputs_25_cast_fp16")]; + tensor var_886 = const()[name = tensor("op_886"), val = tensor(3)]; + tensor var_888 = const()[name = tensor("op_888"), val = tensor(1)]; + tensor var_889 = const()[name = tensor("op_889"), val = tensor(true)]; + tensor var_899 = const()[name = tensor("op_899"), val = tensor([1])]; + tensor channels_mean_25_cast_fp16 = reduce_mean(axes = var_899, keep_dims = var_889, x = inputs_25_cast_fp16)[name = tensor("channels_mean_25_cast_fp16")]; + tensor zero_mean_25_cast_fp16 = sub(x = inputs_25_cast_fp16, y = channels_mean_25_cast_fp16)[name = tensor("zero_mean_25_cast_fp16")]; + tensor zero_mean_sq_25_cast_fp16 = mul(x = zero_mean_25_cast_fp16, y = zero_mean_25_cast_fp16)[name = tensor("zero_mean_sq_25_cast_fp16")]; + tensor var_903 = const()[name = tensor("op_903"), val = tensor([1])]; + tensor var_904_cast_fp16 = reduce_mean(axes = var_903, keep_dims = var_889, x = zero_mean_sq_25_cast_fp16)[name = tensor("op_904_cast_fp16")]; + tensor var_905_to_fp16 = const()[name = tensor("op_905_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_906_cast_fp16 = add(x = var_904_cast_fp16, y = var_905_to_fp16)[name = tensor("op_906_cast_fp16")]; + tensor denom_25_epsilon_0_to_fp16 = const()[name = tensor("denom_25_epsilon_0_to_fp16"), val = 
tensor(0x1p-24)]; + tensor denom_25_cast_fp16 = rsqrt(epsilon = denom_25_epsilon_0_to_fp16, x = var_906_cast_fp16)[name = tensor("denom_25_cast_fp16")]; + tensor out_25_cast_fp16 = mul(x = zero_mean_25_cast_fp16, y = denom_25_cast_fp16)[name = tensor("out_25_cast_fp16")]; + tensor obj_25_gamma_0_to_fp16 = const()[name = tensor("obj_25_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(250415232)))]; + tensor obj_25_beta_0_to_fp16 = const()[name = tensor("obj_25_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(250417856)))]; + tensor obj_25_epsilon_0_to_fp16 = const()[name = tensor("obj_25_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_25_cast_fp16 = batch_norm(beta = obj_25_beta_0_to_fp16, epsilon = obj_25_epsilon_0_to_fp16, gamma = obj_25_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_25_cast_fp16)[name = tensor("obj_25_cast_fp16")]; + tensor var_921 = const()[name = tensor("op_921"), val = tensor([1, 1])]; + tensor var_923 = const()[name = tensor("op_923"), val = tensor([1, 1])]; + tensor query_13_pad_type_0 = const()[name = tensor("query_13_pad_type_0"), val = tensor("custom")]; + tensor query_13_pad_0 = const()[name = tensor("query_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_6_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_6_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(250420480)))]; + tensor layers_6_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_6_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(253697344)))]; + tensor query_13_cast_fp16 = conv(bias = layers_6_self_attn_q_proj_bias_to_fp16, dilations = var_923, groups = var_888, pad = query_13_pad_0, pad_type = query_13_pad_type_0, strides = var_921, weight = layers_6_self_attn_q_proj_weight_to_fp16, x = obj_25_cast_fp16)[name = tensor("query_13_cast_fp16")]; + tensor var_927 = const()[name = tensor("op_927"), val = tensor([1, 1])]; + tensor var_929 = const()[name = tensor("op_929"), val = tensor([1, 1])]; + tensor key_13_pad_type_0 = const()[name = tensor("key_13_pad_type_0"), val = tensor("custom")]; + tensor key_13_pad_0 = const()[name = tensor("key_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_6_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_6_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(253699968)))]; + tensor key_13_cast_fp16 = conv(dilations = var_929, groups = var_888, pad = key_13_pad_0, pad_type = key_13_pad_type_0, strides = var_927, weight = layers_6_self_attn_k_proj_weight_to_fp16, x = obj_25_cast_fp16)[name = tensor("key_13_cast_fp16")]; + tensor var_934 = const()[name = tensor("op_934"), val = tensor([1, 1])]; + tensor var_936 = const()[name = tensor("op_936"), val = tensor([1, 1])]; + tensor value_13_pad_type_0 = const()[name = tensor("value_13_pad_type_0"), val = tensor("custom")]; + tensor value_13_pad_0 = const()[name = tensor("value_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_6_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_6_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(256976832)))]; + tensor layers_6_self_attn_v_proj_bias_to_fp16 = const()[name = 
tensor("layers_6_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(260253696)))]; + tensor value_13_cast_fp16 = conv(bias = layers_6_self_attn_v_proj_bias_to_fp16, dilations = var_936, groups = var_888, pad = value_13_pad_0, pad_type = value_13_pad_type_0, strides = var_934, weight = layers_6_self_attn_v_proj_weight_to_fp16, x = obj_25_cast_fp16)[name = tensor("value_13_cast_fp16")]; + tensor var_940 = const()[name = tensor("op_940"), val = tensor([1, 20, 64, -1])]; + tensor var_941_cast_fp16 = reshape(shape = var_940, x = query_13_cast_fp16)[name = tensor("op_941_cast_fp16")]; + tensor var_942_to_fp16 = const()[name = tensor("op_942_to_fp16"), val = tensor(0x1p-3)]; + tensor var_943_cast_fp16 = mul(x = var_941_cast_fp16, y = var_942_to_fp16)[name = tensor("op_943_cast_fp16")]; + tensor var_944 = const()[name = tensor("op_944"), val = tensor([1, 20, 64, -1])]; + tensor var_945_cast_fp16 = reshape(shape = var_944, x = key_13_cast_fp16)[name = tensor("op_945_cast_fp16")]; + tensor mh_w_13_transpose_x_0 = const()[name = tensor("mh_w_13_transpose_x_0"), val = tensor(true)]; + tensor mh_w_13_transpose_y_0 = const()[name = tensor("mh_w_13_transpose_y_0"), val = tensor(false)]; + tensor mh_w_13_cast_fp16 = matmul(transpose_x = mh_w_13_transpose_x_0, transpose_y = mh_w_13_transpose_y_0, x = var_943_cast_fp16, y = var_945_cast_fp16)[name = tensor("mh_w_13_cast_fp16")]; + tensor var_948_cast_fp16 = softmax(axis = var_886, x = mh_w_13_cast_fp16)[name = tensor("op_948_cast_fp16")]; + tensor var_949 = const()[name = tensor("op_949"), val = tensor([1, 20, 64, -1])]; + tensor var_950_cast_fp16 = reshape(shape = var_949, x = value_13_cast_fp16)[name = tensor("op_950_cast_fp16")]; + tensor attn_13_transpose_x_0 = const()[name = tensor("attn_13_transpose_x_0"), val = tensor(false)]; + tensor attn_13_transpose_y_0 = const()[name = tensor("attn_13_transpose_y_0"), val = tensor(true)]; + tensor attn_13_cast_fp16 = matmul(transpose_x = attn_13_transpose_x_0, transpose_y = attn_13_transpose_y_0, x = var_950_cast_fp16, y = var_948_cast_fp16)[name = tensor("attn_13_cast_fp16")]; + tensor var_953 = const()[name = tensor("op_953"), val = tensor([1, 1280, 1, -1])]; + tensor input_49_cast_fp16 = reshape(shape = var_953, x = attn_13_cast_fp16)[name = tensor("input_49_cast_fp16")]; + tensor var_957 = const()[name = tensor("op_957"), val = tensor([1, 1])]; + tensor var_959 = const()[name = tensor("op_959"), val = tensor([1, 1])]; + tensor obj_27_pad_type_0 = const()[name = tensor("obj_27_pad_type_0"), val = tensor("custom")]; + tensor obj_27_pad_0 = const()[name = tensor("obj_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_6_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_6_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(260256320)))]; + tensor layers_6_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_6_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(263533184)))]; + tensor obj_27_cast_fp16 = conv(bias = layers_6_self_attn_o_proj_bias_to_fp16, dilations = var_959, groups = var_888, pad = obj_27_pad_0, pad_type = obj_27_pad_type_0, strides = var_957, weight = layers_6_self_attn_o_proj_weight_to_fp16, x = input_49_cast_fp16)[name = tensor("obj_27_cast_fp16")]; + tensor inputs_27_cast_fp16 = add(x = inputs_25_cast_fp16, y = obj_27_cast_fp16)[name = 
tensor("inputs_27_cast_fp16")]; + tensor var_965 = const()[name = tensor("op_965"), val = tensor([1])]; + tensor channels_mean_27_cast_fp16 = reduce_mean(axes = var_965, keep_dims = var_889, x = inputs_27_cast_fp16)[name = tensor("channels_mean_27_cast_fp16")]; + tensor zero_mean_27_cast_fp16 = sub(x = inputs_27_cast_fp16, y = channels_mean_27_cast_fp16)[name = tensor("zero_mean_27_cast_fp16")]; + tensor zero_mean_sq_27_cast_fp16 = mul(x = zero_mean_27_cast_fp16, y = zero_mean_27_cast_fp16)[name = tensor("zero_mean_sq_27_cast_fp16")]; + tensor var_969 = const()[name = tensor("op_969"), val = tensor([1])]; + tensor var_970_cast_fp16 = reduce_mean(axes = var_969, keep_dims = var_889, x = zero_mean_sq_27_cast_fp16)[name = tensor("op_970_cast_fp16")]; + tensor var_971_to_fp16 = const()[name = tensor("op_971_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_972_cast_fp16 = add(x = var_970_cast_fp16, y = var_971_to_fp16)[name = tensor("op_972_cast_fp16")]; + tensor denom_27_epsilon_0_to_fp16 = const()[name = tensor("denom_27_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_27_cast_fp16 = rsqrt(epsilon = denom_27_epsilon_0_to_fp16, x = var_972_cast_fp16)[name = tensor("denom_27_cast_fp16")]; + tensor out_27_cast_fp16 = mul(x = zero_mean_27_cast_fp16, y = denom_27_cast_fp16)[name = tensor("out_27_cast_fp16")]; + tensor input_51_gamma_0_to_fp16 = const()[name = tensor("input_51_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(263535808)))]; + tensor input_51_beta_0_to_fp16 = const()[name = tensor("input_51_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(263538432)))]; + tensor input_51_epsilon_0_to_fp16 = const()[name = tensor("input_51_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_51_cast_fp16 = batch_norm(beta = input_51_beta_0_to_fp16, epsilon = input_51_epsilon_0_to_fp16, gamma = input_51_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_27_cast_fp16)[name = tensor("input_51_cast_fp16")]; + tensor var_983 = const()[name = tensor("op_983"), val = tensor([1, 1])]; + tensor var_985 = const()[name = tensor("op_985"), val = tensor([1, 1])]; + tensor input_53_pad_type_0 = const()[name = tensor("input_53_pad_type_0"), val = tensor("custom")]; + tensor input_53_pad_0 = const()[name = tensor("input_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_6_fc1_weight_to_fp16 = const()[name = tensor("layers_6_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(263541056)))]; + tensor layers_6_fc1_bias_to_fp16 = const()[name = tensor("layers_6_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(276648320)))]; + tensor input_53_cast_fp16 = conv(bias = layers_6_fc1_bias_to_fp16, dilations = var_985, groups = var_888, pad = input_53_pad_0, pad_type = input_53_pad_type_0, strides = var_983, weight = layers_6_fc1_weight_to_fp16, x = input_51_cast_fp16)[name = tensor("input_53_cast_fp16")]; + tensor input_55_mode_0 = const()[name = tensor("input_55_mode_0"), val = tensor("EXACT")]; + tensor input_55_cast_fp16 = gelu(mode = input_55_mode_0, x = input_53_cast_fp16)[name = tensor("input_55_cast_fp16")]; + tensor var_991 = const()[name = tensor("op_991"), val = tensor([1, 1])]; + tensor var_993 = const()[name = tensor("op_993"), val = tensor([1, 1])]; + tensor hidden_states_17_pad_type_0 = const()[name = 
tensor("hidden_states_17_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_17_pad_0 = const()[name = tensor("hidden_states_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_6_fc2_weight_to_fp16 = const()[name = tensor("layers_6_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(276658624)))]; + tensor layers_6_fc2_bias_to_fp16 = const()[name = tensor("layers_6_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(289765888)))]; + tensor hidden_states_17_cast_fp16 = conv(bias = layers_6_fc2_bias_to_fp16, dilations = var_993, groups = var_888, pad = hidden_states_17_pad_0, pad_type = hidden_states_17_pad_type_0, strides = var_991, weight = layers_6_fc2_weight_to_fp16, x = input_55_cast_fp16)[name = tensor("hidden_states_17_cast_fp16")]; + tensor inputs_29_cast_fp16 = add(x = inputs_27_cast_fp16, y = hidden_states_17_cast_fp16)[name = tensor("inputs_29_cast_fp16")]; + tensor var_1004 = const()[name = tensor("op_1004"), val = tensor(3)]; + tensor var_1006 = const()[name = tensor("op_1006"), val = tensor(1)]; + tensor var_1007 = const()[name = tensor("op_1007"), val = tensor(true)]; + tensor var_1017 = const()[name = tensor("op_1017"), val = tensor([1])]; + tensor channels_mean_29_cast_fp16 = reduce_mean(axes = var_1017, keep_dims = var_1007, x = inputs_29_cast_fp16)[name = tensor("channels_mean_29_cast_fp16")]; + tensor zero_mean_29_cast_fp16 = sub(x = inputs_29_cast_fp16, y = channels_mean_29_cast_fp16)[name = tensor("zero_mean_29_cast_fp16")]; + tensor zero_mean_sq_29_cast_fp16 = mul(x = zero_mean_29_cast_fp16, y = zero_mean_29_cast_fp16)[name = tensor("zero_mean_sq_29_cast_fp16")]; + tensor var_1021 = const()[name = tensor("op_1021"), val = tensor([1])]; + tensor var_1022_cast_fp16 = reduce_mean(axes = var_1021, keep_dims = var_1007, x = zero_mean_sq_29_cast_fp16)[name = tensor("op_1022_cast_fp16")]; + tensor var_1023_to_fp16 = const()[name = tensor("op_1023_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1024_cast_fp16 = add(x = var_1022_cast_fp16, y = var_1023_to_fp16)[name = tensor("op_1024_cast_fp16")]; + tensor denom_29_epsilon_0_to_fp16 = const()[name = tensor("denom_29_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_29_cast_fp16 = rsqrt(epsilon = denom_29_epsilon_0_to_fp16, x = var_1024_cast_fp16)[name = tensor("denom_29_cast_fp16")]; + tensor out_29_cast_fp16 = mul(x = zero_mean_29_cast_fp16, y = denom_29_cast_fp16)[name = tensor("out_29_cast_fp16")]; + tensor obj_29_gamma_0_to_fp16 = const()[name = tensor("obj_29_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(289768512)))]; + tensor obj_29_beta_0_to_fp16 = const()[name = tensor("obj_29_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(289771136)))]; + tensor obj_29_epsilon_0_to_fp16 = const()[name = tensor("obj_29_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_29_cast_fp16 = batch_norm(beta = obj_29_beta_0_to_fp16, epsilon = obj_29_epsilon_0_to_fp16, gamma = obj_29_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_29_cast_fp16)[name = tensor("obj_29_cast_fp16")]; + tensor var_1039 = const()[name = tensor("op_1039"), val = tensor([1, 1])]; + tensor var_1041 = const()[name = tensor("op_1041"), val = tensor([1, 1])]; + tensor query_15_pad_type_0 = const()[name = tensor("query_15_pad_type_0"), val = tensor("custom")]; + tensor 
query_15_pad_0 = const()[name = tensor("query_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_7_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_7_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(289773760)))]; + tensor layers_7_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_7_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293050624)))]; + tensor query_15_cast_fp16 = conv(bias = layers_7_self_attn_q_proj_bias_to_fp16, dilations = var_1041, groups = var_1006, pad = query_15_pad_0, pad_type = query_15_pad_type_0, strides = var_1039, weight = layers_7_self_attn_q_proj_weight_to_fp16, x = obj_29_cast_fp16)[name = tensor("query_15_cast_fp16")]; + tensor var_1045 = const()[name = tensor("op_1045"), val = tensor([1, 1])]; + tensor var_1047 = const()[name = tensor("op_1047"), val = tensor([1, 1])]; + tensor key_15_pad_type_0 = const()[name = tensor("key_15_pad_type_0"), val = tensor("custom")]; + tensor key_15_pad_0 = const()[name = tensor("key_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_7_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_7_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293053248)))]; + tensor key_15_cast_fp16 = conv(dilations = var_1047, groups = var_1006, pad = key_15_pad_0, pad_type = key_15_pad_type_0, strides = var_1045, weight = layers_7_self_attn_k_proj_weight_to_fp16, x = obj_29_cast_fp16)[name = tensor("key_15_cast_fp16")]; + tensor var_1052 = const()[name = tensor("op_1052"), val = tensor([1, 1])]; + tensor var_1054 = const()[name = tensor("op_1054"), val = tensor([1, 1])]; + tensor value_15_pad_type_0 = const()[name = tensor("value_15_pad_type_0"), val = tensor("custom")]; + tensor value_15_pad_0 = const()[name = tensor("value_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_7_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_7_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(296330112)))]; + tensor layers_7_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_7_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(299606976)))]; + tensor value_15_cast_fp16 = conv(bias = layers_7_self_attn_v_proj_bias_to_fp16, dilations = var_1054, groups = var_1006, pad = value_15_pad_0, pad_type = value_15_pad_type_0, strides = var_1052, weight = layers_7_self_attn_v_proj_weight_to_fp16, x = obj_29_cast_fp16)[name = tensor("value_15_cast_fp16")]; + tensor var_1058 = const()[name = tensor("op_1058"), val = tensor([1, 20, 64, -1])]; + tensor var_1059_cast_fp16 = reshape(shape = var_1058, x = query_15_cast_fp16)[name = tensor("op_1059_cast_fp16")]; + tensor var_1060_to_fp16 = const()[name = tensor("op_1060_to_fp16"), val = tensor(0x1p-3)]; + tensor var_1061_cast_fp16 = mul(x = var_1059_cast_fp16, y = var_1060_to_fp16)[name = tensor("op_1061_cast_fp16")]; + tensor var_1062 = const()[name = tensor("op_1062"), val = tensor([1, 20, 64, -1])]; + tensor var_1063_cast_fp16 = reshape(shape = var_1062, x = key_15_cast_fp16)[name = tensor("op_1063_cast_fp16")]; + tensor mh_w_15_transpose_x_0 = const()[name = tensor("mh_w_15_transpose_x_0"), val = tensor(true)]; + tensor mh_w_15_transpose_y_0 = const()[name = tensor("mh_w_15_transpose_y_0"), val = 
tensor(false)]; + tensor mh_w_15_cast_fp16 = matmul(transpose_x = mh_w_15_transpose_x_0, transpose_y = mh_w_15_transpose_y_0, x = var_1061_cast_fp16, y = var_1063_cast_fp16)[name = tensor("mh_w_15_cast_fp16")]; + tensor var_1066_cast_fp16 = softmax(axis = var_1004, x = mh_w_15_cast_fp16)[name = tensor("op_1066_cast_fp16")]; + tensor var_1067 = const()[name = tensor("op_1067"), val = tensor([1, 20, 64, -1])]; + tensor var_1068_cast_fp16 = reshape(shape = var_1067, x = value_15_cast_fp16)[name = tensor("op_1068_cast_fp16")]; + tensor attn_15_transpose_x_0 = const()[name = tensor("attn_15_transpose_x_0"), val = tensor(false)]; + tensor attn_15_transpose_y_0 = const()[name = tensor("attn_15_transpose_y_0"), val = tensor(true)]; + tensor attn_15_cast_fp16 = matmul(transpose_x = attn_15_transpose_x_0, transpose_y = attn_15_transpose_y_0, x = var_1068_cast_fp16, y = var_1066_cast_fp16)[name = tensor("attn_15_cast_fp16")]; + tensor var_1071 = const()[name = tensor("op_1071"), val = tensor([1, 1280, 1, -1])]; + tensor input_57_cast_fp16 = reshape(shape = var_1071, x = attn_15_cast_fp16)[name = tensor("input_57_cast_fp16")]; + tensor var_1075 = const()[name = tensor("op_1075"), val = tensor([1, 1])]; + tensor var_1077 = const()[name = tensor("op_1077"), val = tensor([1, 1])]; + tensor obj_31_pad_type_0 = const()[name = tensor("obj_31_pad_type_0"), val = tensor("custom")]; + tensor obj_31_pad_0 = const()[name = tensor("obj_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_7_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_7_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(299609600)))]; + tensor layers_7_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_7_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(302886464)))]; + tensor obj_31_cast_fp16 = conv(bias = layers_7_self_attn_o_proj_bias_to_fp16, dilations = var_1077, groups = var_1006, pad = obj_31_pad_0, pad_type = obj_31_pad_type_0, strides = var_1075, weight = layers_7_self_attn_o_proj_weight_to_fp16, x = input_57_cast_fp16)[name = tensor("obj_31_cast_fp16")]; + tensor inputs_31_cast_fp16 = add(x = inputs_29_cast_fp16, y = obj_31_cast_fp16)[name = tensor("inputs_31_cast_fp16")]; + tensor var_1083 = const()[name = tensor("op_1083"), val = tensor([1])]; + tensor channels_mean_31_cast_fp16 = reduce_mean(axes = var_1083, keep_dims = var_1007, x = inputs_31_cast_fp16)[name = tensor("channels_mean_31_cast_fp16")]; + tensor zero_mean_31_cast_fp16 = sub(x = inputs_31_cast_fp16, y = channels_mean_31_cast_fp16)[name = tensor("zero_mean_31_cast_fp16")]; + tensor zero_mean_sq_31_cast_fp16 = mul(x = zero_mean_31_cast_fp16, y = zero_mean_31_cast_fp16)[name = tensor("zero_mean_sq_31_cast_fp16")]; + tensor var_1087 = const()[name = tensor("op_1087"), val = tensor([1])]; + tensor var_1088_cast_fp16 = reduce_mean(axes = var_1087, keep_dims = var_1007, x = zero_mean_sq_31_cast_fp16)[name = tensor("op_1088_cast_fp16")]; + tensor var_1089_to_fp16 = const()[name = tensor("op_1089_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1090_cast_fp16 = add(x = var_1088_cast_fp16, y = var_1089_to_fp16)[name = tensor("op_1090_cast_fp16")]; + tensor denom_31_epsilon_0_to_fp16 = const()[name = tensor("denom_31_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_31_cast_fp16 = rsqrt(epsilon = denom_31_epsilon_0_to_fp16, x = var_1090_cast_fp16)[name = tensor("denom_31_cast_fp16")]; + tensor 
out_31_cast_fp16 = mul(x = zero_mean_31_cast_fp16, y = denom_31_cast_fp16)[name = tensor("out_31_cast_fp16")]; + tensor input_59_gamma_0_to_fp16 = const()[name = tensor("input_59_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(302889088)))]; + tensor input_59_beta_0_to_fp16 = const()[name = tensor("input_59_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(302891712)))]; + tensor input_59_epsilon_0_to_fp16 = const()[name = tensor("input_59_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_59_cast_fp16 = batch_norm(beta = input_59_beta_0_to_fp16, epsilon = input_59_epsilon_0_to_fp16, gamma = input_59_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_31_cast_fp16)[name = tensor("input_59_cast_fp16")]; + tensor var_1101 = const()[name = tensor("op_1101"), val = tensor([1, 1])]; + tensor var_1103 = const()[name = tensor("op_1103"), val = tensor([1, 1])]; + tensor input_61_pad_type_0 = const()[name = tensor("input_61_pad_type_0"), val = tensor("custom")]; + tensor input_61_pad_0 = const()[name = tensor("input_61_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_7_fc1_weight_to_fp16 = const()[name = tensor("layers_7_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(302894336)))]; + tensor layers_7_fc1_bias_to_fp16 = const()[name = tensor("layers_7_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(316001600)))]; + tensor input_61_cast_fp16 = conv(bias = layers_7_fc1_bias_to_fp16, dilations = var_1103, groups = var_1006, pad = input_61_pad_0, pad_type = input_61_pad_type_0, strides = var_1101, weight = layers_7_fc1_weight_to_fp16, x = input_59_cast_fp16)[name = tensor("input_61_cast_fp16")]; + tensor input_63_mode_0 = const()[name = tensor("input_63_mode_0"), val = tensor("EXACT")]; + tensor input_63_cast_fp16 = gelu(mode = input_63_mode_0, x = input_61_cast_fp16)[name = tensor("input_63_cast_fp16")]; + tensor var_1109 = const()[name = tensor("op_1109"), val = tensor([1, 1])]; + tensor var_1111 = const()[name = tensor("op_1111"), val = tensor([1, 1])]; + tensor hidden_states_19_pad_type_0 = const()[name = tensor("hidden_states_19_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_19_pad_0 = const()[name = tensor("hidden_states_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_7_fc2_weight_to_fp16 = const()[name = tensor("layers_7_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(316011904)))]; + tensor layers_7_fc2_bias_to_fp16 = const()[name = tensor("layers_7_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(329119168)))]; + tensor hidden_states_19_cast_fp16 = conv(bias = layers_7_fc2_bias_to_fp16, dilations = var_1111, groups = var_1006, pad = hidden_states_19_pad_0, pad_type = hidden_states_19_pad_type_0, strides = var_1109, weight = layers_7_fc2_weight_to_fp16, x = input_63_cast_fp16)[name = tensor("hidden_states_19_cast_fp16")]; + tensor inputs_33_cast_fp16 = add(x = inputs_31_cast_fp16, y = hidden_states_19_cast_fp16)[name = tensor("inputs_33_cast_fp16")]; + tensor var_1122 = const()[name = tensor("op_1122"), val = tensor(3)]; + tensor var_1124 = const()[name = tensor("op_1124"), val = tensor(1)]; + tensor var_1125 = const()[name = tensor("op_1125"), val = tensor(true)]; + 
tensor var_1135 = const()[name = tensor("op_1135"), val = tensor([1])]; + tensor channels_mean_33_cast_fp16 = reduce_mean(axes = var_1135, keep_dims = var_1125, x = inputs_33_cast_fp16)[name = tensor("channels_mean_33_cast_fp16")]; + tensor zero_mean_33_cast_fp16 = sub(x = inputs_33_cast_fp16, y = channels_mean_33_cast_fp16)[name = tensor("zero_mean_33_cast_fp16")]; + tensor zero_mean_sq_33_cast_fp16 = mul(x = zero_mean_33_cast_fp16, y = zero_mean_33_cast_fp16)[name = tensor("zero_mean_sq_33_cast_fp16")]; + tensor var_1139 = const()[name = tensor("op_1139"), val = tensor([1])]; + tensor var_1140_cast_fp16 = reduce_mean(axes = var_1139, keep_dims = var_1125, x = zero_mean_sq_33_cast_fp16)[name = tensor("op_1140_cast_fp16")]; + tensor var_1141_to_fp16 = const()[name = tensor("op_1141_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1142_cast_fp16 = add(x = var_1140_cast_fp16, y = var_1141_to_fp16)[name = tensor("op_1142_cast_fp16")]; + tensor denom_33_epsilon_0_to_fp16 = const()[name = tensor("denom_33_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_33_cast_fp16 = rsqrt(epsilon = denom_33_epsilon_0_to_fp16, x = var_1142_cast_fp16)[name = tensor("denom_33_cast_fp16")]; + tensor out_33_cast_fp16 = mul(x = zero_mean_33_cast_fp16, y = denom_33_cast_fp16)[name = tensor("out_33_cast_fp16")]; + tensor obj_33_gamma_0_to_fp16 = const()[name = tensor("obj_33_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(329121792)))]; + tensor obj_33_beta_0_to_fp16 = const()[name = tensor("obj_33_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(329124416)))]; + tensor obj_33_epsilon_0_to_fp16 = const()[name = tensor("obj_33_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_33_cast_fp16 = batch_norm(beta = obj_33_beta_0_to_fp16, epsilon = obj_33_epsilon_0_to_fp16, gamma = obj_33_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_33_cast_fp16)[name = tensor("obj_33_cast_fp16")]; + tensor var_1157 = const()[name = tensor("op_1157"), val = tensor([1, 1])]; + tensor var_1159 = const()[name = tensor("op_1159"), val = tensor([1, 1])]; + tensor query_17_pad_type_0 = const()[name = tensor("query_17_pad_type_0"), val = tensor("custom")]; + tensor query_17_pad_0 = const()[name = tensor("query_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_8_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_8_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(329127040)))]; + tensor layers_8_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_8_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(332403904)))]; + tensor query_17_cast_fp16 = conv(bias = layers_8_self_attn_q_proj_bias_to_fp16, dilations = var_1159, groups = var_1124, pad = query_17_pad_0, pad_type = query_17_pad_type_0, strides = var_1157, weight = layers_8_self_attn_q_proj_weight_to_fp16, x = obj_33_cast_fp16)[name = tensor("query_17_cast_fp16")]; + tensor var_1163 = const()[name = tensor("op_1163"), val = tensor([1, 1])]; + tensor var_1165 = const()[name = tensor("op_1165"), val = tensor([1, 1])]; + tensor key_17_pad_type_0 = const()[name = tensor("key_17_pad_type_0"), val = tensor("custom")]; + tensor key_17_pad_0 = const()[name = tensor("key_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_8_self_attn_k_proj_weight_to_fp16 = 
const()[name = tensor("layers_8_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(332406528)))]; + tensor key_17_cast_fp16 = conv(dilations = var_1165, groups = var_1124, pad = key_17_pad_0, pad_type = key_17_pad_type_0, strides = var_1163, weight = layers_8_self_attn_k_proj_weight_to_fp16, x = obj_33_cast_fp16)[name = tensor("key_17_cast_fp16")]; + tensor var_1170 = const()[name = tensor("op_1170"), val = tensor([1, 1])]; + tensor var_1172 = const()[name = tensor("op_1172"), val = tensor([1, 1])]; + tensor value_17_pad_type_0 = const()[name = tensor("value_17_pad_type_0"), val = tensor("custom")]; + tensor value_17_pad_0 = const()[name = tensor("value_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_8_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_8_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335683392)))]; + tensor layers_8_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_8_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338960256)))]; + tensor value_17_cast_fp16 = conv(bias = layers_8_self_attn_v_proj_bias_to_fp16, dilations = var_1172, groups = var_1124, pad = value_17_pad_0, pad_type = value_17_pad_type_0, strides = var_1170, weight = layers_8_self_attn_v_proj_weight_to_fp16, x = obj_33_cast_fp16)[name = tensor("value_17_cast_fp16")]; + tensor var_1176 = const()[name = tensor("op_1176"), val = tensor([1, 20, 64, -1])]; + tensor var_1177_cast_fp16 = reshape(shape = var_1176, x = query_17_cast_fp16)[name = tensor("op_1177_cast_fp16")]; + tensor var_1178_to_fp16 = const()[name = tensor("op_1178_to_fp16"), val = tensor(0x1p-3)]; + tensor var_1179_cast_fp16 = mul(x = var_1177_cast_fp16, y = var_1178_to_fp16)[name = tensor("op_1179_cast_fp16")]; + tensor var_1180 = const()[name = tensor("op_1180"), val = tensor([1, 20, 64, -1])]; + tensor var_1181_cast_fp16 = reshape(shape = var_1180, x = key_17_cast_fp16)[name = tensor("op_1181_cast_fp16")]; + tensor mh_w_17_transpose_x_0 = const()[name = tensor("mh_w_17_transpose_x_0"), val = tensor(true)]; + tensor mh_w_17_transpose_y_0 = const()[name = tensor("mh_w_17_transpose_y_0"), val = tensor(false)]; + tensor mh_w_17_cast_fp16 = matmul(transpose_x = mh_w_17_transpose_x_0, transpose_y = mh_w_17_transpose_y_0, x = var_1179_cast_fp16, y = var_1181_cast_fp16)[name = tensor("mh_w_17_cast_fp16")]; + tensor var_1184_cast_fp16 = softmax(axis = var_1122, x = mh_w_17_cast_fp16)[name = tensor("op_1184_cast_fp16")]; + tensor var_1185 = const()[name = tensor("op_1185"), val = tensor([1, 20, 64, -1])]; + tensor var_1186_cast_fp16 = reshape(shape = var_1185, x = value_17_cast_fp16)[name = tensor("op_1186_cast_fp16")]; + tensor attn_17_transpose_x_0 = const()[name = tensor("attn_17_transpose_x_0"), val = tensor(false)]; + tensor attn_17_transpose_y_0 = const()[name = tensor("attn_17_transpose_y_0"), val = tensor(true)]; + tensor attn_17_cast_fp16 = matmul(transpose_x = attn_17_transpose_x_0, transpose_y = attn_17_transpose_y_0, x = var_1186_cast_fp16, y = var_1184_cast_fp16)[name = tensor("attn_17_cast_fp16")]; + tensor var_1189 = const()[name = tensor("op_1189"), val = tensor([1, 1280, 1, -1])]; + tensor input_65_cast_fp16 = reshape(shape = var_1189, x = attn_17_cast_fp16)[name = tensor("input_65_cast_fp16")]; + tensor var_1193 = const()[name = tensor("op_1193"), val = tensor([1, 1])]; + tensor var_1195 = 
const()[name = tensor("op_1195"), val = tensor([1, 1])]; + tensor obj_35_pad_type_0 = const()[name = tensor("obj_35_pad_type_0"), val = tensor("custom")]; + tensor obj_35_pad_0 = const()[name = tensor("obj_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_8_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_8_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338962880)))]; + tensor layers_8_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_8_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(342239744)))]; + tensor obj_35_cast_fp16 = conv(bias = layers_8_self_attn_o_proj_bias_to_fp16, dilations = var_1195, groups = var_1124, pad = obj_35_pad_0, pad_type = obj_35_pad_type_0, strides = var_1193, weight = layers_8_self_attn_o_proj_weight_to_fp16, x = input_65_cast_fp16)[name = tensor("obj_35_cast_fp16")]; + tensor inputs_35_cast_fp16 = add(x = inputs_33_cast_fp16, y = obj_35_cast_fp16)[name = tensor("inputs_35_cast_fp16")]; + tensor var_1201 = const()[name = tensor("op_1201"), val = tensor([1])]; + tensor channels_mean_35_cast_fp16 = reduce_mean(axes = var_1201, keep_dims = var_1125, x = inputs_35_cast_fp16)[name = tensor("channels_mean_35_cast_fp16")]; + tensor zero_mean_35_cast_fp16 = sub(x = inputs_35_cast_fp16, y = channels_mean_35_cast_fp16)[name = tensor("zero_mean_35_cast_fp16")]; + tensor zero_mean_sq_35_cast_fp16 = mul(x = zero_mean_35_cast_fp16, y = zero_mean_35_cast_fp16)[name = tensor("zero_mean_sq_35_cast_fp16")]; + tensor var_1205 = const()[name = tensor("op_1205"), val = tensor([1])]; + tensor var_1206_cast_fp16 = reduce_mean(axes = var_1205, keep_dims = var_1125, x = zero_mean_sq_35_cast_fp16)[name = tensor("op_1206_cast_fp16")]; + tensor var_1207_to_fp16 = const()[name = tensor("op_1207_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1208_cast_fp16 = add(x = var_1206_cast_fp16, y = var_1207_to_fp16)[name = tensor("op_1208_cast_fp16")]; + tensor denom_35_epsilon_0_to_fp16 = const()[name = tensor("denom_35_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_35_cast_fp16 = rsqrt(epsilon = denom_35_epsilon_0_to_fp16, x = var_1208_cast_fp16)[name = tensor("denom_35_cast_fp16")]; + tensor out_35_cast_fp16 = mul(x = zero_mean_35_cast_fp16, y = denom_35_cast_fp16)[name = tensor("out_35_cast_fp16")]; + tensor input_67_gamma_0_to_fp16 = const()[name = tensor("input_67_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(342242368)))]; + tensor input_67_beta_0_to_fp16 = const()[name = tensor("input_67_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(342244992)))]; + tensor input_67_epsilon_0_to_fp16 = const()[name = tensor("input_67_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_67_cast_fp16 = batch_norm(beta = input_67_beta_0_to_fp16, epsilon = input_67_epsilon_0_to_fp16, gamma = input_67_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_35_cast_fp16)[name = tensor("input_67_cast_fp16")]; + tensor var_1219 = const()[name = tensor("op_1219"), val = tensor([1, 1])]; + tensor var_1221 = const()[name = tensor("op_1221"), val = tensor([1, 1])]; + tensor input_69_pad_type_0 = const()[name = tensor("input_69_pad_type_0"), val = tensor("custom")]; + tensor input_69_pad_0 = const()[name = tensor("input_69_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
layers_8_fc1_weight_to_fp16 = const()[name = tensor("layers_8_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(342247616)))]; + tensor layers_8_fc1_bias_to_fp16 = const()[name = tensor("layers_8_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(355354880)))]; + tensor input_69_cast_fp16 = conv(bias = layers_8_fc1_bias_to_fp16, dilations = var_1221, groups = var_1124, pad = input_69_pad_0, pad_type = input_69_pad_type_0, strides = var_1219, weight = layers_8_fc1_weight_to_fp16, x = input_67_cast_fp16)[name = tensor("input_69_cast_fp16")]; + tensor input_71_mode_0 = const()[name = tensor("input_71_mode_0"), val = tensor("EXACT")]; + tensor input_71_cast_fp16 = gelu(mode = input_71_mode_0, x = input_69_cast_fp16)[name = tensor("input_71_cast_fp16")]; + tensor var_1227 = const()[name = tensor("op_1227"), val = tensor([1, 1])]; + tensor var_1229 = const()[name = tensor("op_1229"), val = tensor([1, 1])]; + tensor hidden_states_21_pad_type_0 = const()[name = tensor("hidden_states_21_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_21_pad_0 = const()[name = tensor("hidden_states_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_8_fc2_weight_to_fp16 = const()[name = tensor("layers_8_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(355365184)))]; + tensor layers_8_fc2_bias_to_fp16 = const()[name = tensor("layers_8_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(368472448)))]; + tensor hidden_states_21_cast_fp16 = conv(bias = layers_8_fc2_bias_to_fp16, dilations = var_1229, groups = var_1124, pad = hidden_states_21_pad_0, pad_type = hidden_states_21_pad_type_0, strides = var_1227, weight = layers_8_fc2_weight_to_fp16, x = input_71_cast_fp16)[name = tensor("hidden_states_21_cast_fp16")]; + tensor inputs_37_cast_fp16 = add(x = inputs_35_cast_fp16, y = hidden_states_21_cast_fp16)[name = tensor("inputs_37_cast_fp16")]; + tensor var_1240 = const()[name = tensor("op_1240"), val = tensor(3)]; + tensor var_1242 = const()[name = tensor("op_1242"), val = tensor(1)]; + tensor var_1243 = const()[name = tensor("op_1243"), val = tensor(true)]; + tensor var_1253 = const()[name = tensor("op_1253"), val = tensor([1])]; + tensor channels_mean_37_cast_fp16 = reduce_mean(axes = var_1253, keep_dims = var_1243, x = inputs_37_cast_fp16)[name = tensor("channels_mean_37_cast_fp16")]; + tensor zero_mean_37_cast_fp16 = sub(x = inputs_37_cast_fp16, y = channels_mean_37_cast_fp16)[name = tensor("zero_mean_37_cast_fp16")]; + tensor zero_mean_sq_37_cast_fp16 = mul(x = zero_mean_37_cast_fp16, y = zero_mean_37_cast_fp16)[name = tensor("zero_mean_sq_37_cast_fp16")]; + tensor var_1257 = const()[name = tensor("op_1257"), val = tensor([1])]; + tensor var_1258_cast_fp16 = reduce_mean(axes = var_1257, keep_dims = var_1243, x = zero_mean_sq_37_cast_fp16)[name = tensor("op_1258_cast_fp16")]; + tensor var_1259_to_fp16 = const()[name = tensor("op_1259_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1260_cast_fp16 = add(x = var_1258_cast_fp16, y = var_1259_to_fp16)[name = tensor("op_1260_cast_fp16")]; + tensor denom_37_epsilon_0_to_fp16 = const()[name = tensor("denom_37_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_37_cast_fp16 = rsqrt(epsilon = denom_37_epsilon_0_to_fp16, x = var_1260_cast_fp16)[name = tensor("denom_37_cast_fp16")]; + tensor out_37_cast_fp16 = mul(x = 
zero_mean_37_cast_fp16, y = denom_37_cast_fp16)[name = tensor("out_37_cast_fp16")]; + tensor obj_37_gamma_0_to_fp16 = const()[name = tensor("obj_37_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(368475072)))]; + tensor obj_37_beta_0_to_fp16 = const()[name = tensor("obj_37_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(368477696)))]; + tensor obj_37_epsilon_0_to_fp16 = const()[name = tensor("obj_37_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_37_cast_fp16 = batch_norm(beta = obj_37_beta_0_to_fp16, epsilon = obj_37_epsilon_0_to_fp16, gamma = obj_37_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_37_cast_fp16)[name = tensor("obj_37_cast_fp16")]; + tensor var_1275 = const()[name = tensor("op_1275"), val = tensor([1, 1])]; + tensor var_1277 = const()[name = tensor("op_1277"), val = tensor([1, 1])]; + tensor query_19_pad_type_0 = const()[name = tensor("query_19_pad_type_0"), val = tensor("custom")]; + tensor query_19_pad_0 = const()[name = tensor("query_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_9_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_9_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(368480320)))]; + tensor layers_9_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_9_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(371757184)))]; + tensor query_19_cast_fp16 = conv(bias = layers_9_self_attn_q_proj_bias_to_fp16, dilations = var_1277, groups = var_1242, pad = query_19_pad_0, pad_type = query_19_pad_type_0, strides = var_1275, weight = layers_9_self_attn_q_proj_weight_to_fp16, x = obj_37_cast_fp16)[name = tensor("query_19_cast_fp16")]; + tensor var_1281 = const()[name = tensor("op_1281"), val = tensor([1, 1])]; + tensor var_1283 = const()[name = tensor("op_1283"), val = tensor([1, 1])]; + tensor key_19_pad_type_0 = const()[name = tensor("key_19_pad_type_0"), val = tensor("custom")]; + tensor key_19_pad_0 = const()[name = tensor("key_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_9_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_9_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(371759808)))]; + tensor key_19_cast_fp16 = conv(dilations = var_1283, groups = var_1242, pad = key_19_pad_0, pad_type = key_19_pad_type_0, strides = var_1281, weight = layers_9_self_attn_k_proj_weight_to_fp16, x = obj_37_cast_fp16)[name = tensor("key_19_cast_fp16")]; + tensor var_1288 = const()[name = tensor("op_1288"), val = tensor([1, 1])]; + tensor var_1290 = const()[name = tensor("op_1290"), val = tensor([1, 1])]; + tensor value_19_pad_type_0 = const()[name = tensor("value_19_pad_type_0"), val = tensor("custom")]; + tensor value_19_pad_0 = const()[name = tensor("value_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_9_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_9_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(375036672)))]; + tensor layers_9_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_9_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(378313536)))]; + tensor 
value_19_cast_fp16 = conv(bias = layers_9_self_attn_v_proj_bias_to_fp16, dilations = var_1290, groups = var_1242, pad = value_19_pad_0, pad_type = value_19_pad_type_0, strides = var_1288, weight = layers_9_self_attn_v_proj_weight_to_fp16, x = obj_37_cast_fp16)[name = tensor("value_19_cast_fp16")]; + tensor var_1294 = const()[name = tensor("op_1294"), val = tensor([1, 20, 64, -1])]; + tensor var_1295_cast_fp16 = reshape(shape = var_1294, x = query_19_cast_fp16)[name = tensor("op_1295_cast_fp16")]; + tensor var_1296_to_fp16 = const()[name = tensor("op_1296_to_fp16"), val = tensor(0x1p-3)]; + tensor var_1297_cast_fp16 = mul(x = var_1295_cast_fp16, y = var_1296_to_fp16)[name = tensor("op_1297_cast_fp16")]; + tensor var_1298 = const()[name = tensor("op_1298"), val = tensor([1, 20, 64, -1])]; + tensor var_1299_cast_fp16 = reshape(shape = var_1298, x = key_19_cast_fp16)[name = tensor("op_1299_cast_fp16")]; + tensor mh_w_19_transpose_x_0 = const()[name = tensor("mh_w_19_transpose_x_0"), val = tensor(true)]; + tensor mh_w_19_transpose_y_0 = const()[name = tensor("mh_w_19_transpose_y_0"), val = tensor(false)]; + tensor mh_w_19_cast_fp16 = matmul(transpose_x = mh_w_19_transpose_x_0, transpose_y = mh_w_19_transpose_y_0, x = var_1297_cast_fp16, y = var_1299_cast_fp16)[name = tensor("mh_w_19_cast_fp16")]; + tensor var_1302_cast_fp16 = softmax(axis = var_1240, x = mh_w_19_cast_fp16)[name = tensor("op_1302_cast_fp16")]; + tensor var_1303 = const()[name = tensor("op_1303"), val = tensor([1, 20, 64, -1])]; + tensor var_1304_cast_fp16 = reshape(shape = var_1303, x = value_19_cast_fp16)[name = tensor("op_1304_cast_fp16")]; + tensor attn_19_transpose_x_0 = const()[name = tensor("attn_19_transpose_x_0"), val = tensor(false)]; + tensor attn_19_transpose_y_0 = const()[name = tensor("attn_19_transpose_y_0"), val = tensor(true)]; + tensor attn_19_cast_fp16 = matmul(transpose_x = attn_19_transpose_x_0, transpose_y = attn_19_transpose_y_0, x = var_1304_cast_fp16, y = var_1302_cast_fp16)[name = tensor("attn_19_cast_fp16")]; + tensor var_1307 = const()[name = tensor("op_1307"), val = tensor([1, 1280, 1, -1])]; + tensor input_73_cast_fp16 = reshape(shape = var_1307, x = attn_19_cast_fp16)[name = tensor("input_73_cast_fp16")]; + tensor var_1311 = const()[name = tensor("op_1311"), val = tensor([1, 1])]; + tensor var_1313 = const()[name = tensor("op_1313"), val = tensor([1, 1])]; + tensor obj_39_pad_type_0 = const()[name = tensor("obj_39_pad_type_0"), val = tensor("custom")]; + tensor obj_39_pad_0 = const()[name = tensor("obj_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_9_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_9_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(378316160)))]; + tensor layers_9_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_9_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(381593024)))]; + tensor obj_39_cast_fp16 = conv(bias = layers_9_self_attn_o_proj_bias_to_fp16, dilations = var_1313, groups = var_1242, pad = obj_39_pad_0, pad_type = obj_39_pad_type_0, strides = var_1311, weight = layers_9_self_attn_o_proj_weight_to_fp16, x = input_73_cast_fp16)[name = tensor("obj_39_cast_fp16")]; + tensor inputs_39_cast_fp16 = add(x = inputs_37_cast_fp16, y = obj_39_cast_fp16)[name = tensor("inputs_39_cast_fp16")]; + tensor var_1319 = const()[name = tensor("op_1319"), val = tensor([1])]; + tensor channels_mean_39_cast_fp16 = 
reduce_mean(axes = var_1319, keep_dims = var_1243, x = inputs_39_cast_fp16)[name = tensor("channels_mean_39_cast_fp16")]; + tensor zero_mean_39_cast_fp16 = sub(x = inputs_39_cast_fp16, y = channels_mean_39_cast_fp16)[name = tensor("zero_mean_39_cast_fp16")]; + tensor zero_mean_sq_39_cast_fp16 = mul(x = zero_mean_39_cast_fp16, y = zero_mean_39_cast_fp16)[name = tensor("zero_mean_sq_39_cast_fp16")]; + tensor var_1323 = const()[name = tensor("op_1323"), val = tensor([1])]; + tensor var_1324_cast_fp16 = reduce_mean(axes = var_1323, keep_dims = var_1243, x = zero_mean_sq_39_cast_fp16)[name = tensor("op_1324_cast_fp16")]; + tensor var_1325_to_fp16 = const()[name = tensor("op_1325_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1326_cast_fp16 = add(x = var_1324_cast_fp16, y = var_1325_to_fp16)[name = tensor("op_1326_cast_fp16")]; + tensor denom_39_epsilon_0_to_fp16 = const()[name = tensor("denom_39_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_39_cast_fp16 = rsqrt(epsilon = denom_39_epsilon_0_to_fp16, x = var_1326_cast_fp16)[name = tensor("denom_39_cast_fp16")]; + tensor out_39_cast_fp16 = mul(x = zero_mean_39_cast_fp16, y = denom_39_cast_fp16)[name = tensor("out_39_cast_fp16")]; + tensor input_75_gamma_0_to_fp16 = const()[name = tensor("input_75_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(381595648)))]; + tensor input_75_beta_0_to_fp16 = const()[name = tensor("input_75_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(381598272)))]; + tensor input_75_epsilon_0_to_fp16 = const()[name = tensor("input_75_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_75_cast_fp16 = batch_norm(beta = input_75_beta_0_to_fp16, epsilon = input_75_epsilon_0_to_fp16, gamma = input_75_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_39_cast_fp16)[name = tensor("input_75_cast_fp16")]; + tensor var_1337 = const()[name = tensor("op_1337"), val = tensor([1, 1])]; + tensor var_1339 = const()[name = tensor("op_1339"), val = tensor([1, 1])]; + tensor input_77_pad_type_0 = const()[name = tensor("input_77_pad_type_0"), val = tensor("custom")]; + tensor input_77_pad_0 = const()[name = tensor("input_77_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_9_fc1_weight_to_fp16 = const()[name = tensor("layers_9_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(381600896)))]; + tensor layers_9_fc1_bias_to_fp16 = const()[name = tensor("layers_9_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(394708160)))]; + tensor input_77_cast_fp16 = conv(bias = layers_9_fc1_bias_to_fp16, dilations = var_1339, groups = var_1242, pad = input_77_pad_0, pad_type = input_77_pad_type_0, strides = var_1337, weight = layers_9_fc1_weight_to_fp16, x = input_75_cast_fp16)[name = tensor("input_77_cast_fp16")]; + tensor input_79_mode_0 = const()[name = tensor("input_79_mode_0"), val = tensor("EXACT")]; + tensor input_79_cast_fp16 = gelu(mode = input_79_mode_0, x = input_77_cast_fp16)[name = tensor("input_79_cast_fp16")]; + tensor var_1345 = const()[name = tensor("op_1345"), val = tensor([1, 1])]; + tensor var_1347 = const()[name = tensor("op_1347"), val = tensor([1, 1])]; + tensor hidden_states_23_pad_type_0 = const()[name = tensor("hidden_states_23_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_23_pad_0 = const()[name = 
tensor("hidden_states_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_9_fc2_weight_to_fp16 = const()[name = tensor("layers_9_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(394718464)))]; + tensor layers_9_fc2_bias_to_fp16 = const()[name = tensor("layers_9_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(407825728)))]; + tensor hidden_states_23_cast_fp16 = conv(bias = layers_9_fc2_bias_to_fp16, dilations = var_1347, groups = var_1242, pad = hidden_states_23_pad_0, pad_type = hidden_states_23_pad_type_0, strides = var_1345, weight = layers_9_fc2_weight_to_fp16, x = input_79_cast_fp16)[name = tensor("hidden_states_23_cast_fp16")]; + tensor inputs_41_cast_fp16 = add(x = inputs_39_cast_fp16, y = hidden_states_23_cast_fp16)[name = tensor("inputs_41_cast_fp16")]; + tensor var_1358 = const()[name = tensor("op_1358"), val = tensor(3)]; + tensor var_1360 = const()[name = tensor("op_1360"), val = tensor(1)]; + tensor var_1361 = const()[name = tensor("op_1361"), val = tensor(true)]; + tensor var_1371 = const()[name = tensor("op_1371"), val = tensor([1])]; + tensor channels_mean_41_cast_fp16 = reduce_mean(axes = var_1371, keep_dims = var_1361, x = inputs_41_cast_fp16)[name = tensor("channels_mean_41_cast_fp16")]; + tensor zero_mean_41_cast_fp16 = sub(x = inputs_41_cast_fp16, y = channels_mean_41_cast_fp16)[name = tensor("zero_mean_41_cast_fp16")]; + tensor zero_mean_sq_41_cast_fp16 = mul(x = zero_mean_41_cast_fp16, y = zero_mean_41_cast_fp16)[name = tensor("zero_mean_sq_41_cast_fp16")]; + tensor var_1375 = const()[name = tensor("op_1375"), val = tensor([1])]; + tensor var_1376_cast_fp16 = reduce_mean(axes = var_1375, keep_dims = var_1361, x = zero_mean_sq_41_cast_fp16)[name = tensor("op_1376_cast_fp16")]; + tensor var_1377_to_fp16 = const()[name = tensor("op_1377_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1378_cast_fp16 = add(x = var_1376_cast_fp16, y = var_1377_to_fp16)[name = tensor("op_1378_cast_fp16")]; + tensor denom_41_epsilon_0_to_fp16 = const()[name = tensor("denom_41_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_41_cast_fp16 = rsqrt(epsilon = denom_41_epsilon_0_to_fp16, x = var_1378_cast_fp16)[name = tensor("denom_41_cast_fp16")]; + tensor out_41_cast_fp16 = mul(x = zero_mean_41_cast_fp16, y = denom_41_cast_fp16)[name = tensor("out_41_cast_fp16")]; + tensor obj_41_gamma_0_to_fp16 = const()[name = tensor("obj_41_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(407828352)))]; + tensor obj_41_beta_0_to_fp16 = const()[name = tensor("obj_41_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(407830976)))]; + tensor obj_41_epsilon_0_to_fp16 = const()[name = tensor("obj_41_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_41_cast_fp16 = batch_norm(beta = obj_41_beta_0_to_fp16, epsilon = obj_41_epsilon_0_to_fp16, gamma = obj_41_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_41_cast_fp16)[name = tensor("obj_41_cast_fp16")]; + tensor var_1393 = const()[name = tensor("op_1393"), val = tensor([1, 1])]; + tensor var_1395 = const()[name = tensor("op_1395"), val = tensor([1, 1])]; + tensor query_21_pad_type_0 = const()[name = tensor("query_21_pad_type_0"), val = tensor("custom")]; + tensor query_21_pad_0 = const()[name = tensor("query_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
layers_10_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_10_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(407833600)))]; + tensor layers_10_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_10_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(411110464)))]; + tensor query_21_cast_fp16 = conv(bias = layers_10_self_attn_q_proj_bias_to_fp16, dilations = var_1395, groups = var_1360, pad = query_21_pad_0, pad_type = query_21_pad_type_0, strides = var_1393, weight = layers_10_self_attn_q_proj_weight_to_fp16, x = obj_41_cast_fp16)[name = tensor("query_21_cast_fp16")]; + tensor var_1399 = const()[name = tensor("op_1399"), val = tensor([1, 1])]; + tensor var_1401 = const()[name = tensor("op_1401"), val = tensor([1, 1])]; + tensor key_21_pad_type_0 = const()[name = tensor("key_21_pad_type_0"), val = tensor("custom")]; + tensor key_21_pad_0 = const()[name = tensor("key_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_10_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_10_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(411113088)))]; + tensor key_21_cast_fp16 = conv(dilations = var_1401, groups = var_1360, pad = key_21_pad_0, pad_type = key_21_pad_type_0, strides = var_1399, weight = layers_10_self_attn_k_proj_weight_to_fp16, x = obj_41_cast_fp16)[name = tensor("key_21_cast_fp16")]; + tensor var_1406 = const()[name = tensor("op_1406"), val = tensor([1, 1])]; + tensor var_1408 = const()[name = tensor("op_1408"), val = tensor([1, 1])]; + tensor value_21_pad_type_0 = const()[name = tensor("value_21_pad_type_0"), val = tensor("custom")]; + tensor value_21_pad_0 = const()[name = tensor("value_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_10_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_10_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(414389952)))]; + tensor layers_10_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_10_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(417666816)))]; + tensor value_21_cast_fp16 = conv(bias = layers_10_self_attn_v_proj_bias_to_fp16, dilations = var_1408, groups = var_1360, pad = value_21_pad_0, pad_type = value_21_pad_type_0, strides = var_1406, weight = layers_10_self_attn_v_proj_weight_to_fp16, x = obj_41_cast_fp16)[name = tensor("value_21_cast_fp16")]; + tensor var_1412 = const()[name = tensor("op_1412"), val = tensor([1, 20, 64, -1])]; + tensor var_1413_cast_fp16 = reshape(shape = var_1412, x = query_21_cast_fp16)[name = tensor("op_1413_cast_fp16")]; + tensor var_1414_to_fp16 = const()[name = tensor("op_1414_to_fp16"), val = tensor(0x1p-3)]; + tensor var_1415_cast_fp16 = mul(x = var_1413_cast_fp16, y = var_1414_to_fp16)[name = tensor("op_1415_cast_fp16")]; + tensor var_1416 = const()[name = tensor("op_1416"), val = tensor([1, 20, 64, -1])]; + tensor var_1417_cast_fp16 = reshape(shape = var_1416, x = key_21_cast_fp16)[name = tensor("op_1417_cast_fp16")]; + tensor mh_w_21_transpose_x_0 = const()[name = tensor("mh_w_21_transpose_x_0"), val = tensor(true)]; + tensor mh_w_21_transpose_y_0 = const()[name = tensor("mh_w_21_transpose_y_0"), val = tensor(false)]; + tensor mh_w_21_cast_fp16 = matmul(transpose_x = 
mh_w_21_transpose_x_0, transpose_y = mh_w_21_transpose_y_0, x = var_1415_cast_fp16, y = var_1417_cast_fp16)[name = tensor("mh_w_21_cast_fp16")]; + tensor var_1420_cast_fp16 = softmax(axis = var_1358, x = mh_w_21_cast_fp16)[name = tensor("op_1420_cast_fp16")]; + tensor var_1421 = const()[name = tensor("op_1421"), val = tensor([1, 20, 64, -1])]; + tensor var_1422_cast_fp16 = reshape(shape = var_1421, x = value_21_cast_fp16)[name = tensor("op_1422_cast_fp16")]; + tensor attn_21_transpose_x_0 = const()[name = tensor("attn_21_transpose_x_0"), val = tensor(false)]; + tensor attn_21_transpose_y_0 = const()[name = tensor("attn_21_transpose_y_0"), val = tensor(true)]; + tensor attn_21_cast_fp16 = matmul(transpose_x = attn_21_transpose_x_0, transpose_y = attn_21_transpose_y_0, x = var_1422_cast_fp16, y = var_1420_cast_fp16)[name = tensor("attn_21_cast_fp16")]; + tensor var_1425 = const()[name = tensor("op_1425"), val = tensor([1, 1280, 1, -1])]; + tensor input_81_cast_fp16 = reshape(shape = var_1425, x = attn_21_cast_fp16)[name = tensor("input_81_cast_fp16")]; + tensor var_1429 = const()[name = tensor("op_1429"), val = tensor([1, 1])]; + tensor var_1431 = const()[name = tensor("op_1431"), val = tensor([1, 1])]; + tensor obj_43_pad_type_0 = const()[name = tensor("obj_43_pad_type_0"), val = tensor("custom")]; + tensor obj_43_pad_0 = const()[name = tensor("obj_43_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_10_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_10_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(417669440)))]; + tensor layers_10_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_10_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(420946304)))]; + tensor obj_43_cast_fp16 = conv(bias = layers_10_self_attn_o_proj_bias_to_fp16, dilations = var_1431, groups = var_1360, pad = obj_43_pad_0, pad_type = obj_43_pad_type_0, strides = var_1429, weight = layers_10_self_attn_o_proj_weight_to_fp16, x = input_81_cast_fp16)[name = tensor("obj_43_cast_fp16")]; + tensor inputs_43_cast_fp16 = add(x = inputs_41_cast_fp16, y = obj_43_cast_fp16)[name = tensor("inputs_43_cast_fp16")]; + tensor var_1437 = const()[name = tensor("op_1437"), val = tensor([1])]; + tensor channels_mean_43_cast_fp16 = reduce_mean(axes = var_1437, keep_dims = var_1361, x = inputs_43_cast_fp16)[name = tensor("channels_mean_43_cast_fp16")]; + tensor zero_mean_43_cast_fp16 = sub(x = inputs_43_cast_fp16, y = channels_mean_43_cast_fp16)[name = tensor("zero_mean_43_cast_fp16")]; + tensor zero_mean_sq_43_cast_fp16 = mul(x = zero_mean_43_cast_fp16, y = zero_mean_43_cast_fp16)[name = tensor("zero_mean_sq_43_cast_fp16")]; + tensor var_1441 = const()[name = tensor("op_1441"), val = tensor([1])]; + tensor var_1442_cast_fp16 = reduce_mean(axes = var_1441, keep_dims = var_1361, x = zero_mean_sq_43_cast_fp16)[name = tensor("op_1442_cast_fp16")]; + tensor var_1443_to_fp16 = const()[name = tensor("op_1443_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1444_cast_fp16 = add(x = var_1442_cast_fp16, y = var_1443_to_fp16)[name = tensor("op_1444_cast_fp16")]; + tensor denom_43_epsilon_0_to_fp16 = const()[name = tensor("denom_43_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_43_cast_fp16 = rsqrt(epsilon = denom_43_epsilon_0_to_fp16, x = var_1444_cast_fp16)[name = tensor("denom_43_cast_fp16")]; + tensor out_43_cast_fp16 = mul(x = zero_mean_43_cast_fp16, y = 
denom_43_cast_fp16)[name = tensor("out_43_cast_fp16")]; + tensor input_83_gamma_0_to_fp16 = const()[name = tensor("input_83_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(420948928)))]; + tensor input_83_beta_0_to_fp16 = const()[name = tensor("input_83_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(420951552)))]; + tensor input_83_epsilon_0_to_fp16 = const()[name = tensor("input_83_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_83_cast_fp16 = batch_norm(beta = input_83_beta_0_to_fp16, epsilon = input_83_epsilon_0_to_fp16, gamma = input_83_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_43_cast_fp16)[name = tensor("input_83_cast_fp16")]; + tensor var_1455 = const()[name = tensor("op_1455"), val = tensor([1, 1])]; + tensor var_1457 = const()[name = tensor("op_1457"), val = tensor([1, 1])]; + tensor input_85_pad_type_0 = const()[name = tensor("input_85_pad_type_0"), val = tensor("custom")]; + tensor input_85_pad_0 = const()[name = tensor("input_85_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_10_fc1_weight_to_fp16 = const()[name = tensor("layers_10_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(420954176)))]; + tensor layers_10_fc1_bias_to_fp16 = const()[name = tensor("layers_10_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(434061440)))]; + tensor input_85_cast_fp16 = conv(bias = layers_10_fc1_bias_to_fp16, dilations = var_1457, groups = var_1360, pad = input_85_pad_0, pad_type = input_85_pad_type_0, strides = var_1455, weight = layers_10_fc1_weight_to_fp16, x = input_83_cast_fp16)[name = tensor("input_85_cast_fp16")]; + tensor input_87_mode_0 = const()[name = tensor("input_87_mode_0"), val = tensor("EXACT")]; + tensor input_87_cast_fp16 = gelu(mode = input_87_mode_0, x = input_85_cast_fp16)[name = tensor("input_87_cast_fp16")]; + tensor var_1463 = const()[name = tensor("op_1463"), val = tensor([1, 1])]; + tensor var_1465 = const()[name = tensor("op_1465"), val = tensor([1, 1])]; + tensor hidden_states_25_pad_type_0 = const()[name = tensor("hidden_states_25_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_25_pad_0 = const()[name = tensor("hidden_states_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_10_fc2_weight_to_fp16 = const()[name = tensor("layers_10_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(434071744)))]; + tensor layers_10_fc2_bias_to_fp16 = const()[name = tensor("layers_10_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(447179008)))]; + tensor hidden_states_25_cast_fp16 = conv(bias = layers_10_fc2_bias_to_fp16, dilations = var_1465, groups = var_1360, pad = hidden_states_25_pad_0, pad_type = hidden_states_25_pad_type_0, strides = var_1463, weight = layers_10_fc2_weight_to_fp16, x = input_87_cast_fp16)[name = tensor("hidden_states_25_cast_fp16")]; + tensor inputs_45_cast_fp16 = add(x = inputs_43_cast_fp16, y = hidden_states_25_cast_fp16)[name = tensor("inputs_45_cast_fp16")]; + tensor var_1476 = const()[name = tensor("op_1476"), val = tensor(3)]; + tensor var_1478 = const()[name = tensor("op_1478"), val = tensor(1)]; + tensor var_1479 = const()[name = tensor("op_1479"), val = tensor(true)]; + tensor var_1489 = const()[name = 
tensor("op_1489"), val = tensor([1])]; + tensor channels_mean_45_cast_fp16 = reduce_mean(axes = var_1489, keep_dims = var_1479, x = inputs_45_cast_fp16)[name = tensor("channels_mean_45_cast_fp16")]; + tensor zero_mean_45_cast_fp16 = sub(x = inputs_45_cast_fp16, y = channels_mean_45_cast_fp16)[name = tensor("zero_mean_45_cast_fp16")]; + tensor zero_mean_sq_45_cast_fp16 = mul(x = zero_mean_45_cast_fp16, y = zero_mean_45_cast_fp16)[name = tensor("zero_mean_sq_45_cast_fp16")]; + tensor var_1493 = const()[name = tensor("op_1493"), val = tensor([1])]; + tensor var_1494_cast_fp16 = reduce_mean(axes = var_1493, keep_dims = var_1479, x = zero_mean_sq_45_cast_fp16)[name = tensor("op_1494_cast_fp16")]; + tensor var_1495_to_fp16 = const()[name = tensor("op_1495_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1496_cast_fp16 = add(x = var_1494_cast_fp16, y = var_1495_to_fp16)[name = tensor("op_1496_cast_fp16")]; + tensor denom_45_epsilon_0_to_fp16 = const()[name = tensor("denom_45_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_45_cast_fp16 = rsqrt(epsilon = denom_45_epsilon_0_to_fp16, x = var_1496_cast_fp16)[name = tensor("denom_45_cast_fp16")]; + tensor out_45_cast_fp16 = mul(x = zero_mean_45_cast_fp16, y = denom_45_cast_fp16)[name = tensor("out_45_cast_fp16")]; + tensor obj_45_gamma_0_to_fp16 = const()[name = tensor("obj_45_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(447181632)))]; + tensor obj_45_beta_0_to_fp16 = const()[name = tensor("obj_45_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(447184256)))]; + tensor obj_45_epsilon_0_to_fp16 = const()[name = tensor("obj_45_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_45_cast_fp16 = batch_norm(beta = obj_45_beta_0_to_fp16, epsilon = obj_45_epsilon_0_to_fp16, gamma = obj_45_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_45_cast_fp16)[name = tensor("obj_45_cast_fp16")]; + tensor var_1511 = const()[name = tensor("op_1511"), val = tensor([1, 1])]; + tensor var_1513 = const()[name = tensor("op_1513"), val = tensor([1, 1])]; + tensor query_23_pad_type_0 = const()[name = tensor("query_23_pad_type_0"), val = tensor("custom")]; + tensor query_23_pad_0 = const()[name = tensor("query_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_11_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_11_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(447186880)))]; + tensor layers_11_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_11_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450463744)))]; + tensor query_23_cast_fp16 = conv(bias = layers_11_self_attn_q_proj_bias_to_fp16, dilations = var_1513, groups = var_1478, pad = query_23_pad_0, pad_type = query_23_pad_type_0, strides = var_1511, weight = layers_11_self_attn_q_proj_weight_to_fp16, x = obj_45_cast_fp16)[name = tensor("query_23_cast_fp16")]; + tensor var_1517 = const()[name = tensor("op_1517"), val = tensor([1, 1])]; + tensor var_1519 = const()[name = tensor("op_1519"), val = tensor([1, 1])]; + tensor key_23_pad_type_0 = const()[name = tensor("key_23_pad_type_0"), val = tensor("custom")]; + tensor key_23_pad_0 = const()[name = tensor("key_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_11_self_attn_k_proj_weight_to_fp16 = const()[name = 
tensor("layers_11_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450466368)))]; + tensor key_23_cast_fp16 = conv(dilations = var_1519, groups = var_1478, pad = key_23_pad_0, pad_type = key_23_pad_type_0, strides = var_1517, weight = layers_11_self_attn_k_proj_weight_to_fp16, x = obj_45_cast_fp16)[name = tensor("key_23_cast_fp16")]; + tensor var_1524 = const()[name = tensor("op_1524"), val = tensor([1, 1])]; + tensor var_1526 = const()[name = tensor("op_1526"), val = tensor([1, 1])]; + tensor value_23_pad_type_0 = const()[name = tensor("value_23_pad_type_0"), val = tensor("custom")]; + tensor value_23_pad_0 = const()[name = tensor("value_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_11_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_11_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(453743232)))]; + tensor layers_11_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_11_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(457020096)))]; + tensor value_23_cast_fp16 = conv(bias = layers_11_self_attn_v_proj_bias_to_fp16, dilations = var_1526, groups = var_1478, pad = value_23_pad_0, pad_type = value_23_pad_type_0, strides = var_1524, weight = layers_11_self_attn_v_proj_weight_to_fp16, x = obj_45_cast_fp16)[name = tensor("value_23_cast_fp16")]; + tensor var_1530 = const()[name = tensor("op_1530"), val = tensor([1, 20, 64, -1])]; + tensor var_1531_cast_fp16 = reshape(shape = var_1530, x = query_23_cast_fp16)[name = tensor("op_1531_cast_fp16")]; + tensor var_1532_to_fp16 = const()[name = tensor("op_1532_to_fp16"), val = tensor(0x1p-3)]; + tensor var_1533_cast_fp16 = mul(x = var_1531_cast_fp16, y = var_1532_to_fp16)[name = tensor("op_1533_cast_fp16")]; + tensor var_1534 = const()[name = tensor("op_1534"), val = tensor([1, 20, 64, -1])]; + tensor var_1535_cast_fp16 = reshape(shape = var_1534, x = key_23_cast_fp16)[name = tensor("op_1535_cast_fp16")]; + tensor mh_w_23_transpose_x_0 = const()[name = tensor("mh_w_23_transpose_x_0"), val = tensor(true)]; + tensor mh_w_23_transpose_y_0 = const()[name = tensor("mh_w_23_transpose_y_0"), val = tensor(false)]; + tensor mh_w_23_cast_fp16 = matmul(transpose_x = mh_w_23_transpose_x_0, transpose_y = mh_w_23_transpose_y_0, x = var_1533_cast_fp16, y = var_1535_cast_fp16)[name = tensor("mh_w_23_cast_fp16")]; + tensor var_1538_cast_fp16 = softmax(axis = var_1476, x = mh_w_23_cast_fp16)[name = tensor("op_1538_cast_fp16")]; + tensor var_1539 = const()[name = tensor("op_1539"), val = tensor([1, 20, 64, -1])]; + tensor var_1540_cast_fp16 = reshape(shape = var_1539, x = value_23_cast_fp16)[name = tensor("op_1540_cast_fp16")]; + tensor attn_23_transpose_x_0 = const()[name = tensor("attn_23_transpose_x_0"), val = tensor(false)]; + tensor attn_23_transpose_y_0 = const()[name = tensor("attn_23_transpose_y_0"), val = tensor(true)]; + tensor attn_23_cast_fp16 = matmul(transpose_x = attn_23_transpose_x_0, transpose_y = attn_23_transpose_y_0, x = var_1540_cast_fp16, y = var_1538_cast_fp16)[name = tensor("attn_23_cast_fp16")]; + tensor var_1543 = const()[name = tensor("op_1543"), val = tensor([1, 1280, 1, -1])]; + tensor input_89_cast_fp16 = reshape(shape = var_1543, x = attn_23_cast_fp16)[name = tensor("input_89_cast_fp16")]; + tensor var_1547 = const()[name = tensor("op_1547"), val = tensor([1, 1])]; + tensor var_1549 = 
const()[name = tensor("op_1549"), val = tensor([1, 1])]; + tensor obj_47_pad_type_0 = const()[name = tensor("obj_47_pad_type_0"), val = tensor("custom")]; + tensor obj_47_pad_0 = const()[name = tensor("obj_47_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_11_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_11_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(457022720)))]; + tensor layers_11_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_11_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(460299584)))]; + tensor obj_47_cast_fp16 = conv(bias = layers_11_self_attn_o_proj_bias_to_fp16, dilations = var_1549, groups = var_1478, pad = obj_47_pad_0, pad_type = obj_47_pad_type_0, strides = var_1547, weight = layers_11_self_attn_o_proj_weight_to_fp16, x = input_89_cast_fp16)[name = tensor("obj_47_cast_fp16")]; + tensor inputs_47_cast_fp16 = add(x = inputs_45_cast_fp16, y = obj_47_cast_fp16)[name = tensor("inputs_47_cast_fp16")]; + tensor var_1555 = const()[name = tensor("op_1555"), val = tensor([1])]; + tensor channels_mean_47_cast_fp16 = reduce_mean(axes = var_1555, keep_dims = var_1479, x = inputs_47_cast_fp16)[name = tensor("channels_mean_47_cast_fp16")]; + tensor zero_mean_47_cast_fp16 = sub(x = inputs_47_cast_fp16, y = channels_mean_47_cast_fp16)[name = tensor("zero_mean_47_cast_fp16")]; + tensor zero_mean_sq_47_cast_fp16 = mul(x = zero_mean_47_cast_fp16, y = zero_mean_47_cast_fp16)[name = tensor("zero_mean_sq_47_cast_fp16")]; + tensor var_1559 = const()[name = tensor("op_1559"), val = tensor([1])]; + tensor var_1560_cast_fp16 = reduce_mean(axes = var_1559, keep_dims = var_1479, x = zero_mean_sq_47_cast_fp16)[name = tensor("op_1560_cast_fp16")]; + tensor var_1561_to_fp16 = const()[name = tensor("op_1561_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1562_cast_fp16 = add(x = var_1560_cast_fp16, y = var_1561_to_fp16)[name = tensor("op_1562_cast_fp16")]; + tensor denom_47_epsilon_0_to_fp16 = const()[name = tensor("denom_47_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_47_cast_fp16 = rsqrt(epsilon = denom_47_epsilon_0_to_fp16, x = var_1562_cast_fp16)[name = tensor("denom_47_cast_fp16")]; + tensor out_47_cast_fp16 = mul(x = zero_mean_47_cast_fp16, y = denom_47_cast_fp16)[name = tensor("out_47_cast_fp16")]; + tensor input_91_gamma_0_to_fp16 = const()[name = tensor("input_91_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(460302208)))]; + tensor input_91_beta_0_to_fp16 = const()[name = tensor("input_91_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(460304832)))]; + tensor input_91_epsilon_0_to_fp16 = const()[name = tensor("input_91_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_91_cast_fp16 = batch_norm(beta = input_91_beta_0_to_fp16, epsilon = input_91_epsilon_0_to_fp16, gamma = input_91_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_47_cast_fp16)[name = tensor("input_91_cast_fp16")]; + tensor var_1573 = const()[name = tensor("op_1573"), val = tensor([1, 1])]; + tensor var_1575 = const()[name = tensor("op_1575"), val = tensor([1, 1])]; + tensor input_93_pad_type_0 = const()[name = tensor("input_93_pad_type_0"), val = tensor("custom")]; + tensor input_93_pad_0 = const()[name = tensor("input_93_pad_0"), val = tensor([0, 0, 0, 0])]; + 
tensor layers_11_fc1_weight_to_fp16 = const()[name = tensor("layers_11_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(460307456)))]; + tensor layers_11_fc1_bias_to_fp16 = const()[name = tensor("layers_11_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(473414720)))]; + tensor input_93_cast_fp16 = conv(bias = layers_11_fc1_bias_to_fp16, dilations = var_1575, groups = var_1478, pad = input_93_pad_0, pad_type = input_93_pad_type_0, strides = var_1573, weight = layers_11_fc1_weight_to_fp16, x = input_91_cast_fp16)[name = tensor("input_93_cast_fp16")]; + tensor input_95_mode_0 = const()[name = tensor("input_95_mode_0"), val = tensor("EXACT")]; + tensor input_95_cast_fp16 = gelu(mode = input_95_mode_0, x = input_93_cast_fp16)[name = tensor("input_95_cast_fp16")]; + tensor var_1581 = const()[name = tensor("op_1581"), val = tensor([1, 1])]; + tensor var_1583 = const()[name = tensor("op_1583"), val = tensor([1, 1])]; + tensor hidden_states_27_pad_type_0 = const()[name = tensor("hidden_states_27_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_27_pad_0 = const()[name = tensor("hidden_states_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_11_fc2_weight_to_fp16 = const()[name = tensor("layers_11_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(473425024)))]; + tensor layers_11_fc2_bias_to_fp16 = const()[name = tensor("layers_11_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(486532288)))]; + tensor hidden_states_27_cast_fp16 = conv(bias = layers_11_fc2_bias_to_fp16, dilations = var_1583, groups = var_1478, pad = hidden_states_27_pad_0, pad_type = hidden_states_27_pad_type_0, strides = var_1581, weight = layers_11_fc2_weight_to_fp16, x = input_95_cast_fp16)[name = tensor("hidden_states_27_cast_fp16")]; + tensor inputs_49_cast_fp16 = add(x = inputs_47_cast_fp16, y = hidden_states_27_cast_fp16)[name = tensor("inputs_49_cast_fp16")]; + tensor var_1594 = const()[name = tensor("op_1594"), val = tensor(3)]; + tensor var_1596 = const()[name = tensor("op_1596"), val = tensor(1)]; + tensor var_1597 = const()[name = tensor("op_1597"), val = tensor(true)]; + tensor var_1607 = const()[name = tensor("op_1607"), val = tensor([1])]; + tensor channels_mean_49_cast_fp16 = reduce_mean(axes = var_1607, keep_dims = var_1597, x = inputs_49_cast_fp16)[name = tensor("channels_mean_49_cast_fp16")]; + tensor zero_mean_49_cast_fp16 = sub(x = inputs_49_cast_fp16, y = channels_mean_49_cast_fp16)[name = tensor("zero_mean_49_cast_fp16")]; + tensor zero_mean_sq_49_cast_fp16 = mul(x = zero_mean_49_cast_fp16, y = zero_mean_49_cast_fp16)[name = tensor("zero_mean_sq_49_cast_fp16")]; + tensor var_1611 = const()[name = tensor("op_1611"), val = tensor([1])]; + tensor var_1612_cast_fp16 = reduce_mean(axes = var_1611, keep_dims = var_1597, x = zero_mean_sq_49_cast_fp16)[name = tensor("op_1612_cast_fp16")]; + tensor var_1613_to_fp16 = const()[name = tensor("op_1613_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1614_cast_fp16 = add(x = var_1612_cast_fp16, y = var_1613_to_fp16)[name = tensor("op_1614_cast_fp16")]; + tensor denom_49_epsilon_0_to_fp16 = const()[name = tensor("denom_49_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_49_cast_fp16 = rsqrt(epsilon = denom_49_epsilon_0_to_fp16, x = var_1614_cast_fp16)[name = tensor("denom_49_cast_fp16")]; + tensor 
out_49_cast_fp16 = mul(x = zero_mean_49_cast_fp16, y = denom_49_cast_fp16)[name = tensor("out_49_cast_fp16")]; + tensor obj_49_gamma_0_to_fp16 = const()[name = tensor("obj_49_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(486534912)))]; + tensor obj_49_beta_0_to_fp16 = const()[name = tensor("obj_49_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(486537536)))]; + tensor obj_49_epsilon_0_to_fp16 = const()[name = tensor("obj_49_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_49_cast_fp16 = batch_norm(beta = obj_49_beta_0_to_fp16, epsilon = obj_49_epsilon_0_to_fp16, gamma = obj_49_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_49_cast_fp16)[name = tensor("obj_49_cast_fp16")]; + tensor var_1629 = const()[name = tensor("op_1629"), val = tensor([1, 1])]; + tensor var_1631 = const()[name = tensor("op_1631"), val = tensor([1, 1])]; + tensor query_25_pad_type_0 = const()[name = tensor("query_25_pad_type_0"), val = tensor("custom")]; + tensor query_25_pad_0 = const()[name = tensor("query_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_12_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_12_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(486540160)))]; + tensor layers_12_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_12_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(489817024)))]; + tensor query_25_cast_fp16 = conv(bias = layers_12_self_attn_q_proj_bias_to_fp16, dilations = var_1631, groups = var_1596, pad = query_25_pad_0, pad_type = query_25_pad_type_0, strides = var_1629, weight = layers_12_self_attn_q_proj_weight_to_fp16, x = obj_49_cast_fp16)[name = tensor("query_25_cast_fp16")]; + tensor var_1635 = const()[name = tensor("op_1635"), val = tensor([1, 1])]; + tensor var_1637 = const()[name = tensor("op_1637"), val = tensor([1, 1])]; + tensor key_25_pad_type_0 = const()[name = tensor("key_25_pad_type_0"), val = tensor("custom")]; + tensor key_25_pad_0 = const()[name = tensor("key_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_12_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_12_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(489819648)))]; + tensor key_25_cast_fp16 = conv(dilations = var_1637, groups = var_1596, pad = key_25_pad_0, pad_type = key_25_pad_type_0, strides = var_1635, weight = layers_12_self_attn_k_proj_weight_to_fp16, x = obj_49_cast_fp16)[name = tensor("key_25_cast_fp16")]; + tensor var_1642 = const()[name = tensor("op_1642"), val = tensor([1, 1])]; + tensor var_1644 = const()[name = tensor("op_1644"), val = tensor([1, 1])]; + tensor value_25_pad_type_0 = const()[name = tensor("value_25_pad_type_0"), val = tensor("custom")]; + tensor value_25_pad_0 = const()[name = tensor("value_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_12_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_12_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(493096512)))]; + tensor layers_12_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_12_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(496373376)))]; + tensor value_25_cast_fp16 = conv(bias = layers_12_self_attn_v_proj_bias_to_fp16, dilations = var_1644, groups = var_1596, pad = value_25_pad_0, pad_type = value_25_pad_type_0, strides = var_1642, weight = layers_12_self_attn_v_proj_weight_to_fp16, x = obj_49_cast_fp16)[name = tensor("value_25_cast_fp16")]; + tensor var_1648 = const()[name = tensor("op_1648"), val = tensor([1, 20, 64, -1])]; + tensor var_1649_cast_fp16 = reshape(shape = var_1648, x = query_25_cast_fp16)[name = tensor("op_1649_cast_fp16")]; + tensor var_1650_to_fp16 = const()[name = tensor("op_1650_to_fp16"), val = tensor(0x1p-3)]; + tensor var_1651_cast_fp16 = mul(x = var_1649_cast_fp16, y = var_1650_to_fp16)[name = tensor("op_1651_cast_fp16")]; + tensor var_1652 = const()[name = tensor("op_1652"), val = tensor([1, 20, 64, -1])]; + tensor var_1653_cast_fp16 = reshape(shape = var_1652, x = key_25_cast_fp16)[name = tensor("op_1653_cast_fp16")]; + tensor mh_w_25_transpose_x_0 = const()[name = tensor("mh_w_25_transpose_x_0"), val = tensor(true)]; + tensor mh_w_25_transpose_y_0 = const()[name = tensor("mh_w_25_transpose_y_0"), val = tensor(false)]; + tensor mh_w_25_cast_fp16 = matmul(transpose_x = mh_w_25_transpose_x_0, transpose_y = mh_w_25_transpose_y_0, x = var_1651_cast_fp16, y = var_1653_cast_fp16)[name = tensor("mh_w_25_cast_fp16")]; + tensor var_1656_cast_fp16 = softmax(axis = var_1594, x = mh_w_25_cast_fp16)[name = tensor("op_1656_cast_fp16")]; + tensor var_1657 = const()[name = tensor("op_1657"), val = tensor([1, 20, 64, -1])]; + tensor var_1658_cast_fp16 = reshape(shape = var_1657, x = value_25_cast_fp16)[name = tensor("op_1658_cast_fp16")]; + tensor attn_25_transpose_x_0 = const()[name = tensor("attn_25_transpose_x_0"), val = tensor(false)]; + tensor attn_25_transpose_y_0 = const()[name = tensor("attn_25_transpose_y_0"), val = tensor(true)]; + tensor attn_25_cast_fp16 = matmul(transpose_x = attn_25_transpose_x_0, transpose_y = attn_25_transpose_y_0, x = var_1658_cast_fp16, y = var_1656_cast_fp16)[name = tensor("attn_25_cast_fp16")]; + tensor var_1661 = const()[name = tensor("op_1661"), val = tensor([1, 1280, 1, -1])]; + tensor input_97_cast_fp16 = reshape(shape = var_1661, x = attn_25_cast_fp16)[name = tensor("input_97_cast_fp16")]; + tensor var_1665 = const()[name = tensor("op_1665"), val = tensor([1, 1])]; + tensor var_1667 = const()[name = tensor("op_1667"), val = tensor([1, 1])]; + tensor obj_51_pad_type_0 = const()[name = tensor("obj_51_pad_type_0"), val = tensor("custom")]; + tensor obj_51_pad_0 = const()[name = tensor("obj_51_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_12_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_12_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(496376000)))]; + tensor layers_12_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_12_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(499652864)))]; + tensor obj_51_cast_fp16 = conv(bias = layers_12_self_attn_o_proj_bias_to_fp16, dilations = var_1667, groups = var_1596, pad = obj_51_pad_0, pad_type = obj_51_pad_type_0, strides = var_1665, weight = layers_12_self_attn_o_proj_weight_to_fp16, x = input_97_cast_fp16)[name = tensor("obj_51_cast_fp16")]; + tensor inputs_51_cast_fp16 = add(x = inputs_49_cast_fp16, y = obj_51_cast_fp16)[name = tensor("inputs_51_cast_fp16")]; + tensor var_1673 = const()[name = tensor("op_1673"), val = tensor([1])]; + 
tensor channels_mean_51_cast_fp16 = reduce_mean(axes = var_1673, keep_dims = var_1597, x = inputs_51_cast_fp16)[name = tensor("channels_mean_51_cast_fp16")]; + tensor zero_mean_51_cast_fp16 = sub(x = inputs_51_cast_fp16, y = channels_mean_51_cast_fp16)[name = tensor("zero_mean_51_cast_fp16")]; + tensor zero_mean_sq_51_cast_fp16 = mul(x = zero_mean_51_cast_fp16, y = zero_mean_51_cast_fp16)[name = tensor("zero_mean_sq_51_cast_fp16")]; + tensor var_1677 = const()[name = tensor("op_1677"), val = tensor([1])]; + tensor var_1678_cast_fp16 = reduce_mean(axes = var_1677, keep_dims = var_1597, x = zero_mean_sq_51_cast_fp16)[name = tensor("op_1678_cast_fp16")]; + tensor var_1679_to_fp16 = const()[name = tensor("op_1679_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1680_cast_fp16 = add(x = var_1678_cast_fp16, y = var_1679_to_fp16)[name = tensor("op_1680_cast_fp16")]; + tensor denom_51_epsilon_0_to_fp16 = const()[name = tensor("denom_51_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_51_cast_fp16 = rsqrt(epsilon = denom_51_epsilon_0_to_fp16, x = var_1680_cast_fp16)[name = tensor("denom_51_cast_fp16")]; + tensor out_51_cast_fp16 = mul(x = zero_mean_51_cast_fp16, y = denom_51_cast_fp16)[name = tensor("out_51_cast_fp16")]; + tensor input_99_gamma_0_to_fp16 = const()[name = tensor("input_99_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(499655488)))]; + tensor input_99_beta_0_to_fp16 = const()[name = tensor("input_99_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(499658112)))]; + tensor input_99_epsilon_0_to_fp16 = const()[name = tensor("input_99_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_99_cast_fp16 = batch_norm(beta = input_99_beta_0_to_fp16, epsilon = input_99_epsilon_0_to_fp16, gamma = input_99_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_51_cast_fp16)[name = tensor("input_99_cast_fp16")]; + tensor var_1691 = const()[name = tensor("op_1691"), val = tensor([1, 1])]; + tensor var_1693 = const()[name = tensor("op_1693"), val = tensor([1, 1])]; + tensor input_101_pad_type_0 = const()[name = tensor("input_101_pad_type_0"), val = tensor("custom")]; + tensor input_101_pad_0 = const()[name = tensor("input_101_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_12_fc1_weight_to_fp16 = const()[name = tensor("layers_12_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(499660736)))]; + tensor layers_12_fc1_bias_to_fp16 = const()[name = tensor("layers_12_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(512768000)))]; + tensor input_101_cast_fp16 = conv(bias = layers_12_fc1_bias_to_fp16, dilations = var_1693, groups = var_1596, pad = input_101_pad_0, pad_type = input_101_pad_type_0, strides = var_1691, weight = layers_12_fc1_weight_to_fp16, x = input_99_cast_fp16)[name = tensor("input_101_cast_fp16")]; + tensor input_103_mode_0 = const()[name = tensor("input_103_mode_0"), val = tensor("EXACT")]; + tensor input_103_cast_fp16 = gelu(mode = input_103_mode_0, x = input_101_cast_fp16)[name = tensor("input_103_cast_fp16")]; + tensor var_1699 = const()[name = tensor("op_1699"), val = tensor([1, 1])]; + tensor var_1701 = const()[name = tensor("op_1701"), val = tensor([1, 1])]; + tensor hidden_states_29_pad_type_0 = const()[name = tensor("hidden_states_29_pad_type_0"), val = tensor("custom")]; + tensor 
hidden_states_29_pad_0 = const()[name = tensor("hidden_states_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_12_fc2_weight_to_fp16 = const()[name = tensor("layers_12_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(512778304)))]; + tensor layers_12_fc2_bias_to_fp16 = const()[name = tensor("layers_12_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(525885568)))]; + tensor hidden_states_29_cast_fp16 = conv(bias = layers_12_fc2_bias_to_fp16, dilations = var_1701, groups = var_1596, pad = hidden_states_29_pad_0, pad_type = hidden_states_29_pad_type_0, strides = var_1699, weight = layers_12_fc2_weight_to_fp16, x = input_103_cast_fp16)[name = tensor("hidden_states_29_cast_fp16")]; + tensor inputs_53_cast_fp16 = add(x = inputs_51_cast_fp16, y = hidden_states_29_cast_fp16)[name = tensor("inputs_53_cast_fp16")]; + tensor var_1712 = const()[name = tensor("op_1712"), val = tensor(3)]; + tensor var_1714 = const()[name = tensor("op_1714"), val = tensor(1)]; + tensor var_1715 = const()[name = tensor("op_1715"), val = tensor(true)]; + tensor var_1725 = const()[name = tensor("op_1725"), val = tensor([1])]; + tensor channels_mean_53_cast_fp16 = reduce_mean(axes = var_1725, keep_dims = var_1715, x = inputs_53_cast_fp16)[name = tensor("channels_mean_53_cast_fp16")]; + tensor zero_mean_53_cast_fp16 = sub(x = inputs_53_cast_fp16, y = channels_mean_53_cast_fp16)[name = tensor("zero_mean_53_cast_fp16")]; + tensor zero_mean_sq_53_cast_fp16 = mul(x = zero_mean_53_cast_fp16, y = zero_mean_53_cast_fp16)[name = tensor("zero_mean_sq_53_cast_fp16")]; + tensor var_1729 = const()[name = tensor("op_1729"), val = tensor([1])]; + tensor var_1730_cast_fp16 = reduce_mean(axes = var_1729, keep_dims = var_1715, x = zero_mean_sq_53_cast_fp16)[name = tensor("op_1730_cast_fp16")]; + tensor var_1731_to_fp16 = const()[name = tensor("op_1731_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1732_cast_fp16 = add(x = var_1730_cast_fp16, y = var_1731_to_fp16)[name = tensor("op_1732_cast_fp16")]; + tensor denom_53_epsilon_0_to_fp16 = const()[name = tensor("denom_53_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_53_cast_fp16 = rsqrt(epsilon = denom_53_epsilon_0_to_fp16, x = var_1732_cast_fp16)[name = tensor("denom_53_cast_fp16")]; + tensor out_53_cast_fp16 = mul(x = zero_mean_53_cast_fp16, y = denom_53_cast_fp16)[name = tensor("out_53_cast_fp16")]; + tensor obj_53_gamma_0_to_fp16 = const()[name = tensor("obj_53_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(525888192)))]; + tensor obj_53_beta_0_to_fp16 = const()[name = tensor("obj_53_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(525890816)))]; + tensor obj_53_epsilon_0_to_fp16 = const()[name = tensor("obj_53_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_53_cast_fp16 = batch_norm(beta = obj_53_beta_0_to_fp16, epsilon = obj_53_epsilon_0_to_fp16, gamma = obj_53_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_53_cast_fp16)[name = tensor("obj_53_cast_fp16")]; + tensor var_1747 = const()[name = tensor("op_1747"), val = tensor([1, 1])]; + tensor var_1749 = const()[name = tensor("op_1749"), val = tensor([1, 1])]; + tensor query_27_pad_type_0 = const()[name = tensor("query_27_pad_type_0"), val = tensor("custom")]; + tensor query_27_pad_0 = const()[name = tensor("query_27_pad_0"), val = 
tensor([0, 0, 0, 0])]; + tensor layers_13_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_13_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(525893440)))]; + tensor layers_13_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_13_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(529170304)))]; + tensor query_27_cast_fp16 = conv(bias = layers_13_self_attn_q_proj_bias_to_fp16, dilations = var_1749, groups = var_1714, pad = query_27_pad_0, pad_type = query_27_pad_type_0, strides = var_1747, weight = layers_13_self_attn_q_proj_weight_to_fp16, x = obj_53_cast_fp16)[name = tensor("query_27_cast_fp16")]; + tensor var_1753 = const()[name = tensor("op_1753"), val = tensor([1, 1])]; + tensor var_1755 = const()[name = tensor("op_1755"), val = tensor([1, 1])]; + tensor key_27_pad_type_0 = const()[name = tensor("key_27_pad_type_0"), val = tensor("custom")]; + tensor key_27_pad_0 = const()[name = tensor("key_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_13_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_13_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(529172928)))]; + tensor key_27_cast_fp16 = conv(dilations = var_1755, groups = var_1714, pad = key_27_pad_0, pad_type = key_27_pad_type_0, strides = var_1753, weight = layers_13_self_attn_k_proj_weight_to_fp16, x = obj_53_cast_fp16)[name = tensor("key_27_cast_fp16")]; + tensor var_1760 = const()[name = tensor("op_1760"), val = tensor([1, 1])]; + tensor var_1762 = const()[name = tensor("op_1762"), val = tensor([1, 1])]; + tensor value_27_pad_type_0 = const()[name = tensor("value_27_pad_type_0"), val = tensor("custom")]; + tensor value_27_pad_0 = const()[name = tensor("value_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_13_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_13_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(532449792)))]; + tensor layers_13_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_13_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(535726656)))]; + tensor value_27_cast_fp16 = conv(bias = layers_13_self_attn_v_proj_bias_to_fp16, dilations = var_1762, groups = var_1714, pad = value_27_pad_0, pad_type = value_27_pad_type_0, strides = var_1760, weight = layers_13_self_attn_v_proj_weight_to_fp16, x = obj_53_cast_fp16)[name = tensor("value_27_cast_fp16")]; + tensor var_1766 = const()[name = tensor("op_1766"), val = tensor([1, 20, 64, -1])]; + tensor var_1767_cast_fp16 = reshape(shape = var_1766, x = query_27_cast_fp16)[name = tensor("op_1767_cast_fp16")]; + tensor var_1768_to_fp16 = const()[name = tensor("op_1768_to_fp16"), val = tensor(0x1p-3)]; + tensor var_1769_cast_fp16 = mul(x = var_1767_cast_fp16, y = var_1768_to_fp16)[name = tensor("op_1769_cast_fp16")]; + tensor var_1770 = const()[name = tensor("op_1770"), val = tensor([1, 20, 64, -1])]; + tensor var_1771_cast_fp16 = reshape(shape = var_1770, x = key_27_cast_fp16)[name = tensor("op_1771_cast_fp16")]; + tensor mh_w_27_transpose_x_0 = const()[name = tensor("mh_w_27_transpose_x_0"), val = tensor(true)]; + tensor mh_w_27_transpose_y_0 = const()[name = tensor("mh_w_27_transpose_y_0"), val = tensor(false)]; + tensor mh_w_27_cast_fp16 = 
matmul(transpose_x = mh_w_27_transpose_x_0, transpose_y = mh_w_27_transpose_y_0, x = var_1769_cast_fp16, y = var_1771_cast_fp16)[name = tensor("mh_w_27_cast_fp16")]; + tensor var_1774_cast_fp16 = softmax(axis = var_1712, x = mh_w_27_cast_fp16)[name = tensor("op_1774_cast_fp16")]; + tensor var_1775 = const()[name = tensor("op_1775"), val = tensor([1, 20, 64, -1])]; + tensor var_1776_cast_fp16 = reshape(shape = var_1775, x = value_27_cast_fp16)[name = tensor("op_1776_cast_fp16")]; + tensor attn_27_transpose_x_0 = const()[name = tensor("attn_27_transpose_x_0"), val = tensor(false)]; + tensor attn_27_transpose_y_0 = const()[name = tensor("attn_27_transpose_y_0"), val = tensor(true)]; + tensor attn_27_cast_fp16 = matmul(transpose_x = attn_27_transpose_x_0, transpose_y = attn_27_transpose_y_0, x = var_1776_cast_fp16, y = var_1774_cast_fp16)[name = tensor("attn_27_cast_fp16")]; + tensor var_1779 = const()[name = tensor("op_1779"), val = tensor([1, 1280, 1, -1])]; + tensor input_105_cast_fp16 = reshape(shape = var_1779, x = attn_27_cast_fp16)[name = tensor("input_105_cast_fp16")]; + tensor var_1783 = const()[name = tensor("op_1783"), val = tensor([1, 1])]; + tensor var_1785 = const()[name = tensor("op_1785"), val = tensor([1, 1])]; + tensor obj_55_pad_type_0 = const()[name = tensor("obj_55_pad_type_0"), val = tensor("custom")]; + tensor obj_55_pad_0 = const()[name = tensor("obj_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_13_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_13_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(535729280)))]; + tensor layers_13_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_13_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(539006144)))]; + tensor obj_55_cast_fp16 = conv(bias = layers_13_self_attn_o_proj_bias_to_fp16, dilations = var_1785, groups = var_1714, pad = obj_55_pad_0, pad_type = obj_55_pad_type_0, strides = var_1783, weight = layers_13_self_attn_o_proj_weight_to_fp16, x = input_105_cast_fp16)[name = tensor("obj_55_cast_fp16")]; + tensor inputs_55_cast_fp16 = add(x = inputs_53_cast_fp16, y = obj_55_cast_fp16)[name = tensor("inputs_55_cast_fp16")]; + tensor var_1791 = const()[name = tensor("op_1791"), val = tensor([1])]; + tensor channels_mean_55_cast_fp16 = reduce_mean(axes = var_1791, keep_dims = var_1715, x = inputs_55_cast_fp16)[name = tensor("channels_mean_55_cast_fp16")]; + tensor zero_mean_55_cast_fp16 = sub(x = inputs_55_cast_fp16, y = channels_mean_55_cast_fp16)[name = tensor("zero_mean_55_cast_fp16")]; + tensor zero_mean_sq_55_cast_fp16 = mul(x = zero_mean_55_cast_fp16, y = zero_mean_55_cast_fp16)[name = tensor("zero_mean_sq_55_cast_fp16")]; + tensor var_1795 = const()[name = tensor("op_1795"), val = tensor([1])]; + tensor var_1796_cast_fp16 = reduce_mean(axes = var_1795, keep_dims = var_1715, x = zero_mean_sq_55_cast_fp16)[name = tensor("op_1796_cast_fp16")]; + tensor var_1797_to_fp16 = const()[name = tensor("op_1797_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1798_cast_fp16 = add(x = var_1796_cast_fp16, y = var_1797_to_fp16)[name = tensor("op_1798_cast_fp16")]; + tensor denom_55_epsilon_0_to_fp16 = const()[name = tensor("denom_55_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_55_cast_fp16 = rsqrt(epsilon = denom_55_epsilon_0_to_fp16, x = var_1798_cast_fp16)[name = tensor("denom_55_cast_fp16")]; + tensor out_55_cast_fp16 = mul(x = 
zero_mean_55_cast_fp16, y = denom_55_cast_fp16)[name = tensor("out_55_cast_fp16")]; + tensor input_107_gamma_0_to_fp16 = const()[name = tensor("input_107_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(539008768)))]; + tensor input_107_beta_0_to_fp16 = const()[name = tensor("input_107_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(539011392)))]; + tensor input_107_epsilon_0_to_fp16 = const()[name = tensor("input_107_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_107_cast_fp16 = batch_norm(beta = input_107_beta_0_to_fp16, epsilon = input_107_epsilon_0_to_fp16, gamma = input_107_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_55_cast_fp16)[name = tensor("input_107_cast_fp16")]; + tensor var_1809 = const()[name = tensor("op_1809"), val = tensor([1, 1])]; + tensor var_1811 = const()[name = tensor("op_1811"), val = tensor([1, 1])]; + tensor input_109_pad_type_0 = const()[name = tensor("input_109_pad_type_0"), val = tensor("custom")]; + tensor input_109_pad_0 = const()[name = tensor("input_109_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_13_fc1_weight_to_fp16 = const()[name = tensor("layers_13_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(539014016)))]; + tensor layers_13_fc1_bias_to_fp16 = const()[name = tensor("layers_13_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(552121280)))]; + tensor input_109_cast_fp16 = conv(bias = layers_13_fc1_bias_to_fp16, dilations = var_1811, groups = var_1714, pad = input_109_pad_0, pad_type = input_109_pad_type_0, strides = var_1809, weight = layers_13_fc1_weight_to_fp16, x = input_107_cast_fp16)[name = tensor("input_109_cast_fp16")]; + tensor input_111_mode_0 = const()[name = tensor("input_111_mode_0"), val = tensor("EXACT")]; + tensor input_111_cast_fp16 = gelu(mode = input_111_mode_0, x = input_109_cast_fp16)[name = tensor("input_111_cast_fp16")]; + tensor var_1817 = const()[name = tensor("op_1817"), val = tensor([1, 1])]; + tensor var_1819 = const()[name = tensor("op_1819"), val = tensor([1, 1])]; + tensor hidden_states_31_pad_type_0 = const()[name = tensor("hidden_states_31_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_31_pad_0 = const()[name = tensor("hidden_states_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_13_fc2_weight_to_fp16 = const()[name = tensor("layers_13_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(552131584)))]; + tensor layers_13_fc2_bias_to_fp16 = const()[name = tensor("layers_13_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(565238848)))]; + tensor hidden_states_31_cast_fp16 = conv(bias = layers_13_fc2_bias_to_fp16, dilations = var_1819, groups = var_1714, pad = hidden_states_31_pad_0, pad_type = hidden_states_31_pad_type_0, strides = var_1817, weight = layers_13_fc2_weight_to_fp16, x = input_111_cast_fp16)[name = tensor("hidden_states_31_cast_fp16")]; + tensor inputs_57_cast_fp16 = add(x = inputs_55_cast_fp16, y = hidden_states_31_cast_fp16)[name = tensor("inputs_57_cast_fp16")]; + tensor var_1830 = const()[name = tensor("op_1830"), val = tensor(3)]; + tensor var_1832 = const()[name = tensor("op_1832"), val = tensor(1)]; + tensor var_1833 = const()[name = tensor("op_1833"), val = 
tensor(true)]; + tensor var_1843 = const()[name = tensor("op_1843"), val = tensor([1])]; + tensor channels_mean_57_cast_fp16 = reduce_mean(axes = var_1843, keep_dims = var_1833, x = inputs_57_cast_fp16)[name = tensor("channels_mean_57_cast_fp16")]; + tensor zero_mean_57_cast_fp16 = sub(x = inputs_57_cast_fp16, y = channels_mean_57_cast_fp16)[name = tensor("zero_mean_57_cast_fp16")]; + tensor zero_mean_sq_57_cast_fp16 = mul(x = zero_mean_57_cast_fp16, y = zero_mean_57_cast_fp16)[name = tensor("zero_mean_sq_57_cast_fp16")]; + tensor var_1847 = const()[name = tensor("op_1847"), val = tensor([1])]; + tensor var_1848_cast_fp16 = reduce_mean(axes = var_1847, keep_dims = var_1833, x = zero_mean_sq_57_cast_fp16)[name = tensor("op_1848_cast_fp16")]; + tensor var_1849_to_fp16 = const()[name = tensor("op_1849_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1850_cast_fp16 = add(x = var_1848_cast_fp16, y = var_1849_to_fp16)[name = tensor("op_1850_cast_fp16")]; + tensor denom_57_epsilon_0_to_fp16 = const()[name = tensor("denom_57_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_57_cast_fp16 = rsqrt(epsilon = denom_57_epsilon_0_to_fp16, x = var_1850_cast_fp16)[name = tensor("denom_57_cast_fp16")]; + tensor out_57_cast_fp16 = mul(x = zero_mean_57_cast_fp16, y = denom_57_cast_fp16)[name = tensor("out_57_cast_fp16")]; + tensor obj_57_gamma_0_to_fp16 = const()[name = tensor("obj_57_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(565241472)))]; + tensor obj_57_beta_0_to_fp16 = const()[name = tensor("obj_57_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(565244096)))]; + tensor obj_57_epsilon_0_to_fp16 = const()[name = tensor("obj_57_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_57_cast_fp16 = batch_norm(beta = obj_57_beta_0_to_fp16, epsilon = obj_57_epsilon_0_to_fp16, gamma = obj_57_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_57_cast_fp16)[name = tensor("obj_57_cast_fp16")]; + tensor var_1865 = const()[name = tensor("op_1865"), val = tensor([1, 1])]; + tensor var_1867 = const()[name = tensor("op_1867"), val = tensor([1, 1])]; + tensor query_29_pad_type_0 = const()[name = tensor("query_29_pad_type_0"), val = tensor("custom")]; + tensor query_29_pad_0 = const()[name = tensor("query_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_14_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_14_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(565246720)))]; + tensor layers_14_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_14_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(568523584)))]; + tensor query_29_cast_fp16 = conv(bias = layers_14_self_attn_q_proj_bias_to_fp16, dilations = var_1867, groups = var_1832, pad = query_29_pad_0, pad_type = query_29_pad_type_0, strides = var_1865, weight = layers_14_self_attn_q_proj_weight_to_fp16, x = obj_57_cast_fp16)[name = tensor("query_29_cast_fp16")]; + tensor var_1871 = const()[name = tensor("op_1871"), val = tensor([1, 1])]; + tensor var_1873 = const()[name = tensor("op_1873"), val = tensor([1, 1])]; + tensor key_29_pad_type_0 = const()[name = tensor("key_29_pad_type_0"), val = tensor("custom")]; + tensor key_29_pad_0 = const()[name = tensor("key_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
layers_14_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_14_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(568526208)))]; + tensor key_29_cast_fp16 = conv(dilations = var_1873, groups = var_1832, pad = key_29_pad_0, pad_type = key_29_pad_type_0, strides = var_1871, weight = layers_14_self_attn_k_proj_weight_to_fp16, x = obj_57_cast_fp16)[name = tensor("key_29_cast_fp16")]; + tensor var_1878 = const()[name = tensor("op_1878"), val = tensor([1, 1])]; + tensor var_1880 = const()[name = tensor("op_1880"), val = tensor([1, 1])]; + tensor value_29_pad_type_0 = const()[name = tensor("value_29_pad_type_0"), val = tensor("custom")]; + tensor value_29_pad_0 = const()[name = tensor("value_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_14_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_14_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(571803072)))]; + tensor layers_14_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_14_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(575079936)))]; + tensor value_29_cast_fp16 = conv(bias = layers_14_self_attn_v_proj_bias_to_fp16, dilations = var_1880, groups = var_1832, pad = value_29_pad_0, pad_type = value_29_pad_type_0, strides = var_1878, weight = layers_14_self_attn_v_proj_weight_to_fp16, x = obj_57_cast_fp16)[name = tensor("value_29_cast_fp16")]; + tensor var_1884 = const()[name = tensor("op_1884"), val = tensor([1, 20, 64, -1])]; + tensor var_1885_cast_fp16 = reshape(shape = var_1884, x = query_29_cast_fp16)[name = tensor("op_1885_cast_fp16")]; + tensor var_1886_to_fp16 = const()[name = tensor("op_1886_to_fp16"), val = tensor(0x1p-3)]; + tensor var_1887_cast_fp16 = mul(x = var_1885_cast_fp16, y = var_1886_to_fp16)[name = tensor("op_1887_cast_fp16")]; + tensor var_1888 = const()[name = tensor("op_1888"), val = tensor([1, 20, 64, -1])]; + tensor var_1889_cast_fp16 = reshape(shape = var_1888, x = key_29_cast_fp16)[name = tensor("op_1889_cast_fp16")]; + tensor mh_w_29_transpose_x_0 = const()[name = tensor("mh_w_29_transpose_x_0"), val = tensor(true)]; + tensor mh_w_29_transpose_y_0 = const()[name = tensor("mh_w_29_transpose_y_0"), val = tensor(false)]; + tensor mh_w_29_cast_fp16 = matmul(transpose_x = mh_w_29_transpose_x_0, transpose_y = mh_w_29_transpose_y_0, x = var_1887_cast_fp16, y = var_1889_cast_fp16)[name = tensor("mh_w_29_cast_fp16")]; + tensor var_1892_cast_fp16 = softmax(axis = var_1830, x = mh_w_29_cast_fp16)[name = tensor("op_1892_cast_fp16")]; + tensor var_1893 = const()[name = tensor("op_1893"), val = tensor([1, 20, 64, -1])]; + tensor var_1894_cast_fp16 = reshape(shape = var_1893, x = value_29_cast_fp16)[name = tensor("op_1894_cast_fp16")]; + tensor attn_29_transpose_x_0 = const()[name = tensor("attn_29_transpose_x_0"), val = tensor(false)]; + tensor attn_29_transpose_y_0 = const()[name = tensor("attn_29_transpose_y_0"), val = tensor(true)]; + tensor attn_29_cast_fp16 = matmul(transpose_x = attn_29_transpose_x_0, transpose_y = attn_29_transpose_y_0, x = var_1894_cast_fp16, y = var_1892_cast_fp16)[name = tensor("attn_29_cast_fp16")]; + tensor var_1897 = const()[name = tensor("op_1897"), val = tensor([1, 1280, 1, -1])]; + tensor input_113_cast_fp16 = reshape(shape = var_1897, x = attn_29_cast_fp16)[name = tensor("input_113_cast_fp16")]; + tensor var_1901 = const()[name = 
tensor("op_1901"), val = tensor([1, 1])]; + tensor var_1903 = const()[name = tensor("op_1903"), val = tensor([1, 1])]; + tensor obj_59_pad_type_0 = const()[name = tensor("obj_59_pad_type_0"), val = tensor("custom")]; + tensor obj_59_pad_0 = const()[name = tensor("obj_59_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_14_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_14_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(575082560)))]; + tensor layers_14_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_14_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(578359424)))]; + tensor obj_59_cast_fp16 = conv(bias = layers_14_self_attn_o_proj_bias_to_fp16, dilations = var_1903, groups = var_1832, pad = obj_59_pad_0, pad_type = obj_59_pad_type_0, strides = var_1901, weight = layers_14_self_attn_o_proj_weight_to_fp16, x = input_113_cast_fp16)[name = tensor("obj_59_cast_fp16")]; + tensor inputs_59_cast_fp16 = add(x = inputs_57_cast_fp16, y = obj_59_cast_fp16)[name = tensor("inputs_59_cast_fp16")]; + tensor var_1909 = const()[name = tensor("op_1909"), val = tensor([1])]; + tensor channels_mean_59_cast_fp16 = reduce_mean(axes = var_1909, keep_dims = var_1833, x = inputs_59_cast_fp16)[name = tensor("channels_mean_59_cast_fp16")]; + tensor zero_mean_59_cast_fp16 = sub(x = inputs_59_cast_fp16, y = channels_mean_59_cast_fp16)[name = tensor("zero_mean_59_cast_fp16")]; + tensor zero_mean_sq_59_cast_fp16 = mul(x = zero_mean_59_cast_fp16, y = zero_mean_59_cast_fp16)[name = tensor("zero_mean_sq_59_cast_fp16")]; + tensor var_1913 = const()[name = tensor("op_1913"), val = tensor([1])]; + tensor var_1914_cast_fp16 = reduce_mean(axes = var_1913, keep_dims = var_1833, x = zero_mean_sq_59_cast_fp16)[name = tensor("op_1914_cast_fp16")]; + tensor var_1915_to_fp16 = const()[name = tensor("op_1915_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1916_cast_fp16 = add(x = var_1914_cast_fp16, y = var_1915_to_fp16)[name = tensor("op_1916_cast_fp16")]; + tensor denom_59_epsilon_0_to_fp16 = const()[name = tensor("denom_59_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_59_cast_fp16 = rsqrt(epsilon = denom_59_epsilon_0_to_fp16, x = var_1916_cast_fp16)[name = tensor("denom_59_cast_fp16")]; + tensor out_59_cast_fp16 = mul(x = zero_mean_59_cast_fp16, y = denom_59_cast_fp16)[name = tensor("out_59_cast_fp16")]; + tensor input_115_gamma_0_to_fp16 = const()[name = tensor("input_115_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(578362048)))]; + tensor input_115_beta_0_to_fp16 = const()[name = tensor("input_115_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(578364672)))]; + tensor input_115_epsilon_0_to_fp16 = const()[name = tensor("input_115_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_115_cast_fp16 = batch_norm(beta = input_115_beta_0_to_fp16, epsilon = input_115_epsilon_0_to_fp16, gamma = input_115_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_59_cast_fp16)[name = tensor("input_115_cast_fp16")]; + tensor var_1927 = const()[name = tensor("op_1927"), val = tensor([1, 1])]; + tensor var_1929 = const()[name = tensor("op_1929"), val = tensor([1, 1])]; + tensor input_117_pad_type_0 = const()[name = tensor("input_117_pad_type_0"), val = tensor("custom")]; + tensor input_117_pad_0 
= const()[name = tensor("input_117_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_14_fc1_weight_to_fp16 = const()[name = tensor("layers_14_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(578367296)))]; + tensor layers_14_fc1_bias_to_fp16 = const()[name = tensor("layers_14_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(591474560)))]; + tensor input_117_cast_fp16 = conv(bias = layers_14_fc1_bias_to_fp16, dilations = var_1929, groups = var_1832, pad = input_117_pad_0, pad_type = input_117_pad_type_0, strides = var_1927, weight = layers_14_fc1_weight_to_fp16, x = input_115_cast_fp16)[name = tensor("input_117_cast_fp16")]; + tensor input_119_mode_0 = const()[name = tensor("input_119_mode_0"), val = tensor("EXACT")]; + tensor input_119_cast_fp16 = gelu(mode = input_119_mode_0, x = input_117_cast_fp16)[name = tensor("input_119_cast_fp16")]; + tensor var_1935 = const()[name = tensor("op_1935"), val = tensor([1, 1])]; + tensor var_1937 = const()[name = tensor("op_1937"), val = tensor([1, 1])]; + tensor hidden_states_33_pad_type_0 = const()[name = tensor("hidden_states_33_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_33_pad_0 = const()[name = tensor("hidden_states_33_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_14_fc2_weight_to_fp16 = const()[name = tensor("layers_14_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(591484864)))]; + tensor layers_14_fc2_bias_to_fp16 = const()[name = tensor("layers_14_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(604592128)))]; + tensor hidden_states_33_cast_fp16 = conv(bias = layers_14_fc2_bias_to_fp16, dilations = var_1937, groups = var_1832, pad = hidden_states_33_pad_0, pad_type = hidden_states_33_pad_type_0, strides = var_1935, weight = layers_14_fc2_weight_to_fp16, x = input_119_cast_fp16)[name = tensor("hidden_states_33_cast_fp16")]; + tensor inputs_61_cast_fp16 = add(x = inputs_59_cast_fp16, y = hidden_states_33_cast_fp16)[name = tensor("inputs_61_cast_fp16")]; + tensor var_1948 = const()[name = tensor("op_1948"), val = tensor(3)]; + tensor var_1950 = const()[name = tensor("op_1950"), val = tensor(1)]; + tensor var_1951 = const()[name = tensor("op_1951"), val = tensor(true)]; + tensor var_1961 = const()[name = tensor("op_1961"), val = tensor([1])]; + tensor channels_mean_61_cast_fp16 = reduce_mean(axes = var_1961, keep_dims = var_1951, x = inputs_61_cast_fp16)[name = tensor("channels_mean_61_cast_fp16")]; + tensor zero_mean_61_cast_fp16 = sub(x = inputs_61_cast_fp16, y = channels_mean_61_cast_fp16)[name = tensor("zero_mean_61_cast_fp16")]; + tensor zero_mean_sq_61_cast_fp16 = mul(x = zero_mean_61_cast_fp16, y = zero_mean_61_cast_fp16)[name = tensor("zero_mean_sq_61_cast_fp16")]; + tensor var_1965 = const()[name = tensor("op_1965"), val = tensor([1])]; + tensor var_1966_cast_fp16 = reduce_mean(axes = var_1965, keep_dims = var_1951, x = zero_mean_sq_61_cast_fp16)[name = tensor("op_1966_cast_fp16")]; + tensor var_1967_to_fp16 = const()[name = tensor("op_1967_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1968_cast_fp16 = add(x = var_1966_cast_fp16, y = var_1967_to_fp16)[name = tensor("op_1968_cast_fp16")]; + tensor denom_61_epsilon_0_to_fp16 = const()[name = tensor("denom_61_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_61_cast_fp16 = rsqrt(epsilon = denom_61_epsilon_0_to_fp16, 
x = var_1968_cast_fp16)[name = tensor("denom_61_cast_fp16")]; + tensor out_61_cast_fp16 = mul(x = zero_mean_61_cast_fp16, y = denom_61_cast_fp16)[name = tensor("out_61_cast_fp16")]; + tensor obj_61_gamma_0_to_fp16 = const()[name = tensor("obj_61_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(604594752)))]; + tensor obj_61_beta_0_to_fp16 = const()[name = tensor("obj_61_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(604597376)))]; + tensor obj_61_epsilon_0_to_fp16 = const()[name = tensor("obj_61_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_61_cast_fp16 = batch_norm(beta = obj_61_beta_0_to_fp16, epsilon = obj_61_epsilon_0_to_fp16, gamma = obj_61_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_61_cast_fp16)[name = tensor("obj_61_cast_fp16")]; + tensor var_1983 = const()[name = tensor("op_1983"), val = tensor([1, 1])]; + tensor var_1985 = const()[name = tensor("op_1985"), val = tensor([1, 1])]; + tensor query_31_pad_type_0 = const()[name = tensor("query_31_pad_type_0"), val = tensor("custom")]; + tensor query_31_pad_0 = const()[name = tensor("query_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_15_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_15_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(604600000)))]; + tensor layers_15_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_15_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(607876864)))]; + tensor query_31_cast_fp16 = conv(bias = layers_15_self_attn_q_proj_bias_to_fp16, dilations = var_1985, groups = var_1950, pad = query_31_pad_0, pad_type = query_31_pad_type_0, strides = var_1983, weight = layers_15_self_attn_q_proj_weight_to_fp16, x = obj_61_cast_fp16)[name = tensor("query_31_cast_fp16")]; + tensor var_1989 = const()[name = tensor("op_1989"), val = tensor([1, 1])]; + tensor var_1991 = const()[name = tensor("op_1991"), val = tensor([1, 1])]; + tensor key_31_pad_type_0 = const()[name = tensor("key_31_pad_type_0"), val = tensor("custom")]; + tensor key_31_pad_0 = const()[name = tensor("key_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_15_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_15_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(607879488)))]; + tensor key_31_cast_fp16 = conv(dilations = var_1991, groups = var_1950, pad = key_31_pad_0, pad_type = key_31_pad_type_0, strides = var_1989, weight = layers_15_self_attn_k_proj_weight_to_fp16, x = obj_61_cast_fp16)[name = tensor("key_31_cast_fp16")]; + tensor var_1996 = const()[name = tensor("op_1996"), val = tensor([1, 1])]; + tensor var_1998 = const()[name = tensor("op_1998"), val = tensor([1, 1])]; + tensor value_31_pad_type_0 = const()[name = tensor("value_31_pad_type_0"), val = tensor("custom")]; + tensor value_31_pad_0 = const()[name = tensor("value_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_15_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_15_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(611156352)))]; + tensor layers_15_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_15_self_attn_v_proj_bias_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(614433216)))]; + tensor value_31_cast_fp16 = conv(bias = layers_15_self_attn_v_proj_bias_to_fp16, dilations = var_1998, groups = var_1950, pad = value_31_pad_0, pad_type = value_31_pad_type_0, strides = var_1996, weight = layers_15_self_attn_v_proj_weight_to_fp16, x = obj_61_cast_fp16)[name = tensor("value_31_cast_fp16")]; + tensor var_2002 = const()[name = tensor("op_2002"), val = tensor([1, 20, 64, -1])]; + tensor var_2003_cast_fp16 = reshape(shape = var_2002, x = query_31_cast_fp16)[name = tensor("op_2003_cast_fp16")]; + tensor var_2004_to_fp16 = const()[name = tensor("op_2004_to_fp16"), val = tensor(0x1p-3)]; + tensor var_2005_cast_fp16 = mul(x = var_2003_cast_fp16, y = var_2004_to_fp16)[name = tensor("op_2005_cast_fp16")]; + tensor var_2006 = const()[name = tensor("op_2006"), val = tensor([1, 20, 64, -1])]; + tensor var_2007_cast_fp16 = reshape(shape = var_2006, x = key_31_cast_fp16)[name = tensor("op_2007_cast_fp16")]; + tensor mh_w_31_transpose_x_0 = const()[name = tensor("mh_w_31_transpose_x_0"), val = tensor(true)]; + tensor mh_w_31_transpose_y_0 = const()[name = tensor("mh_w_31_transpose_y_0"), val = tensor(false)]; + tensor mh_w_31_cast_fp16 = matmul(transpose_x = mh_w_31_transpose_x_0, transpose_y = mh_w_31_transpose_y_0, x = var_2005_cast_fp16, y = var_2007_cast_fp16)[name = tensor("mh_w_31_cast_fp16")]; + tensor var_2010_cast_fp16 = softmax(axis = var_1948, x = mh_w_31_cast_fp16)[name = tensor("op_2010_cast_fp16")]; + tensor var_2011 = const()[name = tensor("op_2011"), val = tensor([1, 20, 64, -1])]; + tensor var_2012_cast_fp16 = reshape(shape = var_2011, x = value_31_cast_fp16)[name = tensor("op_2012_cast_fp16")]; + tensor attn_31_transpose_x_0 = const()[name = tensor("attn_31_transpose_x_0"), val = tensor(false)]; + tensor attn_31_transpose_y_0 = const()[name = tensor("attn_31_transpose_y_0"), val = tensor(true)]; + tensor attn_31_cast_fp16 = matmul(transpose_x = attn_31_transpose_x_0, transpose_y = attn_31_transpose_y_0, x = var_2012_cast_fp16, y = var_2010_cast_fp16)[name = tensor("attn_31_cast_fp16")]; + tensor var_2015 = const()[name = tensor("op_2015"), val = tensor([1, 1280, 1, -1])]; + tensor input_121_cast_fp16 = reshape(shape = var_2015, x = attn_31_cast_fp16)[name = tensor("input_121_cast_fp16")]; + tensor var_2019 = const()[name = tensor("op_2019"), val = tensor([1, 1])]; + tensor var_2021 = const()[name = tensor("op_2021"), val = tensor([1, 1])]; + tensor obj_63_pad_type_0 = const()[name = tensor("obj_63_pad_type_0"), val = tensor("custom")]; + tensor obj_63_pad_0 = const()[name = tensor("obj_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_15_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_15_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(614435840)))]; + tensor layers_15_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_15_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(617712704)))]; + tensor obj_63_cast_fp16 = conv(bias = layers_15_self_attn_o_proj_bias_to_fp16, dilations = var_2021, groups = var_1950, pad = obj_63_pad_0, pad_type = obj_63_pad_type_0, strides = var_2019, weight = layers_15_self_attn_o_proj_weight_to_fp16, x = input_121_cast_fp16)[name = tensor("obj_63_cast_fp16")]; + tensor inputs_63_cast_fp16 = add(x = inputs_61_cast_fp16, y = obj_63_cast_fp16)[name = 
tensor("inputs_63_cast_fp16")]; + tensor var_2027 = const()[name = tensor("op_2027"), val = tensor([1])]; + tensor channels_mean_63_cast_fp16 = reduce_mean(axes = var_2027, keep_dims = var_1951, x = inputs_63_cast_fp16)[name = tensor("channels_mean_63_cast_fp16")]; + tensor zero_mean_63_cast_fp16 = sub(x = inputs_63_cast_fp16, y = channels_mean_63_cast_fp16)[name = tensor("zero_mean_63_cast_fp16")]; + tensor zero_mean_sq_63_cast_fp16 = mul(x = zero_mean_63_cast_fp16, y = zero_mean_63_cast_fp16)[name = tensor("zero_mean_sq_63_cast_fp16")]; + tensor var_2031 = const()[name = tensor("op_2031"), val = tensor([1])]; + tensor var_2032_cast_fp16 = reduce_mean(axes = var_2031, keep_dims = var_1951, x = zero_mean_sq_63_cast_fp16)[name = tensor("op_2032_cast_fp16")]; + tensor var_2033_to_fp16 = const()[name = tensor("op_2033_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2034_cast_fp16 = add(x = var_2032_cast_fp16, y = var_2033_to_fp16)[name = tensor("op_2034_cast_fp16")]; + tensor denom_63_epsilon_0_to_fp16 = const()[name = tensor("denom_63_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_63_cast_fp16 = rsqrt(epsilon = denom_63_epsilon_0_to_fp16, x = var_2034_cast_fp16)[name = tensor("denom_63_cast_fp16")]; + tensor out_63_cast_fp16 = mul(x = zero_mean_63_cast_fp16, y = denom_63_cast_fp16)[name = tensor("out_63_cast_fp16")]; + tensor input_123_gamma_0_to_fp16 = const()[name = tensor("input_123_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(617715328)))]; + tensor input_123_beta_0_to_fp16 = const()[name = tensor("input_123_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(617717952)))]; + tensor input_123_epsilon_0_to_fp16 = const()[name = tensor("input_123_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_123_cast_fp16 = batch_norm(beta = input_123_beta_0_to_fp16, epsilon = input_123_epsilon_0_to_fp16, gamma = input_123_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_63_cast_fp16)[name = tensor("input_123_cast_fp16")]; + tensor var_2045 = const()[name = tensor("op_2045"), val = tensor([1, 1])]; + tensor var_2047 = const()[name = tensor("op_2047"), val = tensor([1, 1])]; + tensor input_125_pad_type_0 = const()[name = tensor("input_125_pad_type_0"), val = tensor("custom")]; + tensor input_125_pad_0 = const()[name = tensor("input_125_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_15_fc1_weight_to_fp16 = const()[name = tensor("layers_15_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(617720576)))]; + tensor layers_15_fc1_bias_to_fp16 = const()[name = tensor("layers_15_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(630827840)))]; + tensor input_125_cast_fp16 = conv(bias = layers_15_fc1_bias_to_fp16, dilations = var_2047, groups = var_1950, pad = input_125_pad_0, pad_type = input_125_pad_type_0, strides = var_2045, weight = layers_15_fc1_weight_to_fp16, x = input_123_cast_fp16)[name = tensor("input_125_cast_fp16")]; + tensor input_127_mode_0 = const()[name = tensor("input_127_mode_0"), val = tensor("EXACT")]; + tensor input_127_cast_fp16 = gelu(mode = input_127_mode_0, x = input_125_cast_fp16)[name = tensor("input_127_cast_fp16")]; + tensor var_2053 = const()[name = tensor("op_2053"), val = tensor([1, 1])]; + tensor var_2055 = const()[name = tensor("op_2055"), val = tensor([1, 1])]; + tensor 
hidden_states_35_pad_type_0 = const()[name = tensor("hidden_states_35_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_35_pad_0 = const()[name = tensor("hidden_states_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_15_fc2_weight_to_fp16 = const()[name = tensor("layers_15_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(630838144)))]; + tensor layers_15_fc2_bias_to_fp16 = const()[name = tensor("layers_15_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(643945408)))]; + tensor hidden_states_35_cast_fp16 = conv(bias = layers_15_fc2_bias_to_fp16, dilations = var_2055, groups = var_1950, pad = hidden_states_35_pad_0, pad_type = hidden_states_35_pad_type_0, strides = var_2053, weight = layers_15_fc2_weight_to_fp16, x = input_127_cast_fp16)[name = tensor("hidden_states_35_cast_fp16")]; + tensor inputs_65_cast_fp16 = add(x = inputs_63_cast_fp16, y = hidden_states_35_cast_fp16)[name = tensor("inputs_65_cast_fp16")]; + tensor var_2066 = const()[name = tensor("op_2066"), val = tensor(3)]; + tensor var_2068 = const()[name = tensor("op_2068"), val = tensor(1)]; + tensor var_2069 = const()[name = tensor("op_2069"), val = tensor(true)]; + tensor var_2079 = const()[name = tensor("op_2079"), val = tensor([1])]; + tensor channels_mean_65_cast_fp16 = reduce_mean(axes = var_2079, keep_dims = var_2069, x = inputs_65_cast_fp16)[name = tensor("channels_mean_65_cast_fp16")]; + tensor zero_mean_65_cast_fp16 = sub(x = inputs_65_cast_fp16, y = channels_mean_65_cast_fp16)[name = tensor("zero_mean_65_cast_fp16")]; + tensor zero_mean_sq_65_cast_fp16 = mul(x = zero_mean_65_cast_fp16, y = zero_mean_65_cast_fp16)[name = tensor("zero_mean_sq_65_cast_fp16")]; + tensor var_2083 = const()[name = tensor("op_2083"), val = tensor([1])]; + tensor var_2084_cast_fp16 = reduce_mean(axes = var_2083, keep_dims = var_2069, x = zero_mean_sq_65_cast_fp16)[name = tensor("op_2084_cast_fp16")]; + tensor var_2085_to_fp16 = const()[name = tensor("op_2085_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2086_cast_fp16 = add(x = var_2084_cast_fp16, y = var_2085_to_fp16)[name = tensor("op_2086_cast_fp16")]; + tensor denom_65_epsilon_0_to_fp16 = const()[name = tensor("denom_65_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_65_cast_fp16 = rsqrt(epsilon = denom_65_epsilon_0_to_fp16, x = var_2086_cast_fp16)[name = tensor("denom_65_cast_fp16")]; + tensor out_65_cast_fp16 = mul(x = zero_mean_65_cast_fp16, y = denom_65_cast_fp16)[name = tensor("out_65_cast_fp16")]; + tensor obj_65_gamma_0_to_fp16 = const()[name = tensor("obj_65_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(643948032)))]; + tensor obj_65_beta_0_to_fp16 = const()[name = tensor("obj_65_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(643950656)))]; + tensor obj_65_epsilon_0_to_fp16 = const()[name = tensor("obj_65_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_65_cast_fp16 = batch_norm(beta = obj_65_beta_0_to_fp16, epsilon = obj_65_epsilon_0_to_fp16, gamma = obj_65_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_65_cast_fp16)[name = tensor("obj_65_cast_fp16")]; + tensor var_2101 = const()[name = tensor("op_2101"), val = tensor([1, 1])]; + tensor var_2103 = const()[name = tensor("op_2103"), val = tensor([1, 1])]; + tensor query_33_pad_type_0 = const()[name = 
tensor("query_33_pad_type_0"), val = tensor("custom")]; + tensor query_33_pad_0 = const()[name = tensor("query_33_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_16_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_16_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(643953280)))]; + tensor layers_16_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_16_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(647230144)))]; + tensor query_33_cast_fp16 = conv(bias = layers_16_self_attn_q_proj_bias_to_fp16, dilations = var_2103, groups = var_2068, pad = query_33_pad_0, pad_type = query_33_pad_type_0, strides = var_2101, weight = layers_16_self_attn_q_proj_weight_to_fp16, x = obj_65_cast_fp16)[name = tensor("query_33_cast_fp16")]; + tensor var_2107 = const()[name = tensor("op_2107"), val = tensor([1, 1])]; + tensor var_2109 = const()[name = tensor("op_2109"), val = tensor([1, 1])]; + tensor key_33_pad_type_0 = const()[name = tensor("key_33_pad_type_0"), val = tensor("custom")]; + tensor key_33_pad_0 = const()[name = tensor("key_33_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_16_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_16_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(647232768)))]; + tensor key_33_cast_fp16 = conv(dilations = var_2109, groups = var_2068, pad = key_33_pad_0, pad_type = key_33_pad_type_0, strides = var_2107, weight = layers_16_self_attn_k_proj_weight_to_fp16, x = obj_65_cast_fp16)[name = tensor("key_33_cast_fp16")]; + tensor var_2114 = const()[name = tensor("op_2114"), val = tensor([1, 1])]; + tensor var_2116 = const()[name = tensor("op_2116"), val = tensor([1, 1])]; + tensor value_33_pad_type_0 = const()[name = tensor("value_33_pad_type_0"), val = tensor("custom")]; + tensor value_33_pad_0 = const()[name = tensor("value_33_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_16_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_16_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(650509632)))]; + tensor layers_16_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_16_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(653786496)))]; + tensor value_33_cast_fp16 = conv(bias = layers_16_self_attn_v_proj_bias_to_fp16, dilations = var_2116, groups = var_2068, pad = value_33_pad_0, pad_type = value_33_pad_type_0, strides = var_2114, weight = layers_16_self_attn_v_proj_weight_to_fp16, x = obj_65_cast_fp16)[name = tensor("value_33_cast_fp16")]; + tensor var_2120 = const()[name = tensor("op_2120"), val = tensor([1, 20, 64, -1])]; + tensor var_2121_cast_fp16 = reshape(shape = var_2120, x = query_33_cast_fp16)[name = tensor("op_2121_cast_fp16")]; + tensor var_2122_to_fp16 = const()[name = tensor("op_2122_to_fp16"), val = tensor(0x1p-3)]; + tensor var_2123_cast_fp16 = mul(x = var_2121_cast_fp16, y = var_2122_to_fp16)[name = tensor("op_2123_cast_fp16")]; + tensor var_2124 = const()[name = tensor("op_2124"), val = tensor([1, 20, 64, -1])]; + tensor var_2125_cast_fp16 = reshape(shape = var_2124, x = key_33_cast_fp16)[name = tensor("op_2125_cast_fp16")]; + tensor mh_w_33_transpose_x_0 = const()[name = tensor("mh_w_33_transpose_x_0"), val = tensor(true)]; + tensor 
mh_w_33_transpose_y_0 = const()[name = tensor("mh_w_33_transpose_y_0"), val = tensor(false)]; + tensor mh_w_33_cast_fp16 = matmul(transpose_x = mh_w_33_transpose_x_0, transpose_y = mh_w_33_transpose_y_0, x = var_2123_cast_fp16, y = var_2125_cast_fp16)[name = tensor("mh_w_33_cast_fp16")]; + tensor var_2128_cast_fp16 = softmax(axis = var_2066, x = mh_w_33_cast_fp16)[name = tensor("op_2128_cast_fp16")]; + tensor var_2129 = const()[name = tensor("op_2129"), val = tensor([1, 20, 64, -1])]; + tensor var_2130_cast_fp16 = reshape(shape = var_2129, x = value_33_cast_fp16)[name = tensor("op_2130_cast_fp16")]; + tensor attn_33_transpose_x_0 = const()[name = tensor("attn_33_transpose_x_0"), val = tensor(false)]; + tensor attn_33_transpose_y_0 = const()[name = tensor("attn_33_transpose_y_0"), val = tensor(true)]; + tensor attn_33_cast_fp16 = matmul(transpose_x = attn_33_transpose_x_0, transpose_y = attn_33_transpose_y_0, x = var_2130_cast_fp16, y = var_2128_cast_fp16)[name = tensor("attn_33_cast_fp16")]; + tensor var_2133 = const()[name = tensor("op_2133"), val = tensor([1, 1280, 1, -1])]; + tensor input_129_cast_fp16 = reshape(shape = var_2133, x = attn_33_cast_fp16)[name = tensor("input_129_cast_fp16")]; + tensor var_2137 = const()[name = tensor("op_2137"), val = tensor([1, 1])]; + tensor var_2139 = const()[name = tensor("op_2139"), val = tensor([1, 1])]; + tensor obj_67_pad_type_0 = const()[name = tensor("obj_67_pad_type_0"), val = tensor("custom")]; + tensor obj_67_pad_0 = const()[name = tensor("obj_67_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_16_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_16_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(653789120)))]; + tensor layers_16_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_16_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657065984)))]; + tensor obj_67_cast_fp16 = conv(bias = layers_16_self_attn_o_proj_bias_to_fp16, dilations = var_2139, groups = var_2068, pad = obj_67_pad_0, pad_type = obj_67_pad_type_0, strides = var_2137, weight = layers_16_self_attn_o_proj_weight_to_fp16, x = input_129_cast_fp16)[name = tensor("obj_67_cast_fp16")]; + tensor inputs_67_cast_fp16 = add(x = inputs_65_cast_fp16, y = obj_67_cast_fp16)[name = tensor("inputs_67_cast_fp16")]; + tensor var_2145 = const()[name = tensor("op_2145"), val = tensor([1])]; + tensor channels_mean_67_cast_fp16 = reduce_mean(axes = var_2145, keep_dims = var_2069, x = inputs_67_cast_fp16)[name = tensor("channels_mean_67_cast_fp16")]; + tensor zero_mean_67_cast_fp16 = sub(x = inputs_67_cast_fp16, y = channels_mean_67_cast_fp16)[name = tensor("zero_mean_67_cast_fp16")]; + tensor zero_mean_sq_67_cast_fp16 = mul(x = zero_mean_67_cast_fp16, y = zero_mean_67_cast_fp16)[name = tensor("zero_mean_sq_67_cast_fp16")]; + tensor var_2149 = const()[name = tensor("op_2149"), val = tensor([1])]; + tensor var_2150_cast_fp16 = reduce_mean(axes = var_2149, keep_dims = var_2069, x = zero_mean_sq_67_cast_fp16)[name = tensor("op_2150_cast_fp16")]; + tensor var_2151_to_fp16 = const()[name = tensor("op_2151_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2152_cast_fp16 = add(x = var_2150_cast_fp16, y = var_2151_to_fp16)[name = tensor("op_2152_cast_fp16")]; + tensor denom_67_epsilon_0_to_fp16 = const()[name = tensor("denom_67_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_67_cast_fp16 = rsqrt(epsilon = 
denom_67_epsilon_0_to_fp16, x = var_2152_cast_fp16)[name = tensor("denom_67_cast_fp16")]; + tensor out_67_cast_fp16 = mul(x = zero_mean_67_cast_fp16, y = denom_67_cast_fp16)[name = tensor("out_67_cast_fp16")]; + tensor input_131_gamma_0_to_fp16 = const()[name = tensor("input_131_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657068608)))]; + tensor input_131_beta_0_to_fp16 = const()[name = tensor("input_131_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657071232)))]; + tensor input_131_epsilon_0_to_fp16 = const()[name = tensor("input_131_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_131_cast_fp16 = batch_norm(beta = input_131_beta_0_to_fp16, epsilon = input_131_epsilon_0_to_fp16, gamma = input_131_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_67_cast_fp16)[name = tensor("input_131_cast_fp16")]; + tensor var_2163 = const()[name = tensor("op_2163"), val = tensor([1, 1])]; + tensor var_2165 = const()[name = tensor("op_2165"), val = tensor([1, 1])]; + tensor input_133_pad_type_0 = const()[name = tensor("input_133_pad_type_0"), val = tensor("custom")]; + tensor input_133_pad_0 = const()[name = tensor("input_133_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_16_fc1_weight_to_fp16 = const()[name = tensor("layers_16_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657073856)))]; + tensor layers_16_fc1_bias_to_fp16 = const()[name = tensor("layers_16_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(670181120)))]; + tensor input_133_cast_fp16 = conv(bias = layers_16_fc1_bias_to_fp16, dilations = var_2165, groups = var_2068, pad = input_133_pad_0, pad_type = input_133_pad_type_0, strides = var_2163, weight = layers_16_fc1_weight_to_fp16, x = input_131_cast_fp16)[name = tensor("input_133_cast_fp16")]; + tensor input_135_mode_0 = const()[name = tensor("input_135_mode_0"), val = tensor("EXACT")]; + tensor input_135_cast_fp16 = gelu(mode = input_135_mode_0, x = input_133_cast_fp16)[name = tensor("input_135_cast_fp16")]; + tensor var_2171 = const()[name = tensor("op_2171"), val = tensor([1, 1])]; + tensor var_2173 = const()[name = tensor("op_2173"), val = tensor([1, 1])]; + tensor hidden_states_37_pad_type_0 = const()[name = tensor("hidden_states_37_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_37_pad_0 = const()[name = tensor("hidden_states_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_16_fc2_weight_to_fp16 = const()[name = tensor("layers_16_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(670191424)))]; + tensor layers_16_fc2_bias_to_fp16 = const()[name = tensor("layers_16_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(683298688)))]; + tensor hidden_states_37_cast_fp16 = conv(bias = layers_16_fc2_bias_to_fp16, dilations = var_2173, groups = var_2068, pad = hidden_states_37_pad_0, pad_type = hidden_states_37_pad_type_0, strides = var_2171, weight = layers_16_fc2_weight_to_fp16, x = input_135_cast_fp16)[name = tensor("hidden_states_37_cast_fp16")]; + tensor inputs_69_cast_fp16 = add(x = inputs_67_cast_fp16, y = hidden_states_37_cast_fp16)[name = tensor("inputs_69_cast_fp16")]; + tensor var_2184 = const()[name = tensor("op_2184"), val = tensor(3)]; + tensor 
var_2186 = const()[name = tensor("op_2186"), val = tensor(1)]; + tensor var_2187 = const()[name = tensor("op_2187"), val = tensor(true)]; + tensor var_2197 = const()[name = tensor("op_2197"), val = tensor([1])]; + tensor channels_mean_69_cast_fp16 = reduce_mean(axes = var_2197, keep_dims = var_2187, x = inputs_69_cast_fp16)[name = tensor("channels_mean_69_cast_fp16")]; + tensor zero_mean_69_cast_fp16 = sub(x = inputs_69_cast_fp16, y = channels_mean_69_cast_fp16)[name = tensor("zero_mean_69_cast_fp16")]; + tensor zero_mean_sq_69_cast_fp16 = mul(x = zero_mean_69_cast_fp16, y = zero_mean_69_cast_fp16)[name = tensor("zero_mean_sq_69_cast_fp16")]; + tensor var_2201 = const()[name = tensor("op_2201"), val = tensor([1])]; + tensor var_2202_cast_fp16 = reduce_mean(axes = var_2201, keep_dims = var_2187, x = zero_mean_sq_69_cast_fp16)[name = tensor("op_2202_cast_fp16")]; + tensor var_2203_to_fp16 = const()[name = tensor("op_2203_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2204_cast_fp16 = add(x = var_2202_cast_fp16, y = var_2203_to_fp16)[name = tensor("op_2204_cast_fp16")]; + tensor denom_69_epsilon_0_to_fp16 = const()[name = tensor("denom_69_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_69_cast_fp16 = rsqrt(epsilon = denom_69_epsilon_0_to_fp16, x = var_2204_cast_fp16)[name = tensor("denom_69_cast_fp16")]; + tensor out_69_cast_fp16 = mul(x = zero_mean_69_cast_fp16, y = denom_69_cast_fp16)[name = tensor("out_69_cast_fp16")]; + tensor obj_69_gamma_0_to_fp16 = const()[name = tensor("obj_69_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(683301312)))]; + tensor obj_69_beta_0_to_fp16 = const()[name = tensor("obj_69_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(683303936)))]; + tensor obj_69_epsilon_0_to_fp16 = const()[name = tensor("obj_69_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_69_cast_fp16 = batch_norm(beta = obj_69_beta_0_to_fp16, epsilon = obj_69_epsilon_0_to_fp16, gamma = obj_69_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_69_cast_fp16)[name = tensor("obj_69_cast_fp16")]; + tensor var_2219 = const()[name = tensor("op_2219"), val = tensor([1, 1])]; + tensor var_2221 = const()[name = tensor("op_2221"), val = tensor([1, 1])]; + tensor query_35_pad_type_0 = const()[name = tensor("query_35_pad_type_0"), val = tensor("custom")]; + tensor query_35_pad_0 = const()[name = tensor("query_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_17_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_17_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(683306560)))]; + tensor layers_17_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_17_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(686583424)))]; + tensor query_35_cast_fp16 = conv(bias = layers_17_self_attn_q_proj_bias_to_fp16, dilations = var_2221, groups = var_2186, pad = query_35_pad_0, pad_type = query_35_pad_type_0, strides = var_2219, weight = layers_17_self_attn_q_proj_weight_to_fp16, x = obj_69_cast_fp16)[name = tensor("query_35_cast_fp16")]; + tensor var_2225 = const()[name = tensor("op_2225"), val = tensor([1, 1])]; + tensor var_2227 = const()[name = tensor("op_2227"), val = tensor([1, 1])]; + tensor key_35_pad_type_0 = const()[name = tensor("key_35_pad_type_0"), val = 
tensor("custom")]; + tensor key_35_pad_0 = const()[name = tensor("key_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_17_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_17_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(686586048)))]; + tensor key_35_cast_fp16 = conv(dilations = var_2227, groups = var_2186, pad = key_35_pad_0, pad_type = key_35_pad_type_0, strides = var_2225, weight = layers_17_self_attn_k_proj_weight_to_fp16, x = obj_69_cast_fp16)[name = tensor("key_35_cast_fp16")]; + tensor var_2232 = const()[name = tensor("op_2232"), val = tensor([1, 1])]; + tensor var_2234 = const()[name = tensor("op_2234"), val = tensor([1, 1])]; + tensor value_35_pad_type_0 = const()[name = tensor("value_35_pad_type_0"), val = tensor("custom")]; + tensor value_35_pad_0 = const()[name = tensor("value_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_17_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_17_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(689862912)))]; + tensor layers_17_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_17_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(693139776)))]; + tensor value_35_cast_fp16 = conv(bias = layers_17_self_attn_v_proj_bias_to_fp16, dilations = var_2234, groups = var_2186, pad = value_35_pad_0, pad_type = value_35_pad_type_0, strides = var_2232, weight = layers_17_self_attn_v_proj_weight_to_fp16, x = obj_69_cast_fp16)[name = tensor("value_35_cast_fp16")]; + tensor var_2238 = const()[name = tensor("op_2238"), val = tensor([1, 20, 64, -1])]; + tensor var_2239_cast_fp16 = reshape(shape = var_2238, x = query_35_cast_fp16)[name = tensor("op_2239_cast_fp16")]; + tensor var_2240_to_fp16 = const()[name = tensor("op_2240_to_fp16"), val = tensor(0x1p-3)]; + tensor var_2241_cast_fp16 = mul(x = var_2239_cast_fp16, y = var_2240_to_fp16)[name = tensor("op_2241_cast_fp16")]; + tensor var_2242 = const()[name = tensor("op_2242"), val = tensor([1, 20, 64, -1])]; + tensor var_2243_cast_fp16 = reshape(shape = var_2242, x = key_35_cast_fp16)[name = tensor("op_2243_cast_fp16")]; + tensor mh_w_35_transpose_x_0 = const()[name = tensor("mh_w_35_transpose_x_0"), val = tensor(true)]; + tensor mh_w_35_transpose_y_0 = const()[name = tensor("mh_w_35_transpose_y_0"), val = tensor(false)]; + tensor mh_w_35_cast_fp16 = matmul(transpose_x = mh_w_35_transpose_x_0, transpose_y = mh_w_35_transpose_y_0, x = var_2241_cast_fp16, y = var_2243_cast_fp16)[name = tensor("mh_w_35_cast_fp16")]; + tensor var_2246_cast_fp16 = softmax(axis = var_2184, x = mh_w_35_cast_fp16)[name = tensor("op_2246_cast_fp16")]; + tensor var_2247 = const()[name = tensor("op_2247"), val = tensor([1, 20, 64, -1])]; + tensor var_2248_cast_fp16 = reshape(shape = var_2247, x = value_35_cast_fp16)[name = tensor("op_2248_cast_fp16")]; + tensor attn_35_transpose_x_0 = const()[name = tensor("attn_35_transpose_x_0"), val = tensor(false)]; + tensor attn_35_transpose_y_0 = const()[name = tensor("attn_35_transpose_y_0"), val = tensor(true)]; + tensor attn_35_cast_fp16 = matmul(transpose_x = attn_35_transpose_x_0, transpose_y = attn_35_transpose_y_0, x = var_2248_cast_fp16, y = var_2246_cast_fp16)[name = tensor("attn_35_cast_fp16")]; + tensor var_2251 = const()[name = tensor("op_2251"), val = tensor([1, 1280, 1, -1])]; + tensor input_137_cast_fp16 = 
reshape(shape = var_2251, x = attn_35_cast_fp16)[name = tensor("input_137_cast_fp16")]; + tensor var_2255 = const()[name = tensor("op_2255"), val = tensor([1, 1])]; + tensor var_2257 = const()[name = tensor("op_2257"), val = tensor([1, 1])]; + tensor obj_71_pad_type_0 = const()[name = tensor("obj_71_pad_type_0"), val = tensor("custom")]; + tensor obj_71_pad_0 = const()[name = tensor("obj_71_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_17_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_17_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(693142400)))]; + tensor layers_17_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_17_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(696419264)))]; + tensor obj_71_cast_fp16 = conv(bias = layers_17_self_attn_o_proj_bias_to_fp16, dilations = var_2257, groups = var_2186, pad = obj_71_pad_0, pad_type = obj_71_pad_type_0, strides = var_2255, weight = layers_17_self_attn_o_proj_weight_to_fp16, x = input_137_cast_fp16)[name = tensor("obj_71_cast_fp16")]; + tensor inputs_71_cast_fp16 = add(x = inputs_69_cast_fp16, y = obj_71_cast_fp16)[name = tensor("inputs_71_cast_fp16")]; + tensor var_2263 = const()[name = tensor("op_2263"), val = tensor([1])]; + tensor channels_mean_71_cast_fp16 = reduce_mean(axes = var_2263, keep_dims = var_2187, x = inputs_71_cast_fp16)[name = tensor("channels_mean_71_cast_fp16")]; + tensor zero_mean_71_cast_fp16 = sub(x = inputs_71_cast_fp16, y = channels_mean_71_cast_fp16)[name = tensor("zero_mean_71_cast_fp16")]; + tensor zero_mean_sq_71_cast_fp16 = mul(x = zero_mean_71_cast_fp16, y = zero_mean_71_cast_fp16)[name = tensor("zero_mean_sq_71_cast_fp16")]; + tensor var_2267 = const()[name = tensor("op_2267"), val = tensor([1])]; + tensor var_2268_cast_fp16 = reduce_mean(axes = var_2267, keep_dims = var_2187, x = zero_mean_sq_71_cast_fp16)[name = tensor("op_2268_cast_fp16")]; + tensor var_2269_to_fp16 = const()[name = tensor("op_2269_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2270_cast_fp16 = add(x = var_2268_cast_fp16, y = var_2269_to_fp16)[name = tensor("op_2270_cast_fp16")]; + tensor denom_71_epsilon_0_to_fp16 = const()[name = tensor("denom_71_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_71_cast_fp16 = rsqrt(epsilon = denom_71_epsilon_0_to_fp16, x = var_2270_cast_fp16)[name = tensor("denom_71_cast_fp16")]; + tensor out_71_cast_fp16 = mul(x = zero_mean_71_cast_fp16, y = denom_71_cast_fp16)[name = tensor("out_71_cast_fp16")]; + tensor input_139_gamma_0_to_fp16 = const()[name = tensor("input_139_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(696421888)))]; + tensor input_139_beta_0_to_fp16 = const()[name = tensor("input_139_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(696424512)))]; + tensor input_139_epsilon_0_to_fp16 = const()[name = tensor("input_139_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_139_cast_fp16 = batch_norm(beta = input_139_beta_0_to_fp16, epsilon = input_139_epsilon_0_to_fp16, gamma = input_139_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_71_cast_fp16)[name = tensor("input_139_cast_fp16")]; + tensor var_2281 = const()[name = tensor("op_2281"), val = tensor([1, 1])]; + tensor var_2283 = const()[name = tensor("op_2283"), val = tensor([1, 1])]; + 
tensor input_141_pad_type_0 = const()[name = tensor("input_141_pad_type_0"), val = tensor("custom")]; + tensor input_141_pad_0 = const()[name = tensor("input_141_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_17_fc1_weight_to_fp16 = const()[name = tensor("layers_17_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(696427136)))]; + tensor layers_17_fc1_bias_to_fp16 = const()[name = tensor("layers_17_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(709534400)))]; + tensor input_141_cast_fp16 = conv(bias = layers_17_fc1_bias_to_fp16, dilations = var_2283, groups = var_2186, pad = input_141_pad_0, pad_type = input_141_pad_type_0, strides = var_2281, weight = layers_17_fc1_weight_to_fp16, x = input_139_cast_fp16)[name = tensor("input_141_cast_fp16")]; + tensor input_143_mode_0 = const()[name = tensor("input_143_mode_0"), val = tensor("EXACT")]; + tensor input_143_cast_fp16 = gelu(mode = input_143_mode_0, x = input_141_cast_fp16)[name = tensor("input_143_cast_fp16")]; + tensor var_2289 = const()[name = tensor("op_2289"), val = tensor([1, 1])]; + tensor var_2291 = const()[name = tensor("op_2291"), val = tensor([1, 1])]; + tensor hidden_states_39_pad_type_0 = const()[name = tensor("hidden_states_39_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_39_pad_0 = const()[name = tensor("hidden_states_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_17_fc2_weight_to_fp16 = const()[name = tensor("layers_17_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(709544704)))]; + tensor layers_17_fc2_bias_to_fp16 = const()[name = tensor("layers_17_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(722651968)))]; + tensor hidden_states_39_cast_fp16 = conv(bias = layers_17_fc2_bias_to_fp16, dilations = var_2291, groups = var_2186, pad = hidden_states_39_pad_0, pad_type = hidden_states_39_pad_type_0, strides = var_2289, weight = layers_17_fc2_weight_to_fp16, x = input_143_cast_fp16)[name = tensor("hidden_states_39_cast_fp16")]; + tensor inputs_73_cast_fp16 = add(x = inputs_71_cast_fp16, y = hidden_states_39_cast_fp16)[name = tensor("inputs_73_cast_fp16")]; + tensor var_2302 = const()[name = tensor("op_2302"), val = tensor(3)]; + tensor var_2304 = const()[name = tensor("op_2304"), val = tensor(1)]; + tensor var_2305 = const()[name = tensor("op_2305"), val = tensor(true)]; + tensor var_2315 = const()[name = tensor("op_2315"), val = tensor([1])]; + tensor channels_mean_73_cast_fp16 = reduce_mean(axes = var_2315, keep_dims = var_2305, x = inputs_73_cast_fp16)[name = tensor("channels_mean_73_cast_fp16")]; + tensor zero_mean_73_cast_fp16 = sub(x = inputs_73_cast_fp16, y = channels_mean_73_cast_fp16)[name = tensor("zero_mean_73_cast_fp16")]; + tensor zero_mean_sq_73_cast_fp16 = mul(x = zero_mean_73_cast_fp16, y = zero_mean_73_cast_fp16)[name = tensor("zero_mean_sq_73_cast_fp16")]; + tensor var_2319 = const()[name = tensor("op_2319"), val = tensor([1])]; + tensor var_2320_cast_fp16 = reduce_mean(axes = var_2319, keep_dims = var_2305, x = zero_mean_sq_73_cast_fp16)[name = tensor("op_2320_cast_fp16")]; + tensor var_2321_to_fp16 = const()[name = tensor("op_2321_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2322_cast_fp16 = add(x = var_2320_cast_fp16, y = var_2321_to_fp16)[name = tensor("op_2322_cast_fp16")]; + tensor denom_73_epsilon_0_to_fp16 = const()[name = 
tensor("denom_73_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_73_cast_fp16 = rsqrt(epsilon = denom_73_epsilon_0_to_fp16, x = var_2322_cast_fp16)[name = tensor("denom_73_cast_fp16")]; + tensor out_73_cast_fp16 = mul(x = zero_mean_73_cast_fp16, y = denom_73_cast_fp16)[name = tensor("out_73_cast_fp16")]; + tensor obj_73_gamma_0_to_fp16 = const()[name = tensor("obj_73_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(722654592)))]; + tensor obj_73_beta_0_to_fp16 = const()[name = tensor("obj_73_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(722657216)))]; + tensor obj_73_epsilon_0_to_fp16 = const()[name = tensor("obj_73_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_73_cast_fp16 = batch_norm(beta = obj_73_beta_0_to_fp16, epsilon = obj_73_epsilon_0_to_fp16, gamma = obj_73_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_73_cast_fp16)[name = tensor("obj_73_cast_fp16")]; + tensor var_2337 = const()[name = tensor("op_2337"), val = tensor([1, 1])]; + tensor var_2339 = const()[name = tensor("op_2339"), val = tensor([1, 1])]; + tensor query_37_pad_type_0 = const()[name = tensor("query_37_pad_type_0"), val = tensor("custom")]; + tensor query_37_pad_0 = const()[name = tensor("query_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_18_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_18_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(722659840)))]; + tensor layers_18_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_18_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(725936704)))]; + tensor query_37_cast_fp16 = conv(bias = layers_18_self_attn_q_proj_bias_to_fp16, dilations = var_2339, groups = var_2304, pad = query_37_pad_0, pad_type = query_37_pad_type_0, strides = var_2337, weight = layers_18_self_attn_q_proj_weight_to_fp16, x = obj_73_cast_fp16)[name = tensor("query_37_cast_fp16")]; + tensor var_2343 = const()[name = tensor("op_2343"), val = tensor([1, 1])]; + tensor var_2345 = const()[name = tensor("op_2345"), val = tensor([1, 1])]; + tensor key_37_pad_type_0 = const()[name = tensor("key_37_pad_type_0"), val = tensor("custom")]; + tensor key_37_pad_0 = const()[name = tensor("key_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_18_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_18_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(725939328)))]; + tensor key_37_cast_fp16 = conv(dilations = var_2345, groups = var_2304, pad = key_37_pad_0, pad_type = key_37_pad_type_0, strides = var_2343, weight = layers_18_self_attn_k_proj_weight_to_fp16, x = obj_73_cast_fp16)[name = tensor("key_37_cast_fp16")]; + tensor var_2350 = const()[name = tensor("op_2350"), val = tensor([1, 1])]; + tensor var_2352 = const()[name = tensor("op_2352"), val = tensor([1, 1])]; + tensor value_37_pad_type_0 = const()[name = tensor("value_37_pad_type_0"), val = tensor("custom")]; + tensor value_37_pad_0 = const()[name = tensor("value_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_18_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_18_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(729216192)))]; + tensor layers_18_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_18_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(732493056)))]; + tensor value_37_cast_fp16 = conv(bias = layers_18_self_attn_v_proj_bias_to_fp16, dilations = var_2352, groups = var_2304, pad = value_37_pad_0, pad_type = value_37_pad_type_0, strides = var_2350, weight = layers_18_self_attn_v_proj_weight_to_fp16, x = obj_73_cast_fp16)[name = tensor("value_37_cast_fp16")]; + tensor var_2356 = const()[name = tensor("op_2356"), val = tensor([1, 20, 64, -1])]; + tensor var_2357_cast_fp16 = reshape(shape = var_2356, x = query_37_cast_fp16)[name = tensor("op_2357_cast_fp16")]; + tensor var_2358_to_fp16 = const()[name = tensor("op_2358_to_fp16"), val = tensor(0x1p-3)]; + tensor var_2359_cast_fp16 = mul(x = var_2357_cast_fp16, y = var_2358_to_fp16)[name = tensor("op_2359_cast_fp16")]; + tensor var_2360 = const()[name = tensor("op_2360"), val = tensor([1, 20, 64, -1])]; + tensor var_2361_cast_fp16 = reshape(shape = var_2360, x = key_37_cast_fp16)[name = tensor("op_2361_cast_fp16")]; + tensor mh_w_37_transpose_x_0 = const()[name = tensor("mh_w_37_transpose_x_0"), val = tensor(true)]; + tensor mh_w_37_transpose_y_0 = const()[name = tensor("mh_w_37_transpose_y_0"), val = tensor(false)]; + tensor mh_w_37_cast_fp16 = matmul(transpose_x = mh_w_37_transpose_x_0, transpose_y = mh_w_37_transpose_y_0, x = var_2359_cast_fp16, y = var_2361_cast_fp16)[name = tensor("mh_w_37_cast_fp16")]; + tensor var_2364_cast_fp16 = softmax(axis = var_2302, x = mh_w_37_cast_fp16)[name = tensor("op_2364_cast_fp16")]; + tensor var_2365 = const()[name = tensor("op_2365"), val = tensor([1, 20, 64, -1])]; + tensor var_2366_cast_fp16 = reshape(shape = var_2365, x = value_37_cast_fp16)[name = tensor("op_2366_cast_fp16")]; + tensor attn_37_transpose_x_0 = const()[name = tensor("attn_37_transpose_x_0"), val = tensor(false)]; + tensor attn_37_transpose_y_0 = const()[name = tensor("attn_37_transpose_y_0"), val = tensor(true)]; + tensor attn_37_cast_fp16 = matmul(transpose_x = attn_37_transpose_x_0, transpose_y = attn_37_transpose_y_0, x = var_2366_cast_fp16, y = var_2364_cast_fp16)[name = tensor("attn_37_cast_fp16")]; + tensor var_2369 = const()[name = tensor("op_2369"), val = tensor([1, 1280, 1, -1])]; + tensor input_145_cast_fp16 = reshape(shape = var_2369, x = attn_37_cast_fp16)[name = tensor("input_145_cast_fp16")]; + tensor var_2373 = const()[name = tensor("op_2373"), val = tensor([1, 1])]; + tensor var_2375 = const()[name = tensor("op_2375"), val = tensor([1, 1])]; + tensor obj_75_pad_type_0 = const()[name = tensor("obj_75_pad_type_0"), val = tensor("custom")]; + tensor obj_75_pad_0 = const()[name = tensor("obj_75_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_18_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_18_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(732495680)))]; + tensor layers_18_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_18_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(735772544)))]; + tensor obj_75_cast_fp16 = conv(bias = layers_18_self_attn_o_proj_bias_to_fp16, dilations = var_2375, groups = var_2304, pad = obj_75_pad_0, pad_type = obj_75_pad_type_0, strides = var_2373, weight = layers_18_self_attn_o_proj_weight_to_fp16, x = input_145_cast_fp16)[name = 
tensor("obj_75_cast_fp16")]; + tensor inputs_75_cast_fp16 = add(x = inputs_73_cast_fp16, y = obj_75_cast_fp16)[name = tensor("inputs_75_cast_fp16")]; + tensor var_2381 = const()[name = tensor("op_2381"), val = tensor([1])]; + tensor channels_mean_75_cast_fp16 = reduce_mean(axes = var_2381, keep_dims = var_2305, x = inputs_75_cast_fp16)[name = tensor("channels_mean_75_cast_fp16")]; + tensor zero_mean_75_cast_fp16 = sub(x = inputs_75_cast_fp16, y = channels_mean_75_cast_fp16)[name = tensor("zero_mean_75_cast_fp16")]; + tensor zero_mean_sq_75_cast_fp16 = mul(x = zero_mean_75_cast_fp16, y = zero_mean_75_cast_fp16)[name = tensor("zero_mean_sq_75_cast_fp16")]; + tensor var_2385 = const()[name = tensor("op_2385"), val = tensor([1])]; + tensor var_2386_cast_fp16 = reduce_mean(axes = var_2385, keep_dims = var_2305, x = zero_mean_sq_75_cast_fp16)[name = tensor("op_2386_cast_fp16")]; + tensor var_2387_to_fp16 = const()[name = tensor("op_2387_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2388_cast_fp16 = add(x = var_2386_cast_fp16, y = var_2387_to_fp16)[name = tensor("op_2388_cast_fp16")]; + tensor denom_75_epsilon_0_to_fp16 = const()[name = tensor("denom_75_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_75_cast_fp16 = rsqrt(epsilon = denom_75_epsilon_0_to_fp16, x = var_2388_cast_fp16)[name = tensor("denom_75_cast_fp16")]; + tensor out_75_cast_fp16 = mul(x = zero_mean_75_cast_fp16, y = denom_75_cast_fp16)[name = tensor("out_75_cast_fp16")]; + tensor input_147_gamma_0_to_fp16 = const()[name = tensor("input_147_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(735775168)))]; + tensor input_147_beta_0_to_fp16 = const()[name = tensor("input_147_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(735777792)))]; + tensor input_147_epsilon_0_to_fp16 = const()[name = tensor("input_147_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_147_cast_fp16 = batch_norm(beta = input_147_beta_0_to_fp16, epsilon = input_147_epsilon_0_to_fp16, gamma = input_147_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_75_cast_fp16)[name = tensor("input_147_cast_fp16")]; + tensor var_2399 = const()[name = tensor("op_2399"), val = tensor([1, 1])]; + tensor var_2401 = const()[name = tensor("op_2401"), val = tensor([1, 1])]; + tensor input_149_pad_type_0 = const()[name = tensor("input_149_pad_type_0"), val = tensor("custom")]; + tensor input_149_pad_0 = const()[name = tensor("input_149_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_18_fc1_weight_to_fp16 = const()[name = tensor("layers_18_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(735780416)))]; + tensor layers_18_fc1_bias_to_fp16 = const()[name = tensor("layers_18_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748887680)))]; + tensor input_149_cast_fp16 = conv(bias = layers_18_fc1_bias_to_fp16, dilations = var_2401, groups = var_2304, pad = input_149_pad_0, pad_type = input_149_pad_type_0, strides = var_2399, weight = layers_18_fc1_weight_to_fp16, x = input_147_cast_fp16)[name = tensor("input_149_cast_fp16")]; + tensor input_151_mode_0 = const()[name = tensor("input_151_mode_0"), val = tensor("EXACT")]; + tensor input_151_cast_fp16 = gelu(mode = input_151_mode_0, x = input_149_cast_fp16)[name = tensor("input_151_cast_fp16")]; + tensor var_2407 = const()[name = 
tensor("op_2407"), val = tensor([1, 1])]; + tensor var_2409 = const()[name = tensor("op_2409"), val = tensor([1, 1])]; + tensor hidden_states_41_pad_type_0 = const()[name = tensor("hidden_states_41_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_41_pad_0 = const()[name = tensor("hidden_states_41_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_18_fc2_weight_to_fp16 = const()[name = tensor("layers_18_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748897984)))]; + tensor layers_18_fc2_bias_to_fp16 = const()[name = tensor("layers_18_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762005248)))]; + tensor hidden_states_41_cast_fp16 = conv(bias = layers_18_fc2_bias_to_fp16, dilations = var_2409, groups = var_2304, pad = hidden_states_41_pad_0, pad_type = hidden_states_41_pad_type_0, strides = var_2407, weight = layers_18_fc2_weight_to_fp16, x = input_151_cast_fp16)[name = tensor("hidden_states_41_cast_fp16")]; + tensor inputs_77_cast_fp16 = add(x = inputs_75_cast_fp16, y = hidden_states_41_cast_fp16)[name = tensor("inputs_77_cast_fp16")]; + tensor var_2420 = const()[name = tensor("op_2420"), val = tensor(3)]; + tensor var_2422 = const()[name = tensor("op_2422"), val = tensor(1)]; + tensor var_2423 = const()[name = tensor("op_2423"), val = tensor(true)]; + tensor var_2433 = const()[name = tensor("op_2433"), val = tensor([1])]; + tensor channels_mean_77_cast_fp16 = reduce_mean(axes = var_2433, keep_dims = var_2423, x = inputs_77_cast_fp16)[name = tensor("channels_mean_77_cast_fp16")]; + tensor zero_mean_77_cast_fp16 = sub(x = inputs_77_cast_fp16, y = channels_mean_77_cast_fp16)[name = tensor("zero_mean_77_cast_fp16")]; + tensor zero_mean_sq_77_cast_fp16 = mul(x = zero_mean_77_cast_fp16, y = zero_mean_77_cast_fp16)[name = tensor("zero_mean_sq_77_cast_fp16")]; + tensor var_2437 = const()[name = tensor("op_2437"), val = tensor([1])]; + tensor var_2438_cast_fp16 = reduce_mean(axes = var_2437, keep_dims = var_2423, x = zero_mean_sq_77_cast_fp16)[name = tensor("op_2438_cast_fp16")]; + tensor var_2439_to_fp16 = const()[name = tensor("op_2439_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2440_cast_fp16 = add(x = var_2438_cast_fp16, y = var_2439_to_fp16)[name = tensor("op_2440_cast_fp16")]; + tensor denom_77_epsilon_0_to_fp16 = const()[name = tensor("denom_77_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_77_cast_fp16 = rsqrt(epsilon = denom_77_epsilon_0_to_fp16, x = var_2440_cast_fp16)[name = tensor("denom_77_cast_fp16")]; + tensor out_77_cast_fp16 = mul(x = zero_mean_77_cast_fp16, y = denom_77_cast_fp16)[name = tensor("out_77_cast_fp16")]; + tensor obj_77_gamma_0_to_fp16 = const()[name = tensor("obj_77_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762007872)))]; + tensor obj_77_beta_0_to_fp16 = const()[name = tensor("obj_77_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762010496)))]; + tensor obj_77_epsilon_0_to_fp16 = const()[name = tensor("obj_77_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_77_cast_fp16 = batch_norm(beta = obj_77_beta_0_to_fp16, epsilon = obj_77_epsilon_0_to_fp16, gamma = obj_77_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_77_cast_fp16)[name = tensor("obj_77_cast_fp16")]; + tensor var_2455 = const()[name = tensor("op_2455"), val = tensor([1, 1])]; + 
tensor var_2457 = const()[name = tensor("op_2457"), val = tensor([1, 1])]; + tensor query_39_pad_type_0 = const()[name = tensor("query_39_pad_type_0"), val = tensor("custom")]; + tensor query_39_pad_0 = const()[name = tensor("query_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_19_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_19_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762013120)))]; + tensor layers_19_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_19_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(765289984)))]; + tensor query_39_cast_fp16 = conv(bias = layers_19_self_attn_q_proj_bias_to_fp16, dilations = var_2457, groups = var_2422, pad = query_39_pad_0, pad_type = query_39_pad_type_0, strides = var_2455, weight = layers_19_self_attn_q_proj_weight_to_fp16, x = obj_77_cast_fp16)[name = tensor("query_39_cast_fp16")]; + tensor var_2461 = const()[name = tensor("op_2461"), val = tensor([1, 1])]; + tensor var_2463 = const()[name = tensor("op_2463"), val = tensor([1, 1])]; + tensor key_39_pad_type_0 = const()[name = tensor("key_39_pad_type_0"), val = tensor("custom")]; + tensor key_39_pad_0 = const()[name = tensor("key_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_19_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_19_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(765292608)))]; + tensor key_39_cast_fp16 = conv(dilations = var_2463, groups = var_2422, pad = key_39_pad_0, pad_type = key_39_pad_type_0, strides = var_2461, weight = layers_19_self_attn_k_proj_weight_to_fp16, x = obj_77_cast_fp16)[name = tensor("key_39_cast_fp16")]; + tensor var_2468 = const()[name = tensor("op_2468"), val = tensor([1, 1])]; + tensor var_2470 = const()[name = tensor("op_2470"), val = tensor([1, 1])]; + tensor value_39_pad_type_0 = const()[name = tensor("value_39_pad_type_0"), val = tensor("custom")]; + tensor value_39_pad_0 = const()[name = tensor("value_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_19_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_19_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(768569472)))]; + tensor layers_19_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_19_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(771846336)))]; + tensor value_39_cast_fp16 = conv(bias = layers_19_self_attn_v_proj_bias_to_fp16, dilations = var_2470, groups = var_2422, pad = value_39_pad_0, pad_type = value_39_pad_type_0, strides = var_2468, weight = layers_19_self_attn_v_proj_weight_to_fp16, x = obj_77_cast_fp16)[name = tensor("value_39_cast_fp16")]; + tensor var_2474 = const()[name = tensor("op_2474"), val = tensor([1, 20, 64, -1])]; + tensor var_2475_cast_fp16 = reshape(shape = var_2474, x = query_39_cast_fp16)[name = tensor("op_2475_cast_fp16")]; + tensor var_2476_to_fp16 = const()[name = tensor("op_2476_to_fp16"), val = tensor(0x1p-3)]; + tensor var_2477_cast_fp16 = mul(x = var_2475_cast_fp16, y = var_2476_to_fp16)[name = tensor("op_2477_cast_fp16")]; + tensor var_2478 = const()[name = tensor("op_2478"), val = tensor([1, 20, 64, -1])]; + tensor var_2479_cast_fp16 = reshape(shape = var_2478, x = key_39_cast_fp16)[name = 
tensor("op_2479_cast_fp16")]; + tensor mh_w_39_transpose_x_0 = const()[name = tensor("mh_w_39_transpose_x_0"), val = tensor(true)]; + tensor mh_w_39_transpose_y_0 = const()[name = tensor("mh_w_39_transpose_y_0"), val = tensor(false)]; + tensor mh_w_39_cast_fp16 = matmul(transpose_x = mh_w_39_transpose_x_0, transpose_y = mh_w_39_transpose_y_0, x = var_2477_cast_fp16, y = var_2479_cast_fp16)[name = tensor("mh_w_39_cast_fp16")]; + tensor var_2482_cast_fp16 = softmax(axis = var_2420, x = mh_w_39_cast_fp16)[name = tensor("op_2482_cast_fp16")]; + tensor var_2483 = const()[name = tensor("op_2483"), val = tensor([1, 20, 64, -1])]; + tensor var_2484_cast_fp16 = reshape(shape = var_2483, x = value_39_cast_fp16)[name = tensor("op_2484_cast_fp16")]; + tensor attn_39_transpose_x_0 = const()[name = tensor("attn_39_transpose_x_0"), val = tensor(false)]; + tensor attn_39_transpose_y_0 = const()[name = tensor("attn_39_transpose_y_0"), val = tensor(true)]; + tensor attn_39_cast_fp16 = matmul(transpose_x = attn_39_transpose_x_0, transpose_y = attn_39_transpose_y_0, x = var_2484_cast_fp16, y = var_2482_cast_fp16)[name = tensor("attn_39_cast_fp16")]; + tensor var_2487 = const()[name = tensor("op_2487"), val = tensor([1, 1280, 1, -1])]; + tensor input_153_cast_fp16 = reshape(shape = var_2487, x = attn_39_cast_fp16)[name = tensor("input_153_cast_fp16")]; + tensor var_2491 = const()[name = tensor("op_2491"), val = tensor([1, 1])]; + tensor var_2493 = const()[name = tensor("op_2493"), val = tensor([1, 1])]; + tensor obj_79_pad_type_0 = const()[name = tensor("obj_79_pad_type_0"), val = tensor("custom")]; + tensor obj_79_pad_0 = const()[name = tensor("obj_79_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_19_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_19_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(771848960)))]; + tensor layers_19_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_19_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(775125824)))]; + tensor obj_79_cast_fp16 = conv(bias = layers_19_self_attn_o_proj_bias_to_fp16, dilations = var_2493, groups = var_2422, pad = obj_79_pad_0, pad_type = obj_79_pad_type_0, strides = var_2491, weight = layers_19_self_attn_o_proj_weight_to_fp16, x = input_153_cast_fp16)[name = tensor("obj_79_cast_fp16")]; + tensor inputs_79_cast_fp16 = add(x = inputs_77_cast_fp16, y = obj_79_cast_fp16)[name = tensor("inputs_79_cast_fp16")]; + tensor var_2499 = const()[name = tensor("op_2499"), val = tensor([1])]; + tensor channels_mean_79_cast_fp16 = reduce_mean(axes = var_2499, keep_dims = var_2423, x = inputs_79_cast_fp16)[name = tensor("channels_mean_79_cast_fp16")]; + tensor zero_mean_79_cast_fp16 = sub(x = inputs_79_cast_fp16, y = channels_mean_79_cast_fp16)[name = tensor("zero_mean_79_cast_fp16")]; + tensor zero_mean_sq_79_cast_fp16 = mul(x = zero_mean_79_cast_fp16, y = zero_mean_79_cast_fp16)[name = tensor("zero_mean_sq_79_cast_fp16")]; + tensor var_2503 = const()[name = tensor("op_2503"), val = tensor([1])]; + tensor var_2504_cast_fp16 = reduce_mean(axes = var_2503, keep_dims = var_2423, x = zero_mean_sq_79_cast_fp16)[name = tensor("op_2504_cast_fp16")]; + tensor var_2505_to_fp16 = const()[name = tensor("op_2505_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2506_cast_fp16 = add(x = var_2504_cast_fp16, y = var_2505_to_fp16)[name = tensor("op_2506_cast_fp16")]; + tensor denom_79_epsilon_0_to_fp16 = 
const()[name = tensor("denom_79_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_79_cast_fp16 = rsqrt(epsilon = denom_79_epsilon_0_to_fp16, x = var_2506_cast_fp16)[name = tensor("denom_79_cast_fp16")]; + tensor out_79_cast_fp16 = mul(x = zero_mean_79_cast_fp16, y = denom_79_cast_fp16)[name = tensor("out_79_cast_fp16")]; + tensor input_155_gamma_0_to_fp16 = const()[name = tensor("input_155_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(775128448)))]; + tensor input_155_beta_0_to_fp16 = const()[name = tensor("input_155_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(775131072)))]; + tensor input_155_epsilon_0_to_fp16 = const()[name = tensor("input_155_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_155_cast_fp16 = batch_norm(beta = input_155_beta_0_to_fp16, epsilon = input_155_epsilon_0_to_fp16, gamma = input_155_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_79_cast_fp16)[name = tensor("input_155_cast_fp16")]; + tensor var_2517 = const()[name = tensor("op_2517"), val = tensor([1, 1])]; + tensor var_2519 = const()[name = tensor("op_2519"), val = tensor([1, 1])]; + tensor input_157_pad_type_0 = const()[name = tensor("input_157_pad_type_0"), val = tensor("custom")]; + tensor input_157_pad_0 = const()[name = tensor("input_157_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_19_fc1_weight_to_fp16 = const()[name = tensor("layers_19_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(775133696)))]; + tensor layers_19_fc1_bias_to_fp16 = const()[name = tensor("layers_19_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788240960)))]; + tensor input_157_cast_fp16 = conv(bias = layers_19_fc1_bias_to_fp16, dilations = var_2519, groups = var_2422, pad = input_157_pad_0, pad_type = input_157_pad_type_0, strides = var_2517, weight = layers_19_fc1_weight_to_fp16, x = input_155_cast_fp16)[name = tensor("input_157_cast_fp16")]; + tensor input_159_mode_0 = const()[name = tensor("input_159_mode_0"), val = tensor("EXACT")]; + tensor input_159_cast_fp16 = gelu(mode = input_159_mode_0, x = input_157_cast_fp16)[name = tensor("input_159_cast_fp16")]; + tensor var_2525 = const()[name = tensor("op_2525"), val = tensor([1, 1])]; + tensor var_2527 = const()[name = tensor("op_2527"), val = tensor([1, 1])]; + tensor hidden_states_43_pad_type_0 = const()[name = tensor("hidden_states_43_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_43_pad_0 = const()[name = tensor("hidden_states_43_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_19_fc2_weight_to_fp16 = const()[name = tensor("layers_19_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788251264)))]; + tensor layers_19_fc2_bias_to_fp16 = const()[name = tensor("layers_19_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(801358528)))]; + tensor hidden_states_43_cast_fp16 = conv(bias = layers_19_fc2_bias_to_fp16, dilations = var_2527, groups = var_2422, pad = hidden_states_43_pad_0, pad_type = hidden_states_43_pad_type_0, strides = var_2525, weight = layers_19_fc2_weight_to_fp16, x = input_159_cast_fp16)[name = tensor("hidden_states_43_cast_fp16")]; + tensor inputs_81_cast_fp16 = add(x = inputs_79_cast_fp16, y = hidden_states_43_cast_fp16)[name 
= tensor("inputs_81_cast_fp16")]; + tensor var_2538 = const()[name = tensor("op_2538"), val = tensor(3)]; + tensor var_2540 = const()[name = tensor("op_2540"), val = tensor(1)]; + tensor var_2541 = const()[name = tensor("op_2541"), val = tensor(true)]; + tensor var_2551 = const()[name = tensor("op_2551"), val = tensor([1])]; + tensor channels_mean_81_cast_fp16 = reduce_mean(axes = var_2551, keep_dims = var_2541, x = inputs_81_cast_fp16)[name = tensor("channels_mean_81_cast_fp16")]; + tensor zero_mean_81_cast_fp16 = sub(x = inputs_81_cast_fp16, y = channels_mean_81_cast_fp16)[name = tensor("zero_mean_81_cast_fp16")]; + tensor zero_mean_sq_81_cast_fp16 = mul(x = zero_mean_81_cast_fp16, y = zero_mean_81_cast_fp16)[name = tensor("zero_mean_sq_81_cast_fp16")]; + tensor var_2555 = const()[name = tensor("op_2555"), val = tensor([1])]; + tensor var_2556_cast_fp16 = reduce_mean(axes = var_2555, keep_dims = var_2541, x = zero_mean_sq_81_cast_fp16)[name = tensor("op_2556_cast_fp16")]; + tensor var_2557_to_fp16 = const()[name = tensor("op_2557_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2558_cast_fp16 = add(x = var_2556_cast_fp16, y = var_2557_to_fp16)[name = tensor("op_2558_cast_fp16")]; + tensor denom_81_epsilon_0_to_fp16 = const()[name = tensor("denom_81_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_81_cast_fp16 = rsqrt(epsilon = denom_81_epsilon_0_to_fp16, x = var_2558_cast_fp16)[name = tensor("denom_81_cast_fp16")]; + tensor out_81_cast_fp16 = mul(x = zero_mean_81_cast_fp16, y = denom_81_cast_fp16)[name = tensor("out_81_cast_fp16")]; + tensor obj_81_gamma_0_to_fp16 = const()[name = tensor("obj_81_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(801361152)))]; + tensor obj_81_beta_0_to_fp16 = const()[name = tensor("obj_81_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(801363776)))]; + tensor obj_81_epsilon_0_to_fp16 = const()[name = tensor("obj_81_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_81_cast_fp16 = batch_norm(beta = obj_81_beta_0_to_fp16, epsilon = obj_81_epsilon_0_to_fp16, gamma = obj_81_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_81_cast_fp16)[name = tensor("obj_81_cast_fp16")]; + tensor var_2573 = const()[name = tensor("op_2573"), val = tensor([1, 1])]; + tensor var_2575 = const()[name = tensor("op_2575"), val = tensor([1, 1])]; + tensor query_41_pad_type_0 = const()[name = tensor("query_41_pad_type_0"), val = tensor("custom")]; + tensor query_41_pad_0 = const()[name = tensor("query_41_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_20_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_20_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(801366400)))]; + tensor layers_20_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_20_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(804643264)))]; + tensor query_41_cast_fp16 = conv(bias = layers_20_self_attn_q_proj_bias_to_fp16, dilations = var_2575, groups = var_2540, pad = query_41_pad_0, pad_type = query_41_pad_type_0, strides = var_2573, weight = layers_20_self_attn_q_proj_weight_to_fp16, x = obj_81_cast_fp16)[name = tensor("query_41_cast_fp16")]; + tensor var_2579 = const()[name = tensor("op_2579"), val = tensor([1, 1])]; + tensor var_2581 = const()[name = tensor("op_2581"), val 
= tensor([1, 1])]; + tensor key_41_pad_type_0 = const()[name = tensor("key_41_pad_type_0"), val = tensor("custom")]; + tensor key_41_pad_0 = const()[name = tensor("key_41_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_20_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_20_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(804645888)))]; + tensor key_41_cast_fp16 = conv(dilations = var_2581, groups = var_2540, pad = key_41_pad_0, pad_type = key_41_pad_type_0, strides = var_2579, weight = layers_20_self_attn_k_proj_weight_to_fp16, x = obj_81_cast_fp16)[name = tensor("key_41_cast_fp16")]; + tensor var_2586 = const()[name = tensor("op_2586"), val = tensor([1, 1])]; + tensor var_2588 = const()[name = tensor("op_2588"), val = tensor([1, 1])]; + tensor value_41_pad_type_0 = const()[name = tensor("value_41_pad_type_0"), val = tensor("custom")]; + tensor value_41_pad_0 = const()[name = tensor("value_41_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_20_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_20_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(807922752)))]; + tensor layers_20_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_20_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(811199616)))]; + tensor value_41_cast_fp16 = conv(bias = layers_20_self_attn_v_proj_bias_to_fp16, dilations = var_2588, groups = var_2540, pad = value_41_pad_0, pad_type = value_41_pad_type_0, strides = var_2586, weight = layers_20_self_attn_v_proj_weight_to_fp16, x = obj_81_cast_fp16)[name = tensor("value_41_cast_fp16")]; + tensor var_2592 = const()[name = tensor("op_2592"), val = tensor([1, 20, 64, -1])]; + tensor var_2593_cast_fp16 = reshape(shape = var_2592, x = query_41_cast_fp16)[name = tensor("op_2593_cast_fp16")]; + tensor var_2594_to_fp16 = const()[name = tensor("op_2594_to_fp16"), val = tensor(0x1p-3)]; + tensor var_2595_cast_fp16 = mul(x = var_2593_cast_fp16, y = var_2594_to_fp16)[name = tensor("op_2595_cast_fp16")]; + tensor var_2596 = const()[name = tensor("op_2596"), val = tensor([1, 20, 64, -1])]; + tensor var_2597_cast_fp16 = reshape(shape = var_2596, x = key_41_cast_fp16)[name = tensor("op_2597_cast_fp16")]; + tensor mh_w_41_transpose_x_0 = const()[name = tensor("mh_w_41_transpose_x_0"), val = tensor(true)]; + tensor mh_w_41_transpose_y_0 = const()[name = tensor("mh_w_41_transpose_y_0"), val = tensor(false)]; + tensor mh_w_41_cast_fp16 = matmul(transpose_x = mh_w_41_transpose_x_0, transpose_y = mh_w_41_transpose_y_0, x = var_2595_cast_fp16, y = var_2597_cast_fp16)[name = tensor("mh_w_41_cast_fp16")]; + tensor var_2600_cast_fp16 = softmax(axis = var_2538, x = mh_w_41_cast_fp16)[name = tensor("op_2600_cast_fp16")]; + tensor var_2601 = const()[name = tensor("op_2601"), val = tensor([1, 20, 64, -1])]; + tensor var_2602_cast_fp16 = reshape(shape = var_2601, x = value_41_cast_fp16)[name = tensor("op_2602_cast_fp16")]; + tensor attn_41_transpose_x_0 = const()[name = tensor("attn_41_transpose_x_0"), val = tensor(false)]; + tensor attn_41_transpose_y_0 = const()[name = tensor("attn_41_transpose_y_0"), val = tensor(true)]; + tensor attn_41_cast_fp16 = matmul(transpose_x = attn_41_transpose_x_0, transpose_y = attn_41_transpose_y_0, x = var_2602_cast_fp16, y = var_2600_cast_fp16)[name = tensor("attn_41_cast_fp16")]; + tensor var_2605 = const()[name 
= tensor("op_2605"), val = tensor([1, 1280, 1, -1])]; + tensor input_161_cast_fp16 = reshape(shape = var_2605, x = attn_41_cast_fp16)[name = tensor("input_161_cast_fp16")]; + tensor var_2609 = const()[name = tensor("op_2609"), val = tensor([1, 1])]; + tensor var_2611 = const()[name = tensor("op_2611"), val = tensor([1, 1])]; + tensor obj_83_pad_type_0 = const()[name = tensor("obj_83_pad_type_0"), val = tensor("custom")]; + tensor obj_83_pad_0 = const()[name = tensor("obj_83_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_20_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_20_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(811202240)))]; + tensor layers_20_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_20_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(814479104)))]; + tensor obj_83_cast_fp16 = conv(bias = layers_20_self_attn_o_proj_bias_to_fp16, dilations = var_2611, groups = var_2540, pad = obj_83_pad_0, pad_type = obj_83_pad_type_0, strides = var_2609, weight = layers_20_self_attn_o_proj_weight_to_fp16, x = input_161_cast_fp16)[name = tensor("obj_83_cast_fp16")]; + tensor inputs_83_cast_fp16 = add(x = inputs_81_cast_fp16, y = obj_83_cast_fp16)[name = tensor("inputs_83_cast_fp16")]; + tensor var_2617 = const()[name = tensor("op_2617"), val = tensor([1])]; + tensor channels_mean_83_cast_fp16 = reduce_mean(axes = var_2617, keep_dims = var_2541, x = inputs_83_cast_fp16)[name = tensor("channels_mean_83_cast_fp16")]; + tensor zero_mean_83_cast_fp16 = sub(x = inputs_83_cast_fp16, y = channels_mean_83_cast_fp16)[name = tensor("zero_mean_83_cast_fp16")]; + tensor zero_mean_sq_83_cast_fp16 = mul(x = zero_mean_83_cast_fp16, y = zero_mean_83_cast_fp16)[name = tensor("zero_mean_sq_83_cast_fp16")]; + tensor var_2621 = const()[name = tensor("op_2621"), val = tensor([1])]; + tensor var_2622_cast_fp16 = reduce_mean(axes = var_2621, keep_dims = var_2541, x = zero_mean_sq_83_cast_fp16)[name = tensor("op_2622_cast_fp16")]; + tensor var_2623_to_fp16 = const()[name = tensor("op_2623_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2624_cast_fp16 = add(x = var_2622_cast_fp16, y = var_2623_to_fp16)[name = tensor("op_2624_cast_fp16")]; + tensor denom_83_epsilon_0_to_fp16 = const()[name = tensor("denom_83_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_83_cast_fp16 = rsqrt(epsilon = denom_83_epsilon_0_to_fp16, x = var_2624_cast_fp16)[name = tensor("denom_83_cast_fp16")]; + tensor out_83_cast_fp16 = mul(x = zero_mean_83_cast_fp16, y = denom_83_cast_fp16)[name = tensor("out_83_cast_fp16")]; + tensor input_163_gamma_0_to_fp16 = const()[name = tensor("input_163_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(814481728)))]; + tensor input_163_beta_0_to_fp16 = const()[name = tensor("input_163_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(814484352)))]; + tensor input_163_epsilon_0_to_fp16 = const()[name = tensor("input_163_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_163_cast_fp16 = batch_norm(beta = input_163_beta_0_to_fp16, epsilon = input_163_epsilon_0_to_fp16, gamma = input_163_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_83_cast_fp16)[name = tensor("input_163_cast_fp16")]; + tensor var_2635 = const()[name = tensor("op_2635"), val = tensor([1, 
1])]; + tensor var_2637 = const()[name = tensor("op_2637"), val = tensor([1, 1])]; + tensor input_165_pad_type_0 = const()[name = tensor("input_165_pad_type_0"), val = tensor("custom")]; + tensor input_165_pad_0 = const()[name = tensor("input_165_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_20_fc1_weight_to_fp16 = const()[name = tensor("layers_20_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(814486976)))]; + tensor layers_20_fc1_bias_to_fp16 = const()[name = tensor("layers_20_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(827594240)))]; + tensor input_165_cast_fp16 = conv(bias = layers_20_fc1_bias_to_fp16, dilations = var_2637, groups = var_2540, pad = input_165_pad_0, pad_type = input_165_pad_type_0, strides = var_2635, weight = layers_20_fc1_weight_to_fp16, x = input_163_cast_fp16)[name = tensor("input_165_cast_fp16")]; + tensor input_167_mode_0 = const()[name = tensor("input_167_mode_0"), val = tensor("EXACT")]; + tensor input_167_cast_fp16 = gelu(mode = input_167_mode_0, x = input_165_cast_fp16)[name = tensor("input_167_cast_fp16")]; + tensor var_2643 = const()[name = tensor("op_2643"), val = tensor([1, 1])]; + tensor var_2645 = const()[name = tensor("op_2645"), val = tensor([1, 1])]; + tensor hidden_states_45_pad_type_0 = const()[name = tensor("hidden_states_45_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_45_pad_0 = const()[name = tensor("hidden_states_45_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_20_fc2_weight_to_fp16 = const()[name = tensor("layers_20_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(827604544)))]; + tensor layers_20_fc2_bias_to_fp16 = const()[name = tensor("layers_20_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(840711808)))]; + tensor hidden_states_45_cast_fp16 = conv(bias = layers_20_fc2_bias_to_fp16, dilations = var_2645, groups = var_2540, pad = hidden_states_45_pad_0, pad_type = hidden_states_45_pad_type_0, strides = var_2643, weight = layers_20_fc2_weight_to_fp16, x = input_167_cast_fp16)[name = tensor("hidden_states_45_cast_fp16")]; + tensor inputs_85_cast_fp16 = add(x = inputs_83_cast_fp16, y = hidden_states_45_cast_fp16)[name = tensor("inputs_85_cast_fp16")]; + tensor var_2656 = const()[name = tensor("op_2656"), val = tensor(3)]; + tensor var_2658 = const()[name = tensor("op_2658"), val = tensor(1)]; + tensor var_2659 = const()[name = tensor("op_2659"), val = tensor(true)]; + tensor var_2669 = const()[name = tensor("op_2669"), val = tensor([1])]; + tensor channels_mean_85_cast_fp16 = reduce_mean(axes = var_2669, keep_dims = var_2659, x = inputs_85_cast_fp16)[name = tensor("channels_mean_85_cast_fp16")]; + tensor zero_mean_85_cast_fp16 = sub(x = inputs_85_cast_fp16, y = channels_mean_85_cast_fp16)[name = tensor("zero_mean_85_cast_fp16")]; + tensor zero_mean_sq_85_cast_fp16 = mul(x = zero_mean_85_cast_fp16, y = zero_mean_85_cast_fp16)[name = tensor("zero_mean_sq_85_cast_fp16")]; + tensor var_2673 = const()[name = tensor("op_2673"), val = tensor([1])]; + tensor var_2674_cast_fp16 = reduce_mean(axes = var_2673, keep_dims = var_2659, x = zero_mean_sq_85_cast_fp16)[name = tensor("op_2674_cast_fp16")]; + tensor var_2675_to_fp16 = const()[name = tensor("op_2675_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2676_cast_fp16 = add(x = var_2674_cast_fp16, y = var_2675_to_fp16)[name = 
tensor("op_2676_cast_fp16")]; + tensor denom_85_epsilon_0_to_fp16 = const()[name = tensor("denom_85_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_85_cast_fp16 = rsqrt(epsilon = denom_85_epsilon_0_to_fp16, x = var_2676_cast_fp16)[name = tensor("denom_85_cast_fp16")]; + tensor out_85_cast_fp16 = mul(x = zero_mean_85_cast_fp16, y = denom_85_cast_fp16)[name = tensor("out_85_cast_fp16")]; + tensor obj_85_gamma_0_to_fp16 = const()[name = tensor("obj_85_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(840714432)))]; + tensor obj_85_beta_0_to_fp16 = const()[name = tensor("obj_85_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(840717056)))]; + tensor obj_85_epsilon_0_to_fp16 = const()[name = tensor("obj_85_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_85_cast_fp16 = batch_norm(beta = obj_85_beta_0_to_fp16, epsilon = obj_85_epsilon_0_to_fp16, gamma = obj_85_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_85_cast_fp16)[name = tensor("obj_85_cast_fp16")]; + tensor var_2691 = const()[name = tensor("op_2691"), val = tensor([1, 1])]; + tensor var_2693 = const()[name = tensor("op_2693"), val = tensor([1, 1])]; + tensor query_43_pad_type_0 = const()[name = tensor("query_43_pad_type_0"), val = tensor("custom")]; + tensor query_43_pad_0 = const()[name = tensor("query_43_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_21_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_21_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(840719680)))]; + tensor layers_21_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_21_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(843996544)))]; + tensor query_43_cast_fp16 = conv(bias = layers_21_self_attn_q_proj_bias_to_fp16, dilations = var_2693, groups = var_2658, pad = query_43_pad_0, pad_type = query_43_pad_type_0, strides = var_2691, weight = layers_21_self_attn_q_proj_weight_to_fp16, x = obj_85_cast_fp16)[name = tensor("query_43_cast_fp16")]; + tensor var_2697 = const()[name = tensor("op_2697"), val = tensor([1, 1])]; + tensor var_2699 = const()[name = tensor("op_2699"), val = tensor([1, 1])]; + tensor key_43_pad_type_0 = const()[name = tensor("key_43_pad_type_0"), val = tensor("custom")]; + tensor key_43_pad_0 = const()[name = tensor("key_43_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_21_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_21_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(843999168)))]; + tensor key_43_cast_fp16 = conv(dilations = var_2699, groups = var_2658, pad = key_43_pad_0, pad_type = key_43_pad_type_0, strides = var_2697, weight = layers_21_self_attn_k_proj_weight_to_fp16, x = obj_85_cast_fp16)[name = tensor("key_43_cast_fp16")]; + tensor var_2704 = const()[name = tensor("op_2704"), val = tensor([1, 1])]; + tensor var_2706 = const()[name = tensor("op_2706"), val = tensor([1, 1])]; + tensor value_43_pad_type_0 = const()[name = tensor("value_43_pad_type_0"), val = tensor("custom")]; + tensor value_43_pad_0 = const()[name = tensor("value_43_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_21_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_21_self_attn_v_proj_weight_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847276032)))]; + tensor layers_21_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_21_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(850552896)))]; + tensor value_43_cast_fp16 = conv(bias = layers_21_self_attn_v_proj_bias_to_fp16, dilations = var_2706, groups = var_2658, pad = value_43_pad_0, pad_type = value_43_pad_type_0, strides = var_2704, weight = layers_21_self_attn_v_proj_weight_to_fp16, x = obj_85_cast_fp16)[name = tensor("value_43_cast_fp16")]; + tensor var_2710 = const()[name = tensor("op_2710"), val = tensor([1, 20, 64, -1])]; + tensor var_2711_cast_fp16 = reshape(shape = var_2710, x = query_43_cast_fp16)[name = tensor("op_2711_cast_fp16")]; + tensor var_2712_to_fp16 = const()[name = tensor("op_2712_to_fp16"), val = tensor(0x1p-3)]; + tensor var_2713_cast_fp16 = mul(x = var_2711_cast_fp16, y = var_2712_to_fp16)[name = tensor("op_2713_cast_fp16")]; + tensor var_2714 = const()[name = tensor("op_2714"), val = tensor([1, 20, 64, -1])]; + tensor var_2715_cast_fp16 = reshape(shape = var_2714, x = key_43_cast_fp16)[name = tensor("op_2715_cast_fp16")]; + tensor mh_w_43_transpose_x_0 = const()[name = tensor("mh_w_43_transpose_x_0"), val = tensor(true)]; + tensor mh_w_43_transpose_y_0 = const()[name = tensor("mh_w_43_transpose_y_0"), val = tensor(false)]; + tensor mh_w_43_cast_fp16 = matmul(transpose_x = mh_w_43_transpose_x_0, transpose_y = mh_w_43_transpose_y_0, x = var_2713_cast_fp16, y = var_2715_cast_fp16)[name = tensor("mh_w_43_cast_fp16")]; + tensor var_2718_cast_fp16 = softmax(axis = var_2656, x = mh_w_43_cast_fp16)[name = tensor("op_2718_cast_fp16")]; + tensor var_2719 = const()[name = tensor("op_2719"), val = tensor([1, 20, 64, -1])]; + tensor var_2720_cast_fp16 = reshape(shape = var_2719, x = value_43_cast_fp16)[name = tensor("op_2720_cast_fp16")]; + tensor attn_43_transpose_x_0 = const()[name = tensor("attn_43_transpose_x_0"), val = tensor(false)]; + tensor attn_43_transpose_y_0 = const()[name = tensor("attn_43_transpose_y_0"), val = tensor(true)]; + tensor attn_43_cast_fp16 = matmul(transpose_x = attn_43_transpose_x_0, transpose_y = attn_43_transpose_y_0, x = var_2720_cast_fp16, y = var_2718_cast_fp16)[name = tensor("attn_43_cast_fp16")]; + tensor var_2723 = const()[name = tensor("op_2723"), val = tensor([1, 1280, 1, -1])]; + tensor input_169_cast_fp16 = reshape(shape = var_2723, x = attn_43_cast_fp16)[name = tensor("input_169_cast_fp16")]; + tensor var_2727 = const()[name = tensor("op_2727"), val = tensor([1, 1])]; + tensor var_2729 = const()[name = tensor("op_2729"), val = tensor([1, 1])]; + tensor obj_87_pad_type_0 = const()[name = tensor("obj_87_pad_type_0"), val = tensor("custom")]; + tensor obj_87_pad_0 = const()[name = tensor("obj_87_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_21_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_21_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(850555520)))]; + tensor layers_21_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_21_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(853832384)))]; + tensor obj_87_cast_fp16 = conv(bias = layers_21_self_attn_o_proj_bias_to_fp16, dilations = var_2729, groups = var_2658, pad = obj_87_pad_0, pad_type = obj_87_pad_type_0, strides = var_2727, weight = 
layers_21_self_attn_o_proj_weight_to_fp16, x = input_169_cast_fp16)[name = tensor("obj_87_cast_fp16")]; + tensor inputs_87_cast_fp16 = add(x = inputs_85_cast_fp16, y = obj_87_cast_fp16)[name = tensor("inputs_87_cast_fp16")]; + tensor var_2735 = const()[name = tensor("op_2735"), val = tensor([1])]; + tensor channels_mean_87_cast_fp16 = reduce_mean(axes = var_2735, keep_dims = var_2659, x = inputs_87_cast_fp16)[name = tensor("channels_mean_87_cast_fp16")]; + tensor zero_mean_87_cast_fp16 = sub(x = inputs_87_cast_fp16, y = channels_mean_87_cast_fp16)[name = tensor("zero_mean_87_cast_fp16")]; + tensor zero_mean_sq_87_cast_fp16 = mul(x = zero_mean_87_cast_fp16, y = zero_mean_87_cast_fp16)[name = tensor("zero_mean_sq_87_cast_fp16")]; + tensor var_2739 = const()[name = tensor("op_2739"), val = tensor([1])]; + tensor var_2740_cast_fp16 = reduce_mean(axes = var_2739, keep_dims = var_2659, x = zero_mean_sq_87_cast_fp16)[name = tensor("op_2740_cast_fp16")]; + tensor var_2741_to_fp16 = const()[name = tensor("op_2741_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2742_cast_fp16 = add(x = var_2740_cast_fp16, y = var_2741_to_fp16)[name = tensor("op_2742_cast_fp16")]; + tensor denom_87_epsilon_0_to_fp16 = const()[name = tensor("denom_87_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_87_cast_fp16 = rsqrt(epsilon = denom_87_epsilon_0_to_fp16, x = var_2742_cast_fp16)[name = tensor("denom_87_cast_fp16")]; + tensor out_87_cast_fp16 = mul(x = zero_mean_87_cast_fp16, y = denom_87_cast_fp16)[name = tensor("out_87_cast_fp16")]; + tensor input_171_gamma_0_to_fp16 = const()[name = tensor("input_171_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(853835008)))]; + tensor input_171_beta_0_to_fp16 = const()[name = tensor("input_171_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(853837632)))]; + tensor input_171_epsilon_0_to_fp16 = const()[name = tensor("input_171_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_171_cast_fp16 = batch_norm(beta = input_171_beta_0_to_fp16, epsilon = input_171_epsilon_0_to_fp16, gamma = input_171_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_87_cast_fp16)[name = tensor("input_171_cast_fp16")]; + tensor var_2753 = const()[name = tensor("op_2753"), val = tensor([1, 1])]; + tensor var_2755 = const()[name = tensor("op_2755"), val = tensor([1, 1])]; + tensor input_173_pad_type_0 = const()[name = tensor("input_173_pad_type_0"), val = tensor("custom")]; + tensor input_173_pad_0 = const()[name = tensor("input_173_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_21_fc1_weight_to_fp16 = const()[name = tensor("layers_21_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(853840256)))]; + tensor layers_21_fc1_bias_to_fp16 = const()[name = tensor("layers_21_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(866947520)))]; + tensor input_173_cast_fp16 = conv(bias = layers_21_fc1_bias_to_fp16, dilations = var_2755, groups = var_2658, pad = input_173_pad_0, pad_type = input_173_pad_type_0, strides = var_2753, weight = layers_21_fc1_weight_to_fp16, x = input_171_cast_fp16)[name = tensor("input_173_cast_fp16")]; + tensor input_175_mode_0 = const()[name = tensor("input_175_mode_0"), val = tensor("EXACT")]; + tensor input_175_cast_fp16 = gelu(mode = input_175_mode_0, x = input_173_cast_fp16)[name = 
tensor("input_175_cast_fp16")]; + tensor var_2761 = const()[name = tensor("op_2761"), val = tensor([1, 1])]; + tensor var_2763 = const()[name = tensor("op_2763"), val = tensor([1, 1])]; + tensor hidden_states_47_pad_type_0 = const()[name = tensor("hidden_states_47_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_47_pad_0 = const()[name = tensor("hidden_states_47_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_21_fc2_weight_to_fp16 = const()[name = tensor("layers_21_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(866957824)))]; + tensor layers_21_fc2_bias_to_fp16 = const()[name = tensor("layers_21_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(880065088)))]; + tensor hidden_states_47_cast_fp16 = conv(bias = layers_21_fc2_bias_to_fp16, dilations = var_2763, groups = var_2658, pad = hidden_states_47_pad_0, pad_type = hidden_states_47_pad_type_0, strides = var_2761, weight = layers_21_fc2_weight_to_fp16, x = input_175_cast_fp16)[name = tensor("hidden_states_47_cast_fp16")]; + tensor inputs_89_cast_fp16 = add(x = inputs_87_cast_fp16, y = hidden_states_47_cast_fp16)[name = tensor("inputs_89_cast_fp16")]; + tensor var_2774 = const()[name = tensor("op_2774"), val = tensor(3)]; + tensor var_2776 = const()[name = tensor("op_2776"), val = tensor(1)]; + tensor var_2777 = const()[name = tensor("op_2777"), val = tensor(true)]; + tensor var_2787 = const()[name = tensor("op_2787"), val = tensor([1])]; + tensor channels_mean_89_cast_fp16 = reduce_mean(axes = var_2787, keep_dims = var_2777, x = inputs_89_cast_fp16)[name = tensor("channels_mean_89_cast_fp16")]; + tensor zero_mean_89_cast_fp16 = sub(x = inputs_89_cast_fp16, y = channels_mean_89_cast_fp16)[name = tensor("zero_mean_89_cast_fp16")]; + tensor zero_mean_sq_89_cast_fp16 = mul(x = zero_mean_89_cast_fp16, y = zero_mean_89_cast_fp16)[name = tensor("zero_mean_sq_89_cast_fp16")]; + tensor var_2791 = const()[name = tensor("op_2791"), val = tensor([1])]; + tensor var_2792_cast_fp16 = reduce_mean(axes = var_2791, keep_dims = var_2777, x = zero_mean_sq_89_cast_fp16)[name = tensor("op_2792_cast_fp16")]; + tensor var_2793_to_fp16 = const()[name = tensor("op_2793_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2794_cast_fp16 = add(x = var_2792_cast_fp16, y = var_2793_to_fp16)[name = tensor("op_2794_cast_fp16")]; + tensor denom_89_epsilon_0_to_fp16 = const()[name = tensor("denom_89_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_89_cast_fp16 = rsqrt(epsilon = denom_89_epsilon_0_to_fp16, x = var_2794_cast_fp16)[name = tensor("denom_89_cast_fp16")]; + tensor out_89_cast_fp16 = mul(x = zero_mean_89_cast_fp16, y = denom_89_cast_fp16)[name = tensor("out_89_cast_fp16")]; + tensor obj_89_gamma_0_to_fp16 = const()[name = tensor("obj_89_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(880067712)))]; + tensor obj_89_beta_0_to_fp16 = const()[name = tensor("obj_89_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(880070336)))]; + tensor obj_89_epsilon_0_to_fp16 = const()[name = tensor("obj_89_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_89_cast_fp16 = batch_norm(beta = obj_89_beta_0_to_fp16, epsilon = obj_89_epsilon_0_to_fp16, gamma = obj_89_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_89_cast_fp16)[name = tensor("obj_89_cast_fp16")]; + tensor var_2809 
= const()[name = tensor("op_2809"), val = tensor([1, 1])]; + tensor var_2811 = const()[name = tensor("op_2811"), val = tensor([1, 1])]; + tensor query_45_pad_type_0 = const()[name = tensor("query_45_pad_type_0"), val = tensor("custom")]; + tensor query_45_pad_0 = const()[name = tensor("query_45_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_22_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_22_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(880072960)))]; + tensor layers_22_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_22_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(883349824)))]; + tensor query_45_cast_fp16 = conv(bias = layers_22_self_attn_q_proj_bias_to_fp16, dilations = var_2811, groups = var_2776, pad = query_45_pad_0, pad_type = query_45_pad_type_0, strides = var_2809, weight = layers_22_self_attn_q_proj_weight_to_fp16, x = obj_89_cast_fp16)[name = tensor("query_45_cast_fp16")]; + tensor var_2815 = const()[name = tensor("op_2815"), val = tensor([1, 1])]; + tensor var_2817 = const()[name = tensor("op_2817"), val = tensor([1, 1])]; + tensor key_45_pad_type_0 = const()[name = tensor("key_45_pad_type_0"), val = tensor("custom")]; + tensor key_45_pad_0 = const()[name = tensor("key_45_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_22_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_22_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(883352448)))]; + tensor key_45_cast_fp16 = conv(dilations = var_2817, groups = var_2776, pad = key_45_pad_0, pad_type = key_45_pad_type_0, strides = var_2815, weight = layers_22_self_attn_k_proj_weight_to_fp16, x = obj_89_cast_fp16)[name = tensor("key_45_cast_fp16")]; + tensor var_2822 = const()[name = tensor("op_2822"), val = tensor([1, 1])]; + tensor var_2824 = const()[name = tensor("op_2824"), val = tensor([1, 1])]; + tensor value_45_pad_type_0 = const()[name = tensor("value_45_pad_type_0"), val = tensor("custom")]; + tensor value_45_pad_0 = const()[name = tensor("value_45_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_22_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_22_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(886629312)))]; + tensor layers_22_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_22_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(889906176)))]; + tensor value_45_cast_fp16 = conv(bias = layers_22_self_attn_v_proj_bias_to_fp16, dilations = var_2824, groups = var_2776, pad = value_45_pad_0, pad_type = value_45_pad_type_0, strides = var_2822, weight = layers_22_self_attn_v_proj_weight_to_fp16, x = obj_89_cast_fp16)[name = tensor("value_45_cast_fp16")]; + tensor var_2828 = const()[name = tensor("op_2828"), val = tensor([1, 20, 64, -1])]; + tensor var_2829_cast_fp16 = reshape(shape = var_2828, x = query_45_cast_fp16)[name = tensor("op_2829_cast_fp16")]; + tensor var_2830_to_fp16 = const()[name = tensor("op_2830_to_fp16"), val = tensor(0x1p-3)]; + tensor var_2831_cast_fp16 = mul(x = var_2829_cast_fp16, y = var_2830_to_fp16)[name = tensor("op_2831_cast_fp16")]; + tensor var_2832 = const()[name = tensor("op_2832"), val = tensor([1, 20, 64, -1])]; + tensor var_2833_cast_fp16 = reshape(shape 
= var_2832, x = key_45_cast_fp16)[name = tensor("op_2833_cast_fp16")]; + tensor mh_w_45_transpose_x_0 = const()[name = tensor("mh_w_45_transpose_x_0"), val = tensor(true)]; + tensor mh_w_45_transpose_y_0 = const()[name = tensor("mh_w_45_transpose_y_0"), val = tensor(false)]; + tensor mh_w_45_cast_fp16 = matmul(transpose_x = mh_w_45_transpose_x_0, transpose_y = mh_w_45_transpose_y_0, x = var_2831_cast_fp16, y = var_2833_cast_fp16)[name = tensor("mh_w_45_cast_fp16")]; + tensor var_2836_cast_fp16 = softmax(axis = var_2774, x = mh_w_45_cast_fp16)[name = tensor("op_2836_cast_fp16")]; + tensor var_2837 = const()[name = tensor("op_2837"), val = tensor([1, 20, 64, -1])]; + tensor var_2838_cast_fp16 = reshape(shape = var_2837, x = value_45_cast_fp16)[name = tensor("op_2838_cast_fp16")]; + tensor attn_45_transpose_x_0 = const()[name = tensor("attn_45_transpose_x_0"), val = tensor(false)]; + tensor attn_45_transpose_y_0 = const()[name = tensor("attn_45_transpose_y_0"), val = tensor(true)]; + tensor attn_45_cast_fp16 = matmul(transpose_x = attn_45_transpose_x_0, transpose_y = attn_45_transpose_y_0, x = var_2838_cast_fp16, y = var_2836_cast_fp16)[name = tensor("attn_45_cast_fp16")]; + tensor var_2841 = const()[name = tensor("op_2841"), val = tensor([1, 1280, 1, -1])]; + tensor input_177_cast_fp16 = reshape(shape = var_2841, x = attn_45_cast_fp16)[name = tensor("input_177_cast_fp16")]; + tensor var_2845 = const()[name = tensor("op_2845"), val = tensor([1, 1])]; + tensor var_2847 = const()[name = tensor("op_2847"), val = tensor([1, 1])]; + tensor obj_91_pad_type_0 = const()[name = tensor("obj_91_pad_type_0"), val = tensor("custom")]; + tensor obj_91_pad_0 = const()[name = tensor("obj_91_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_22_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_22_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(889908800)))]; + tensor layers_22_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_22_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893185664)))]; + tensor obj_91_cast_fp16 = conv(bias = layers_22_self_attn_o_proj_bias_to_fp16, dilations = var_2847, groups = var_2776, pad = obj_91_pad_0, pad_type = obj_91_pad_type_0, strides = var_2845, weight = layers_22_self_attn_o_proj_weight_to_fp16, x = input_177_cast_fp16)[name = tensor("obj_91_cast_fp16")]; + tensor inputs_91_cast_fp16 = add(x = inputs_89_cast_fp16, y = obj_91_cast_fp16)[name = tensor("inputs_91_cast_fp16")]; + tensor var_2853 = const()[name = tensor("op_2853"), val = tensor([1])]; + tensor channels_mean_91_cast_fp16 = reduce_mean(axes = var_2853, keep_dims = var_2777, x = inputs_91_cast_fp16)[name = tensor("channels_mean_91_cast_fp16")]; + tensor zero_mean_91_cast_fp16 = sub(x = inputs_91_cast_fp16, y = channels_mean_91_cast_fp16)[name = tensor("zero_mean_91_cast_fp16")]; + tensor zero_mean_sq_91_cast_fp16 = mul(x = zero_mean_91_cast_fp16, y = zero_mean_91_cast_fp16)[name = tensor("zero_mean_sq_91_cast_fp16")]; + tensor var_2857 = const()[name = tensor("op_2857"), val = tensor([1])]; + tensor var_2858_cast_fp16 = reduce_mean(axes = var_2857, keep_dims = var_2777, x = zero_mean_sq_91_cast_fp16)[name = tensor("op_2858_cast_fp16")]; + tensor var_2859_to_fp16 = const()[name = tensor("op_2859_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2860_cast_fp16 = add(x = var_2858_cast_fp16, y = var_2859_to_fp16)[name = 
tensor("op_2860_cast_fp16")]; + tensor denom_91_epsilon_0_to_fp16 = const()[name = tensor("denom_91_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_91_cast_fp16 = rsqrt(epsilon = denom_91_epsilon_0_to_fp16, x = var_2860_cast_fp16)[name = tensor("denom_91_cast_fp16")]; + tensor out_91_cast_fp16 = mul(x = zero_mean_91_cast_fp16, y = denom_91_cast_fp16)[name = tensor("out_91_cast_fp16")]; + tensor input_179_gamma_0_to_fp16 = const()[name = tensor("input_179_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893188288)))]; + tensor input_179_beta_0_to_fp16 = const()[name = tensor("input_179_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893190912)))]; + tensor input_179_epsilon_0_to_fp16 = const()[name = tensor("input_179_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_179_cast_fp16 = batch_norm(beta = input_179_beta_0_to_fp16, epsilon = input_179_epsilon_0_to_fp16, gamma = input_179_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_91_cast_fp16)[name = tensor("input_179_cast_fp16")]; + tensor var_2871 = const()[name = tensor("op_2871"), val = tensor([1, 1])]; + tensor var_2873 = const()[name = tensor("op_2873"), val = tensor([1, 1])]; + tensor input_181_pad_type_0 = const()[name = tensor("input_181_pad_type_0"), val = tensor("custom")]; + tensor input_181_pad_0 = const()[name = tensor("input_181_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_22_fc1_weight_to_fp16 = const()[name = tensor("layers_22_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893193536)))]; + tensor layers_22_fc1_bias_to_fp16 = const()[name = tensor("layers_22_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(906300800)))]; + tensor input_181_cast_fp16 = conv(bias = layers_22_fc1_bias_to_fp16, dilations = var_2873, groups = var_2776, pad = input_181_pad_0, pad_type = input_181_pad_type_0, strides = var_2871, weight = layers_22_fc1_weight_to_fp16, x = input_179_cast_fp16)[name = tensor("input_181_cast_fp16")]; + tensor input_183_mode_0 = const()[name = tensor("input_183_mode_0"), val = tensor("EXACT")]; + tensor input_183_cast_fp16 = gelu(mode = input_183_mode_0, x = input_181_cast_fp16)[name = tensor("input_183_cast_fp16")]; + tensor var_2879 = const()[name = tensor("op_2879"), val = tensor([1, 1])]; + tensor var_2881 = const()[name = tensor("op_2881"), val = tensor([1, 1])]; + tensor hidden_states_49_pad_type_0 = const()[name = tensor("hidden_states_49_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_49_pad_0 = const()[name = tensor("hidden_states_49_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_22_fc2_weight_to_fp16 = const()[name = tensor("layers_22_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(906311104)))]; + tensor layers_22_fc2_bias_to_fp16 = const()[name = tensor("layers_22_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(919418368)))]; + tensor hidden_states_49_cast_fp16 = conv(bias = layers_22_fc2_bias_to_fp16, dilations = var_2881, groups = var_2776, pad = hidden_states_49_pad_0, pad_type = hidden_states_49_pad_type_0, strides = var_2879, weight = layers_22_fc2_weight_to_fp16, x = input_183_cast_fp16)[name = tensor("hidden_states_49_cast_fp16")]; + tensor inputs_93_cast_fp16 
= add(x = inputs_91_cast_fp16, y = hidden_states_49_cast_fp16)[name = tensor("inputs_93_cast_fp16")]; + tensor var_2892 = const()[name = tensor("op_2892"), val = tensor(3)]; + tensor var_2894 = const()[name = tensor("op_2894"), val = tensor(1)]; + tensor var_2895 = const()[name = tensor("op_2895"), val = tensor(true)]; + tensor var_2905 = const()[name = tensor("op_2905"), val = tensor([1])]; + tensor channels_mean_93_cast_fp16 = reduce_mean(axes = var_2905, keep_dims = var_2895, x = inputs_93_cast_fp16)[name = tensor("channels_mean_93_cast_fp16")]; + tensor zero_mean_93_cast_fp16 = sub(x = inputs_93_cast_fp16, y = channels_mean_93_cast_fp16)[name = tensor("zero_mean_93_cast_fp16")]; + tensor zero_mean_sq_93_cast_fp16 = mul(x = zero_mean_93_cast_fp16, y = zero_mean_93_cast_fp16)[name = tensor("zero_mean_sq_93_cast_fp16")]; + tensor var_2909 = const()[name = tensor("op_2909"), val = tensor([1])]; + tensor var_2910_cast_fp16 = reduce_mean(axes = var_2909, keep_dims = var_2895, x = zero_mean_sq_93_cast_fp16)[name = tensor("op_2910_cast_fp16")]; + tensor var_2911_to_fp16 = const()[name = tensor("op_2911_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2912_cast_fp16 = add(x = var_2910_cast_fp16, y = var_2911_to_fp16)[name = tensor("op_2912_cast_fp16")]; + tensor denom_93_epsilon_0_to_fp16 = const()[name = tensor("denom_93_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_93_cast_fp16 = rsqrt(epsilon = denom_93_epsilon_0_to_fp16, x = var_2912_cast_fp16)[name = tensor("denom_93_cast_fp16")]; + tensor out_93_cast_fp16 = mul(x = zero_mean_93_cast_fp16, y = denom_93_cast_fp16)[name = tensor("out_93_cast_fp16")]; + tensor obj_93_gamma_0_to_fp16 = const()[name = tensor("obj_93_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(919420992)))]; + tensor obj_93_beta_0_to_fp16 = const()[name = tensor("obj_93_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(919423616)))]; + tensor obj_93_epsilon_0_to_fp16 = const()[name = tensor("obj_93_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_93_cast_fp16 = batch_norm(beta = obj_93_beta_0_to_fp16, epsilon = obj_93_epsilon_0_to_fp16, gamma = obj_93_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_93_cast_fp16)[name = tensor("obj_93_cast_fp16")]; + tensor var_2927 = const()[name = tensor("op_2927"), val = tensor([1, 1])]; + tensor var_2929 = const()[name = tensor("op_2929"), val = tensor([1, 1])]; + tensor query_47_pad_type_0 = const()[name = tensor("query_47_pad_type_0"), val = tensor("custom")]; + tensor query_47_pad_0 = const()[name = tensor("query_47_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_23_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_23_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(919426240)))]; + tensor layers_23_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_23_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(922703104)))]; + tensor query_47_cast_fp16 = conv(bias = layers_23_self_attn_q_proj_bias_to_fp16, dilations = var_2929, groups = var_2894, pad = query_47_pad_0, pad_type = query_47_pad_type_0, strides = var_2927, weight = layers_23_self_attn_q_proj_weight_to_fp16, x = obj_93_cast_fp16)[name = tensor("query_47_cast_fp16")]; + tensor var_2933 = const()[name = tensor("op_2933"), val = 
tensor([1, 1])]; + tensor var_2935 = const()[name = tensor("op_2935"), val = tensor([1, 1])]; + tensor key_47_pad_type_0 = const()[name = tensor("key_47_pad_type_0"), val = tensor("custom")]; + tensor key_47_pad_0 = const()[name = tensor("key_47_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_23_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_23_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(922705728)))]; + tensor key_47_cast_fp16 = conv(dilations = var_2935, groups = var_2894, pad = key_47_pad_0, pad_type = key_47_pad_type_0, strides = var_2933, weight = layers_23_self_attn_k_proj_weight_to_fp16, x = obj_93_cast_fp16)[name = tensor("key_47_cast_fp16")]; + tensor var_2940 = const()[name = tensor("op_2940"), val = tensor([1, 1])]; + tensor var_2942 = const()[name = tensor("op_2942"), val = tensor([1, 1])]; + tensor value_47_pad_type_0 = const()[name = tensor("value_47_pad_type_0"), val = tensor("custom")]; + tensor value_47_pad_0 = const()[name = tensor("value_47_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_23_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_23_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(925982592)))]; + tensor layers_23_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_23_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(929259456)))]; + tensor value_47_cast_fp16 = conv(bias = layers_23_self_attn_v_proj_bias_to_fp16, dilations = var_2942, groups = var_2894, pad = value_47_pad_0, pad_type = value_47_pad_type_0, strides = var_2940, weight = layers_23_self_attn_v_proj_weight_to_fp16, x = obj_93_cast_fp16)[name = tensor("value_47_cast_fp16")]; + tensor var_2946 = const()[name = tensor("op_2946"), val = tensor([1, 20, 64, -1])]; + tensor var_2947_cast_fp16 = reshape(shape = var_2946, x = query_47_cast_fp16)[name = tensor("op_2947_cast_fp16")]; + tensor var_2948_to_fp16 = const()[name = tensor("op_2948_to_fp16"), val = tensor(0x1p-3)]; + tensor var_2949_cast_fp16 = mul(x = var_2947_cast_fp16, y = var_2948_to_fp16)[name = tensor("op_2949_cast_fp16")]; + tensor var_2950 = const()[name = tensor("op_2950"), val = tensor([1, 20, 64, -1])]; + tensor var_2951_cast_fp16 = reshape(shape = var_2950, x = key_47_cast_fp16)[name = tensor("op_2951_cast_fp16")]; + tensor mh_w_47_transpose_x_0 = const()[name = tensor("mh_w_47_transpose_x_0"), val = tensor(true)]; + tensor mh_w_47_transpose_y_0 = const()[name = tensor("mh_w_47_transpose_y_0"), val = tensor(false)]; + tensor mh_w_47_cast_fp16 = matmul(transpose_x = mh_w_47_transpose_x_0, transpose_y = mh_w_47_transpose_y_0, x = var_2949_cast_fp16, y = var_2951_cast_fp16)[name = tensor("mh_w_47_cast_fp16")]; + tensor var_2954_cast_fp16 = softmax(axis = var_2892, x = mh_w_47_cast_fp16)[name = tensor("op_2954_cast_fp16")]; + tensor var_2955 = const()[name = tensor("op_2955"), val = tensor([1, 20, 64, -1])]; + tensor var_2956_cast_fp16 = reshape(shape = var_2955, x = value_47_cast_fp16)[name = tensor("op_2956_cast_fp16")]; + tensor attn_47_transpose_x_0 = const()[name = tensor("attn_47_transpose_x_0"), val = tensor(false)]; + tensor attn_47_transpose_y_0 = const()[name = tensor("attn_47_transpose_y_0"), val = tensor(true)]; + tensor attn_47_cast_fp16 = matmul(transpose_x = attn_47_transpose_x_0, transpose_y = attn_47_transpose_y_0, x = var_2956_cast_fp16, y = 
var_2954_cast_fp16)[name = tensor("attn_47_cast_fp16")]; + tensor var_2959 = const()[name = tensor("op_2959"), val = tensor([1, 1280, 1, -1])]; + tensor input_185_cast_fp16 = reshape(shape = var_2959, x = attn_47_cast_fp16)[name = tensor("input_185_cast_fp16")]; + tensor var_2963 = const()[name = tensor("op_2963"), val = tensor([1, 1])]; + tensor var_2965 = const()[name = tensor("op_2965"), val = tensor([1, 1])]; + tensor obj_95_pad_type_0 = const()[name = tensor("obj_95_pad_type_0"), val = tensor("custom")]; + tensor obj_95_pad_0 = const()[name = tensor("obj_95_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_23_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_23_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(929262080)))]; + tensor layers_23_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_23_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(932538944)))]; + tensor obj_95_cast_fp16 = conv(bias = layers_23_self_attn_o_proj_bias_to_fp16, dilations = var_2965, groups = var_2894, pad = obj_95_pad_0, pad_type = obj_95_pad_type_0, strides = var_2963, weight = layers_23_self_attn_o_proj_weight_to_fp16, x = input_185_cast_fp16)[name = tensor("obj_95_cast_fp16")]; + tensor inputs_95_cast_fp16 = add(x = inputs_93_cast_fp16, y = obj_95_cast_fp16)[name = tensor("inputs_95_cast_fp16")]; + tensor var_2971 = const()[name = tensor("op_2971"), val = tensor([1])]; + tensor channels_mean_95_cast_fp16 = reduce_mean(axes = var_2971, keep_dims = var_2895, x = inputs_95_cast_fp16)[name = tensor("channels_mean_95_cast_fp16")]; + tensor zero_mean_95_cast_fp16 = sub(x = inputs_95_cast_fp16, y = channels_mean_95_cast_fp16)[name = tensor("zero_mean_95_cast_fp16")]; + tensor zero_mean_sq_95_cast_fp16 = mul(x = zero_mean_95_cast_fp16, y = zero_mean_95_cast_fp16)[name = tensor("zero_mean_sq_95_cast_fp16")]; + tensor var_2975 = const()[name = tensor("op_2975"), val = tensor([1])]; + tensor var_2976_cast_fp16 = reduce_mean(axes = var_2975, keep_dims = var_2895, x = zero_mean_sq_95_cast_fp16)[name = tensor("op_2976_cast_fp16")]; + tensor var_2977_to_fp16 = const()[name = tensor("op_2977_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2978_cast_fp16 = add(x = var_2976_cast_fp16, y = var_2977_to_fp16)[name = tensor("op_2978_cast_fp16")]; + tensor denom_95_epsilon_0_to_fp16 = const()[name = tensor("denom_95_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_95_cast_fp16 = rsqrt(epsilon = denom_95_epsilon_0_to_fp16, x = var_2978_cast_fp16)[name = tensor("denom_95_cast_fp16")]; + tensor out_95_cast_fp16 = mul(x = zero_mean_95_cast_fp16, y = denom_95_cast_fp16)[name = tensor("out_95_cast_fp16")]; + tensor input_187_gamma_0_to_fp16 = const()[name = tensor("input_187_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(932541568)))]; + tensor input_187_beta_0_to_fp16 = const()[name = tensor("input_187_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(932544192)))]; + tensor input_187_epsilon_0_to_fp16 = const()[name = tensor("input_187_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_187_cast_fp16 = batch_norm(beta = input_187_beta_0_to_fp16, epsilon = input_187_epsilon_0_to_fp16, gamma = input_187_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_95_cast_fp16)[name = 
tensor("input_187_cast_fp16")]; + tensor var_2989 = const()[name = tensor("op_2989"), val = tensor([1, 1])]; + tensor var_2991 = const()[name = tensor("op_2991"), val = tensor([1, 1])]; + tensor input_189_pad_type_0 = const()[name = tensor("input_189_pad_type_0"), val = tensor("custom")]; + tensor input_189_pad_0 = const()[name = tensor("input_189_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_23_fc1_weight_to_fp16 = const()[name = tensor("layers_23_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(932546816)))]; + tensor layers_23_fc1_bias_to_fp16 = const()[name = tensor("layers_23_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(945654080)))]; + tensor input_189_cast_fp16 = conv(bias = layers_23_fc1_bias_to_fp16, dilations = var_2991, groups = var_2894, pad = input_189_pad_0, pad_type = input_189_pad_type_0, strides = var_2989, weight = layers_23_fc1_weight_to_fp16, x = input_187_cast_fp16)[name = tensor("input_189_cast_fp16")]; + tensor input_191_mode_0 = const()[name = tensor("input_191_mode_0"), val = tensor("EXACT")]; + tensor input_191_cast_fp16 = gelu(mode = input_191_mode_0, x = input_189_cast_fp16)[name = tensor("input_191_cast_fp16")]; + tensor var_2997 = const()[name = tensor("op_2997"), val = tensor([1, 1])]; + tensor var_2999 = const()[name = tensor("op_2999"), val = tensor([1, 1])]; + tensor hidden_states_51_pad_type_0 = const()[name = tensor("hidden_states_51_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_51_pad_0 = const()[name = tensor("hidden_states_51_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_23_fc2_weight_to_fp16 = const()[name = tensor("layers_23_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(945664384)))]; + tensor layers_23_fc2_bias_to_fp16 = const()[name = tensor("layers_23_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(958771648)))]; + tensor hidden_states_51_cast_fp16 = conv(bias = layers_23_fc2_bias_to_fp16, dilations = var_2999, groups = var_2894, pad = hidden_states_51_pad_0, pad_type = hidden_states_51_pad_type_0, strides = var_2997, weight = layers_23_fc2_weight_to_fp16, x = input_191_cast_fp16)[name = tensor("hidden_states_51_cast_fp16")]; + tensor inputs_97_cast_fp16 = add(x = inputs_95_cast_fp16, y = hidden_states_51_cast_fp16)[name = tensor("inputs_97_cast_fp16")]; + tensor var_3010 = const()[name = tensor("op_3010"), val = tensor(3)]; + tensor var_3012 = const()[name = tensor("op_3012"), val = tensor(1)]; + tensor var_3013 = const()[name = tensor("op_3013"), val = tensor(true)]; + tensor var_3023 = const()[name = tensor("op_3023"), val = tensor([1])]; + tensor channels_mean_97_cast_fp16 = reduce_mean(axes = var_3023, keep_dims = var_3013, x = inputs_97_cast_fp16)[name = tensor("channels_mean_97_cast_fp16")]; + tensor zero_mean_97_cast_fp16 = sub(x = inputs_97_cast_fp16, y = channels_mean_97_cast_fp16)[name = tensor("zero_mean_97_cast_fp16")]; + tensor zero_mean_sq_97_cast_fp16 = mul(x = zero_mean_97_cast_fp16, y = zero_mean_97_cast_fp16)[name = tensor("zero_mean_sq_97_cast_fp16")]; + tensor var_3027 = const()[name = tensor("op_3027"), val = tensor([1])]; + tensor var_3028_cast_fp16 = reduce_mean(axes = var_3027, keep_dims = var_3013, x = zero_mean_sq_97_cast_fp16)[name = tensor("op_3028_cast_fp16")]; + tensor var_3029_to_fp16 = const()[name = tensor("op_3029_to_fp16"), val = 
tensor(0x1.5p-17)]; + tensor var_3030_cast_fp16 = add(x = var_3028_cast_fp16, y = var_3029_to_fp16)[name = tensor("op_3030_cast_fp16")]; + tensor denom_97_epsilon_0_to_fp16 = const()[name = tensor("denom_97_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_97_cast_fp16 = rsqrt(epsilon = denom_97_epsilon_0_to_fp16, x = var_3030_cast_fp16)[name = tensor("denom_97_cast_fp16")]; + tensor out_97_cast_fp16 = mul(x = zero_mean_97_cast_fp16, y = denom_97_cast_fp16)[name = tensor("out_97_cast_fp16")]; + tensor obj_97_gamma_0_to_fp16 = const()[name = tensor("obj_97_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(958774272)))]; + tensor obj_97_beta_0_to_fp16 = const()[name = tensor("obj_97_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(958776896)))]; + tensor obj_97_epsilon_0_to_fp16 = const()[name = tensor("obj_97_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_97_cast_fp16 = batch_norm(beta = obj_97_beta_0_to_fp16, epsilon = obj_97_epsilon_0_to_fp16, gamma = obj_97_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_97_cast_fp16)[name = tensor("obj_97_cast_fp16")]; + tensor var_3045 = const()[name = tensor("op_3045"), val = tensor([1, 1])]; + tensor var_3047 = const()[name = tensor("op_3047"), val = tensor([1, 1])]; + tensor query_49_pad_type_0 = const()[name = tensor("query_49_pad_type_0"), val = tensor("custom")]; + tensor query_49_pad_0 = const()[name = tensor("query_49_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_24_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_24_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(958779520)))]; + tensor layers_24_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_24_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(962056384)))]; + tensor query_49_cast_fp16 = conv(bias = layers_24_self_attn_q_proj_bias_to_fp16, dilations = var_3047, groups = var_3012, pad = query_49_pad_0, pad_type = query_49_pad_type_0, strides = var_3045, weight = layers_24_self_attn_q_proj_weight_to_fp16, x = obj_97_cast_fp16)[name = tensor("query_49_cast_fp16")]; + tensor var_3051 = const()[name = tensor("op_3051"), val = tensor([1, 1])]; + tensor var_3053 = const()[name = tensor("op_3053"), val = tensor([1, 1])]; + tensor key_49_pad_type_0 = const()[name = tensor("key_49_pad_type_0"), val = tensor("custom")]; + tensor key_49_pad_0 = const()[name = tensor("key_49_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_24_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_24_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(962059008)))]; + tensor key_49_cast_fp16 = conv(dilations = var_3053, groups = var_3012, pad = key_49_pad_0, pad_type = key_49_pad_type_0, strides = var_3051, weight = layers_24_self_attn_k_proj_weight_to_fp16, x = obj_97_cast_fp16)[name = tensor("key_49_cast_fp16")]; + tensor var_3058 = const()[name = tensor("op_3058"), val = tensor([1, 1])]; + tensor var_3060 = const()[name = tensor("op_3060"), val = tensor([1, 1])]; + tensor value_49_pad_type_0 = const()[name = tensor("value_49_pad_type_0"), val = tensor("custom")]; + tensor value_49_pad_0 = const()[name = tensor("value_49_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
layers_24_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_24_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(965335872)))]; + tensor layers_24_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_24_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(968612736)))]; + tensor value_49_cast_fp16 = conv(bias = layers_24_self_attn_v_proj_bias_to_fp16, dilations = var_3060, groups = var_3012, pad = value_49_pad_0, pad_type = value_49_pad_type_0, strides = var_3058, weight = layers_24_self_attn_v_proj_weight_to_fp16, x = obj_97_cast_fp16)[name = tensor("value_49_cast_fp16")]; + tensor var_3064 = const()[name = tensor("op_3064"), val = tensor([1, 20, 64, -1])]; + tensor var_3065_cast_fp16 = reshape(shape = var_3064, x = query_49_cast_fp16)[name = tensor("op_3065_cast_fp16")]; + tensor var_3066_to_fp16 = const()[name = tensor("op_3066_to_fp16"), val = tensor(0x1p-3)]; + tensor var_3067_cast_fp16 = mul(x = var_3065_cast_fp16, y = var_3066_to_fp16)[name = tensor("op_3067_cast_fp16")]; + tensor var_3068 = const()[name = tensor("op_3068"), val = tensor([1, 20, 64, -1])]; + tensor var_3069_cast_fp16 = reshape(shape = var_3068, x = key_49_cast_fp16)[name = tensor("op_3069_cast_fp16")]; + tensor mh_w_49_transpose_x_0 = const()[name = tensor("mh_w_49_transpose_x_0"), val = tensor(true)]; + tensor mh_w_49_transpose_y_0 = const()[name = tensor("mh_w_49_transpose_y_0"), val = tensor(false)]; + tensor mh_w_49_cast_fp16 = matmul(transpose_x = mh_w_49_transpose_x_0, transpose_y = mh_w_49_transpose_y_0, x = var_3067_cast_fp16, y = var_3069_cast_fp16)[name = tensor("mh_w_49_cast_fp16")]; + tensor var_3072_cast_fp16 = softmax(axis = var_3010, x = mh_w_49_cast_fp16)[name = tensor("op_3072_cast_fp16")]; + tensor var_3073 = const()[name = tensor("op_3073"), val = tensor([1, 20, 64, -1])]; + tensor var_3074_cast_fp16 = reshape(shape = var_3073, x = value_49_cast_fp16)[name = tensor("op_3074_cast_fp16")]; + tensor attn_49_transpose_x_0 = const()[name = tensor("attn_49_transpose_x_0"), val = tensor(false)]; + tensor attn_49_transpose_y_0 = const()[name = tensor("attn_49_transpose_y_0"), val = tensor(true)]; + tensor attn_49_cast_fp16 = matmul(transpose_x = attn_49_transpose_x_0, transpose_y = attn_49_transpose_y_0, x = var_3074_cast_fp16, y = var_3072_cast_fp16)[name = tensor("attn_49_cast_fp16")]; + tensor var_3077 = const()[name = tensor("op_3077"), val = tensor([1, 1280, 1, -1])]; + tensor input_193_cast_fp16 = reshape(shape = var_3077, x = attn_49_cast_fp16)[name = tensor("input_193_cast_fp16")]; + tensor var_3081 = const()[name = tensor("op_3081"), val = tensor([1, 1])]; + tensor var_3083 = const()[name = tensor("op_3083"), val = tensor([1, 1])]; + tensor obj_99_pad_type_0 = const()[name = tensor("obj_99_pad_type_0"), val = tensor("custom")]; + tensor obj_99_pad_0 = const()[name = tensor("obj_99_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_24_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_24_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(968615360)))]; + tensor layers_24_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_24_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(971892224)))]; + tensor obj_99_cast_fp16 = conv(bias = layers_24_self_attn_o_proj_bias_to_fp16, 
dilations = var_3083, groups = var_3012, pad = obj_99_pad_0, pad_type = obj_99_pad_type_0, strides = var_3081, weight = layers_24_self_attn_o_proj_weight_to_fp16, x = input_193_cast_fp16)[name = tensor("obj_99_cast_fp16")]; + tensor inputs_99_cast_fp16 = add(x = inputs_97_cast_fp16, y = obj_99_cast_fp16)[name = tensor("inputs_99_cast_fp16")]; + tensor var_3089 = const()[name = tensor("op_3089"), val = tensor([1])]; + tensor channels_mean_99_cast_fp16 = reduce_mean(axes = var_3089, keep_dims = var_3013, x = inputs_99_cast_fp16)[name = tensor("channels_mean_99_cast_fp16")]; + tensor zero_mean_99_cast_fp16 = sub(x = inputs_99_cast_fp16, y = channels_mean_99_cast_fp16)[name = tensor("zero_mean_99_cast_fp16")]; + tensor zero_mean_sq_99_cast_fp16 = mul(x = zero_mean_99_cast_fp16, y = zero_mean_99_cast_fp16)[name = tensor("zero_mean_sq_99_cast_fp16")]; + tensor var_3093 = const()[name = tensor("op_3093"), val = tensor([1])]; + tensor var_3094_cast_fp16 = reduce_mean(axes = var_3093, keep_dims = var_3013, x = zero_mean_sq_99_cast_fp16)[name = tensor("op_3094_cast_fp16")]; + tensor var_3095_to_fp16 = const()[name = tensor("op_3095_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3096_cast_fp16 = add(x = var_3094_cast_fp16, y = var_3095_to_fp16)[name = tensor("op_3096_cast_fp16")]; + tensor denom_99_epsilon_0_to_fp16 = const()[name = tensor("denom_99_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_99_cast_fp16 = rsqrt(epsilon = denom_99_epsilon_0_to_fp16, x = var_3096_cast_fp16)[name = tensor("denom_99_cast_fp16")]; + tensor out_99_cast_fp16 = mul(x = zero_mean_99_cast_fp16, y = denom_99_cast_fp16)[name = tensor("out_99_cast_fp16")]; + tensor input_195_gamma_0_to_fp16 = const()[name = tensor("input_195_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(971894848)))]; + tensor input_195_beta_0_to_fp16 = const()[name = tensor("input_195_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(971897472)))]; + tensor input_195_epsilon_0_to_fp16 = const()[name = tensor("input_195_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_195_cast_fp16 = batch_norm(beta = input_195_beta_0_to_fp16, epsilon = input_195_epsilon_0_to_fp16, gamma = input_195_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_99_cast_fp16)[name = tensor("input_195_cast_fp16")]; + tensor var_3107 = const()[name = tensor("op_3107"), val = tensor([1, 1])]; + tensor var_3109 = const()[name = tensor("op_3109"), val = tensor([1, 1])]; + tensor input_197_pad_type_0 = const()[name = tensor("input_197_pad_type_0"), val = tensor("custom")]; + tensor input_197_pad_0 = const()[name = tensor("input_197_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_24_fc1_weight_to_fp16 = const()[name = tensor("layers_24_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(971900096)))]; + tensor layers_24_fc1_bias_to_fp16 = const()[name = tensor("layers_24_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(985007360)))]; + tensor input_197_cast_fp16 = conv(bias = layers_24_fc1_bias_to_fp16, dilations = var_3109, groups = var_3012, pad = input_197_pad_0, pad_type = input_197_pad_type_0, strides = var_3107, weight = layers_24_fc1_weight_to_fp16, x = input_195_cast_fp16)[name = tensor("input_197_cast_fp16")]; + tensor input_199_mode_0 = const()[name = tensor("input_199_mode_0"), val = 
tensor("EXACT")]; + tensor input_199_cast_fp16 = gelu(mode = input_199_mode_0, x = input_197_cast_fp16)[name = tensor("input_199_cast_fp16")]; + tensor var_3115 = const()[name = tensor("op_3115"), val = tensor([1, 1])]; + tensor var_3117 = const()[name = tensor("op_3117"), val = tensor([1, 1])]; + tensor hidden_states_53_pad_type_0 = const()[name = tensor("hidden_states_53_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_53_pad_0 = const()[name = tensor("hidden_states_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_24_fc2_weight_to_fp16 = const()[name = tensor("layers_24_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(985017664)))]; + tensor layers_24_fc2_bias_to_fp16 = const()[name = tensor("layers_24_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(998124928)))]; + tensor hidden_states_53_cast_fp16 = conv(bias = layers_24_fc2_bias_to_fp16, dilations = var_3117, groups = var_3012, pad = hidden_states_53_pad_0, pad_type = hidden_states_53_pad_type_0, strides = var_3115, weight = layers_24_fc2_weight_to_fp16, x = input_199_cast_fp16)[name = tensor("hidden_states_53_cast_fp16")]; + tensor inputs_101_cast_fp16 = add(x = inputs_99_cast_fp16, y = hidden_states_53_cast_fp16)[name = tensor("inputs_101_cast_fp16")]; + tensor var_3128 = const()[name = tensor("op_3128"), val = tensor(3)]; + tensor var_3130 = const()[name = tensor("op_3130"), val = tensor(1)]; + tensor var_3131 = const()[name = tensor("op_3131"), val = tensor(true)]; + tensor var_3141 = const()[name = tensor("op_3141"), val = tensor([1])]; + tensor channels_mean_101_cast_fp16 = reduce_mean(axes = var_3141, keep_dims = var_3131, x = inputs_101_cast_fp16)[name = tensor("channels_mean_101_cast_fp16")]; + tensor zero_mean_101_cast_fp16 = sub(x = inputs_101_cast_fp16, y = channels_mean_101_cast_fp16)[name = tensor("zero_mean_101_cast_fp16")]; + tensor zero_mean_sq_101_cast_fp16 = mul(x = zero_mean_101_cast_fp16, y = zero_mean_101_cast_fp16)[name = tensor("zero_mean_sq_101_cast_fp16")]; + tensor var_3145 = const()[name = tensor("op_3145"), val = tensor([1])]; + tensor var_3146_cast_fp16 = reduce_mean(axes = var_3145, keep_dims = var_3131, x = zero_mean_sq_101_cast_fp16)[name = tensor("op_3146_cast_fp16")]; + tensor var_3147_to_fp16 = const()[name = tensor("op_3147_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3148_cast_fp16 = add(x = var_3146_cast_fp16, y = var_3147_to_fp16)[name = tensor("op_3148_cast_fp16")]; + tensor denom_101_epsilon_0_to_fp16 = const()[name = tensor("denom_101_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_101_cast_fp16 = rsqrt(epsilon = denom_101_epsilon_0_to_fp16, x = var_3148_cast_fp16)[name = tensor("denom_101_cast_fp16")]; + tensor out_101_cast_fp16 = mul(x = zero_mean_101_cast_fp16, y = denom_101_cast_fp16)[name = tensor("out_101_cast_fp16")]; + tensor obj_101_gamma_0_to_fp16 = const()[name = tensor("obj_101_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(998127552)))]; + tensor obj_101_beta_0_to_fp16 = const()[name = tensor("obj_101_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(998130176)))]; + tensor obj_101_epsilon_0_to_fp16 = const()[name = tensor("obj_101_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_101_cast_fp16 = batch_norm(beta = obj_101_beta_0_to_fp16, epsilon = obj_101_epsilon_0_to_fp16, gamma = 
obj_101_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_101_cast_fp16)[name = tensor("obj_101_cast_fp16")]; + tensor var_3163 = const()[name = tensor("op_3163"), val = tensor([1, 1])]; + tensor var_3165 = const()[name = tensor("op_3165"), val = tensor([1, 1])]; + tensor query_51_pad_type_0 = const()[name = tensor("query_51_pad_type_0"), val = tensor("custom")]; + tensor query_51_pad_0 = const()[name = tensor("query_51_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_25_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_25_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(998132800)))]; + tensor layers_25_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_25_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1001409664)))]; + tensor query_51_cast_fp16 = conv(bias = layers_25_self_attn_q_proj_bias_to_fp16, dilations = var_3165, groups = var_3130, pad = query_51_pad_0, pad_type = query_51_pad_type_0, strides = var_3163, weight = layers_25_self_attn_q_proj_weight_to_fp16, x = obj_101_cast_fp16)[name = tensor("query_51_cast_fp16")]; + tensor var_3169 = const()[name = tensor("op_3169"), val = tensor([1, 1])]; + tensor var_3171 = const()[name = tensor("op_3171"), val = tensor([1, 1])]; + tensor key_51_pad_type_0 = const()[name = tensor("key_51_pad_type_0"), val = tensor("custom")]; + tensor key_51_pad_0 = const()[name = tensor("key_51_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_25_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_25_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1001412288)))]; + tensor key_51_cast_fp16 = conv(dilations = var_3171, groups = var_3130, pad = key_51_pad_0, pad_type = key_51_pad_type_0, strides = var_3169, weight = layers_25_self_attn_k_proj_weight_to_fp16, x = obj_101_cast_fp16)[name = tensor("key_51_cast_fp16")]; + tensor var_3176 = const()[name = tensor("op_3176"), val = tensor([1, 1])]; + tensor var_3178 = const()[name = tensor("op_3178"), val = tensor([1, 1])]; + tensor value_51_pad_type_0 = const()[name = tensor("value_51_pad_type_0"), val = tensor("custom")]; + tensor value_51_pad_0 = const()[name = tensor("value_51_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_25_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_25_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1004689152)))]; + tensor layers_25_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_25_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1007966016)))]; + tensor value_51_cast_fp16 = conv(bias = layers_25_self_attn_v_proj_bias_to_fp16, dilations = var_3178, groups = var_3130, pad = value_51_pad_0, pad_type = value_51_pad_type_0, strides = var_3176, weight = layers_25_self_attn_v_proj_weight_to_fp16, x = obj_101_cast_fp16)[name = tensor("value_51_cast_fp16")]; + tensor var_3182 = const()[name = tensor("op_3182"), val = tensor([1, 20, 64, -1])]; + tensor var_3183_cast_fp16 = reshape(shape = var_3182, x = query_51_cast_fp16)[name = tensor("op_3183_cast_fp16")]; + tensor var_3184_to_fp16 = const()[name = tensor("op_3184_to_fp16"), val = tensor(0x1p-3)]; + tensor var_3185_cast_fp16 = mul(x = var_3183_cast_fp16, y = 
var_3184_to_fp16)[name = tensor("op_3185_cast_fp16")]; + tensor var_3186 = const()[name = tensor("op_3186"), val = tensor([1, 20, 64, -1])]; + tensor var_3187_cast_fp16 = reshape(shape = var_3186, x = key_51_cast_fp16)[name = tensor("op_3187_cast_fp16")]; + tensor mh_w_51_transpose_x_0 = const()[name = tensor("mh_w_51_transpose_x_0"), val = tensor(true)]; + tensor mh_w_51_transpose_y_0 = const()[name = tensor("mh_w_51_transpose_y_0"), val = tensor(false)]; + tensor mh_w_51_cast_fp16 = matmul(transpose_x = mh_w_51_transpose_x_0, transpose_y = mh_w_51_transpose_y_0, x = var_3185_cast_fp16, y = var_3187_cast_fp16)[name = tensor("mh_w_51_cast_fp16")]; + tensor var_3190_cast_fp16 = softmax(axis = var_3128, x = mh_w_51_cast_fp16)[name = tensor("op_3190_cast_fp16")]; + tensor var_3191 = const()[name = tensor("op_3191"), val = tensor([1, 20, 64, -1])]; + tensor var_3192_cast_fp16 = reshape(shape = var_3191, x = value_51_cast_fp16)[name = tensor("op_3192_cast_fp16")]; + tensor attn_51_transpose_x_0 = const()[name = tensor("attn_51_transpose_x_0"), val = tensor(false)]; + tensor attn_51_transpose_y_0 = const()[name = tensor("attn_51_transpose_y_0"), val = tensor(true)]; + tensor attn_51_cast_fp16 = matmul(transpose_x = attn_51_transpose_x_0, transpose_y = attn_51_transpose_y_0, x = var_3192_cast_fp16, y = var_3190_cast_fp16)[name = tensor("attn_51_cast_fp16")]; + tensor var_3195 = const()[name = tensor("op_3195"), val = tensor([1, 1280, 1, -1])]; + tensor input_201_cast_fp16 = reshape(shape = var_3195, x = attn_51_cast_fp16)[name = tensor("input_201_cast_fp16")]; + tensor var_3199 = const()[name = tensor("op_3199"), val = tensor([1, 1])]; + tensor var_3201 = const()[name = tensor("op_3201"), val = tensor([1, 1])]; + tensor obj_103_pad_type_0 = const()[name = tensor("obj_103_pad_type_0"), val = tensor("custom")]; + tensor obj_103_pad_0 = const()[name = tensor("obj_103_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_25_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_25_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1007968640)))]; + tensor layers_25_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_25_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1011245504)))]; + tensor obj_103_cast_fp16 = conv(bias = layers_25_self_attn_o_proj_bias_to_fp16, dilations = var_3201, groups = var_3130, pad = obj_103_pad_0, pad_type = obj_103_pad_type_0, strides = var_3199, weight = layers_25_self_attn_o_proj_weight_to_fp16, x = input_201_cast_fp16)[name = tensor("obj_103_cast_fp16")]; + tensor inputs_103_cast_fp16 = add(x = inputs_101_cast_fp16, y = obj_103_cast_fp16)[name = tensor("inputs_103_cast_fp16")]; + tensor var_3207 = const()[name = tensor("op_3207"), val = tensor([1])]; + tensor channels_mean_103_cast_fp16 = reduce_mean(axes = var_3207, keep_dims = var_3131, x = inputs_103_cast_fp16)[name = tensor("channels_mean_103_cast_fp16")]; + tensor zero_mean_103_cast_fp16 = sub(x = inputs_103_cast_fp16, y = channels_mean_103_cast_fp16)[name = tensor("zero_mean_103_cast_fp16")]; + tensor zero_mean_sq_103_cast_fp16 = mul(x = zero_mean_103_cast_fp16, y = zero_mean_103_cast_fp16)[name = tensor("zero_mean_sq_103_cast_fp16")]; + tensor var_3211 = const()[name = tensor("op_3211"), val = tensor([1])]; + tensor var_3212_cast_fp16 = reduce_mean(axes = var_3211, keep_dims = var_3131, x = zero_mean_sq_103_cast_fp16)[name = tensor("op_3212_cast_fp16")]; 
+ tensor var_3213_to_fp16 = const()[name = tensor("op_3213_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3214_cast_fp16 = add(x = var_3212_cast_fp16, y = var_3213_to_fp16)[name = tensor("op_3214_cast_fp16")]; + tensor denom_103_epsilon_0_to_fp16 = const()[name = tensor("denom_103_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_103_cast_fp16 = rsqrt(epsilon = denom_103_epsilon_0_to_fp16, x = var_3214_cast_fp16)[name = tensor("denom_103_cast_fp16")]; + tensor out_103_cast_fp16 = mul(x = zero_mean_103_cast_fp16, y = denom_103_cast_fp16)[name = tensor("out_103_cast_fp16")]; + tensor input_203_gamma_0_to_fp16 = const()[name = tensor("input_203_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1011248128)))]; + tensor input_203_beta_0_to_fp16 = const()[name = tensor("input_203_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1011250752)))]; + tensor input_203_epsilon_0_to_fp16 = const()[name = tensor("input_203_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_203_cast_fp16 = batch_norm(beta = input_203_beta_0_to_fp16, epsilon = input_203_epsilon_0_to_fp16, gamma = input_203_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_103_cast_fp16)[name = tensor("input_203_cast_fp16")]; + tensor var_3225 = const()[name = tensor("op_3225"), val = tensor([1, 1])]; + tensor var_3227 = const()[name = tensor("op_3227"), val = tensor([1, 1])]; + tensor input_205_pad_type_0 = const()[name = tensor("input_205_pad_type_0"), val = tensor("custom")]; + tensor input_205_pad_0 = const()[name = tensor("input_205_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_25_fc1_weight_to_fp16 = const()[name = tensor("layers_25_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1011253376)))]; + tensor layers_25_fc1_bias_to_fp16 = const()[name = tensor("layers_25_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1024360640)))]; + tensor input_205_cast_fp16 = conv(bias = layers_25_fc1_bias_to_fp16, dilations = var_3227, groups = var_3130, pad = input_205_pad_0, pad_type = input_205_pad_type_0, strides = var_3225, weight = layers_25_fc1_weight_to_fp16, x = input_203_cast_fp16)[name = tensor("input_205_cast_fp16")]; + tensor input_207_mode_0 = const()[name = tensor("input_207_mode_0"), val = tensor("EXACT")]; + tensor input_207_cast_fp16 = gelu(mode = input_207_mode_0, x = input_205_cast_fp16)[name = tensor("input_207_cast_fp16")]; + tensor var_3233 = const()[name = tensor("op_3233"), val = tensor([1, 1])]; + tensor var_3235 = const()[name = tensor("op_3235"), val = tensor([1, 1])]; + tensor hidden_states_55_pad_type_0 = const()[name = tensor("hidden_states_55_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_55_pad_0 = const()[name = tensor("hidden_states_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_25_fc2_weight_to_fp16 = const()[name = tensor("layers_25_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1024370944)))]; + tensor layers_25_fc2_bias_to_fp16 = const()[name = tensor("layers_25_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1037478208)))]; + tensor hidden_states_55_cast_fp16 = conv(bias = layers_25_fc2_bias_to_fp16, dilations = var_3235, groups = var_3130, pad = hidden_states_55_pad_0, 
pad_type = hidden_states_55_pad_type_0, strides = var_3233, weight = layers_25_fc2_weight_to_fp16, x = input_207_cast_fp16)[name = tensor("hidden_states_55_cast_fp16")]; + tensor inputs_105_cast_fp16 = add(x = inputs_103_cast_fp16, y = hidden_states_55_cast_fp16)[name = tensor("inputs_105_cast_fp16")]; + tensor var_3246 = const()[name = tensor("op_3246"), val = tensor(3)]; + tensor var_3248 = const()[name = tensor("op_3248"), val = tensor(1)]; + tensor var_3249 = const()[name = tensor("op_3249"), val = tensor(true)]; + tensor var_3259 = const()[name = tensor("op_3259"), val = tensor([1])]; + tensor channels_mean_105_cast_fp16 = reduce_mean(axes = var_3259, keep_dims = var_3249, x = inputs_105_cast_fp16)[name = tensor("channels_mean_105_cast_fp16")]; + tensor zero_mean_105_cast_fp16 = sub(x = inputs_105_cast_fp16, y = channels_mean_105_cast_fp16)[name = tensor("zero_mean_105_cast_fp16")]; + tensor zero_mean_sq_105_cast_fp16 = mul(x = zero_mean_105_cast_fp16, y = zero_mean_105_cast_fp16)[name = tensor("zero_mean_sq_105_cast_fp16")]; + tensor var_3263 = const()[name = tensor("op_3263"), val = tensor([1])]; + tensor var_3264_cast_fp16 = reduce_mean(axes = var_3263, keep_dims = var_3249, x = zero_mean_sq_105_cast_fp16)[name = tensor("op_3264_cast_fp16")]; + tensor var_3265_to_fp16 = const()[name = tensor("op_3265_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3266_cast_fp16 = add(x = var_3264_cast_fp16, y = var_3265_to_fp16)[name = tensor("op_3266_cast_fp16")]; + tensor denom_105_epsilon_0_to_fp16 = const()[name = tensor("denom_105_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_105_cast_fp16 = rsqrt(epsilon = denom_105_epsilon_0_to_fp16, x = var_3266_cast_fp16)[name = tensor("denom_105_cast_fp16")]; + tensor out_105_cast_fp16 = mul(x = zero_mean_105_cast_fp16, y = denom_105_cast_fp16)[name = tensor("out_105_cast_fp16")]; + tensor obj_105_gamma_0_to_fp16 = const()[name = tensor("obj_105_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1037480832)))]; + tensor obj_105_beta_0_to_fp16 = const()[name = tensor("obj_105_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1037483456)))]; + tensor obj_105_epsilon_0_to_fp16 = const()[name = tensor("obj_105_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_105_cast_fp16 = batch_norm(beta = obj_105_beta_0_to_fp16, epsilon = obj_105_epsilon_0_to_fp16, gamma = obj_105_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_105_cast_fp16)[name = tensor("obj_105_cast_fp16")]; + tensor var_3281 = const()[name = tensor("op_3281"), val = tensor([1, 1])]; + tensor var_3283 = const()[name = tensor("op_3283"), val = tensor([1, 1])]; + tensor query_53_pad_type_0 = const()[name = tensor("query_53_pad_type_0"), val = tensor("custom")]; + tensor query_53_pad_0 = const()[name = tensor("query_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_26_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_26_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1037486080)))]; + tensor layers_26_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_26_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040762944)))]; + tensor query_53_cast_fp16 = conv(bias = layers_26_self_attn_q_proj_bias_to_fp16, dilations = var_3283, groups = var_3248, pad = 
query_53_pad_0, pad_type = query_53_pad_type_0, strides = var_3281, weight = layers_26_self_attn_q_proj_weight_to_fp16, x = obj_105_cast_fp16)[name = tensor("query_53_cast_fp16")]; + tensor var_3287 = const()[name = tensor("op_3287"), val = tensor([1, 1])]; + tensor var_3289 = const()[name = tensor("op_3289"), val = tensor([1, 1])]; + tensor key_53_pad_type_0 = const()[name = tensor("key_53_pad_type_0"), val = tensor("custom")]; + tensor key_53_pad_0 = const()[name = tensor("key_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_26_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_26_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040765568)))]; + tensor key_53_cast_fp16 = conv(dilations = var_3289, groups = var_3248, pad = key_53_pad_0, pad_type = key_53_pad_type_0, strides = var_3287, weight = layers_26_self_attn_k_proj_weight_to_fp16, x = obj_105_cast_fp16)[name = tensor("key_53_cast_fp16")]; + tensor var_3294 = const()[name = tensor("op_3294"), val = tensor([1, 1])]; + tensor var_3296 = const()[name = tensor("op_3296"), val = tensor([1, 1])]; + tensor value_53_pad_type_0 = const()[name = tensor("value_53_pad_type_0"), val = tensor("custom")]; + tensor value_53_pad_0 = const()[name = tensor("value_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_26_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_26_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1044042432)))]; + tensor layers_26_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_26_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1047319296)))]; + tensor value_53_cast_fp16 = conv(bias = layers_26_self_attn_v_proj_bias_to_fp16, dilations = var_3296, groups = var_3248, pad = value_53_pad_0, pad_type = value_53_pad_type_0, strides = var_3294, weight = layers_26_self_attn_v_proj_weight_to_fp16, x = obj_105_cast_fp16)[name = tensor("value_53_cast_fp16")]; + tensor var_3300 = const()[name = tensor("op_3300"), val = tensor([1, 20, 64, -1])]; + tensor var_3301_cast_fp16 = reshape(shape = var_3300, x = query_53_cast_fp16)[name = tensor("op_3301_cast_fp16")]; + tensor var_3302_to_fp16 = const()[name = tensor("op_3302_to_fp16"), val = tensor(0x1p-3)]; + tensor var_3303_cast_fp16 = mul(x = var_3301_cast_fp16, y = var_3302_to_fp16)[name = tensor("op_3303_cast_fp16")]; + tensor var_3304 = const()[name = tensor("op_3304"), val = tensor([1, 20, 64, -1])]; + tensor var_3305_cast_fp16 = reshape(shape = var_3304, x = key_53_cast_fp16)[name = tensor("op_3305_cast_fp16")]; + tensor mh_w_53_transpose_x_0 = const()[name = tensor("mh_w_53_transpose_x_0"), val = tensor(true)]; + tensor mh_w_53_transpose_y_0 = const()[name = tensor("mh_w_53_transpose_y_0"), val = tensor(false)]; + tensor mh_w_53_cast_fp16 = matmul(transpose_x = mh_w_53_transpose_x_0, transpose_y = mh_w_53_transpose_y_0, x = var_3303_cast_fp16, y = var_3305_cast_fp16)[name = tensor("mh_w_53_cast_fp16")]; + tensor var_3308_cast_fp16 = softmax(axis = var_3246, x = mh_w_53_cast_fp16)[name = tensor("op_3308_cast_fp16")]; + tensor var_3309 = const()[name = tensor("op_3309"), val = tensor([1, 20, 64, -1])]; + tensor var_3310_cast_fp16 = reshape(shape = var_3309, x = value_53_cast_fp16)[name = tensor("op_3310_cast_fp16")]; + tensor attn_53_transpose_x_0 = const()[name = tensor("attn_53_transpose_x_0"), val = tensor(false)]; + tensor 
attn_53_transpose_y_0 = const()[name = tensor("attn_53_transpose_y_0"), val = tensor(true)]; + tensor attn_53_cast_fp16 = matmul(transpose_x = attn_53_transpose_x_0, transpose_y = attn_53_transpose_y_0, x = var_3310_cast_fp16, y = var_3308_cast_fp16)[name = tensor("attn_53_cast_fp16")]; + tensor var_3313 = const()[name = tensor("op_3313"), val = tensor([1, 1280, 1, -1])]; + tensor input_209_cast_fp16 = reshape(shape = var_3313, x = attn_53_cast_fp16)[name = tensor("input_209_cast_fp16")]; + tensor var_3317 = const()[name = tensor("op_3317"), val = tensor([1, 1])]; + tensor var_3319 = const()[name = tensor("op_3319"), val = tensor([1, 1])]; + tensor obj_107_pad_type_0 = const()[name = tensor("obj_107_pad_type_0"), val = tensor("custom")]; + tensor obj_107_pad_0 = const()[name = tensor("obj_107_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_26_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_26_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1047321920)))]; + tensor layers_26_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_26_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1050598784)))]; + tensor obj_107_cast_fp16 = conv(bias = layers_26_self_attn_o_proj_bias_to_fp16, dilations = var_3319, groups = var_3248, pad = obj_107_pad_0, pad_type = obj_107_pad_type_0, strides = var_3317, weight = layers_26_self_attn_o_proj_weight_to_fp16, x = input_209_cast_fp16)[name = tensor("obj_107_cast_fp16")]; + tensor inputs_107_cast_fp16 = add(x = inputs_105_cast_fp16, y = obj_107_cast_fp16)[name = tensor("inputs_107_cast_fp16")]; + tensor var_3325 = const()[name = tensor("op_3325"), val = tensor([1])]; + tensor channels_mean_107_cast_fp16 = reduce_mean(axes = var_3325, keep_dims = var_3249, x = inputs_107_cast_fp16)[name = tensor("channels_mean_107_cast_fp16")]; + tensor zero_mean_107_cast_fp16 = sub(x = inputs_107_cast_fp16, y = channels_mean_107_cast_fp16)[name = tensor("zero_mean_107_cast_fp16")]; + tensor zero_mean_sq_107_cast_fp16 = mul(x = zero_mean_107_cast_fp16, y = zero_mean_107_cast_fp16)[name = tensor("zero_mean_sq_107_cast_fp16")]; + tensor var_3329 = const()[name = tensor("op_3329"), val = tensor([1])]; + tensor var_3330_cast_fp16 = reduce_mean(axes = var_3329, keep_dims = var_3249, x = zero_mean_sq_107_cast_fp16)[name = tensor("op_3330_cast_fp16")]; + tensor var_3331_to_fp16 = const()[name = tensor("op_3331_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3332_cast_fp16 = add(x = var_3330_cast_fp16, y = var_3331_to_fp16)[name = tensor("op_3332_cast_fp16")]; + tensor denom_107_epsilon_0_to_fp16 = const()[name = tensor("denom_107_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_107_cast_fp16 = rsqrt(epsilon = denom_107_epsilon_0_to_fp16, x = var_3332_cast_fp16)[name = tensor("denom_107_cast_fp16")]; + tensor out_107_cast_fp16 = mul(x = zero_mean_107_cast_fp16, y = denom_107_cast_fp16)[name = tensor("out_107_cast_fp16")]; + tensor input_211_gamma_0_to_fp16 = const()[name = tensor("input_211_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1050601408)))]; + tensor input_211_beta_0_to_fp16 = const()[name = tensor("input_211_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1050604032)))]; + tensor input_211_epsilon_0_to_fp16 = const()[name = tensor("input_211_epsilon_0_to_fp16"), val = 
tensor(0x1.5p-17)]; + tensor input_211_cast_fp16 = batch_norm(beta = input_211_beta_0_to_fp16, epsilon = input_211_epsilon_0_to_fp16, gamma = input_211_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_107_cast_fp16)[name = tensor("input_211_cast_fp16")]; + tensor var_3343 = const()[name = tensor("op_3343"), val = tensor([1, 1])]; + tensor var_3345 = const()[name = tensor("op_3345"), val = tensor([1, 1])]; + tensor input_213_pad_type_0 = const()[name = tensor("input_213_pad_type_0"), val = tensor("custom")]; + tensor input_213_pad_0 = const()[name = tensor("input_213_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_26_fc1_weight_to_fp16 = const()[name = tensor("layers_26_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1050606656)))]; + tensor layers_26_fc1_bias_to_fp16 = const()[name = tensor("layers_26_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1063713920)))]; + tensor input_213_cast_fp16 = conv(bias = layers_26_fc1_bias_to_fp16, dilations = var_3345, groups = var_3248, pad = input_213_pad_0, pad_type = input_213_pad_type_0, strides = var_3343, weight = layers_26_fc1_weight_to_fp16, x = input_211_cast_fp16)[name = tensor("input_213_cast_fp16")]; + tensor input_215_mode_0 = const()[name = tensor("input_215_mode_0"), val = tensor("EXACT")]; + tensor input_215_cast_fp16 = gelu(mode = input_215_mode_0, x = input_213_cast_fp16)[name = tensor("input_215_cast_fp16")]; + tensor var_3351 = const()[name = tensor("op_3351"), val = tensor([1, 1])]; + tensor var_3353 = const()[name = tensor("op_3353"), val = tensor([1, 1])]; + tensor hidden_states_57_pad_type_0 = const()[name = tensor("hidden_states_57_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_57_pad_0 = const()[name = tensor("hidden_states_57_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_26_fc2_weight_to_fp16 = const()[name = tensor("layers_26_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1063724224)))]; + tensor layers_26_fc2_bias_to_fp16 = const()[name = tensor("layers_26_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1076831488)))]; + tensor hidden_states_57_cast_fp16 = conv(bias = layers_26_fc2_bias_to_fp16, dilations = var_3353, groups = var_3248, pad = hidden_states_57_pad_0, pad_type = hidden_states_57_pad_type_0, strides = var_3351, weight = layers_26_fc2_weight_to_fp16, x = input_215_cast_fp16)[name = tensor("hidden_states_57_cast_fp16")]; + tensor inputs_109_cast_fp16 = add(x = inputs_107_cast_fp16, y = hidden_states_57_cast_fp16)[name = tensor("inputs_109_cast_fp16")]; + tensor var_3364 = const()[name = tensor("op_3364"), val = tensor(3)]; + tensor var_3366 = const()[name = tensor("op_3366"), val = tensor(1)]; + tensor var_3367 = const()[name = tensor("op_3367"), val = tensor(true)]; + tensor var_3377 = const()[name = tensor("op_3377"), val = tensor([1])]; + tensor channels_mean_109_cast_fp16 = reduce_mean(axes = var_3377, keep_dims = var_3367, x = inputs_109_cast_fp16)[name = tensor("channels_mean_109_cast_fp16")]; + tensor zero_mean_109_cast_fp16 = sub(x = inputs_109_cast_fp16, y = channels_mean_109_cast_fp16)[name = tensor("zero_mean_109_cast_fp16")]; + tensor zero_mean_sq_109_cast_fp16 = mul(x = zero_mean_109_cast_fp16, y = zero_mean_109_cast_fp16)[name = tensor("zero_mean_sq_109_cast_fp16")]; + tensor var_3381 = 
const()[name = tensor("op_3381"), val = tensor([1])]; + tensor var_3382_cast_fp16 = reduce_mean(axes = var_3381, keep_dims = var_3367, x = zero_mean_sq_109_cast_fp16)[name = tensor("op_3382_cast_fp16")]; + tensor var_3383_to_fp16 = const()[name = tensor("op_3383_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3384_cast_fp16 = add(x = var_3382_cast_fp16, y = var_3383_to_fp16)[name = tensor("op_3384_cast_fp16")]; + tensor denom_109_epsilon_0_to_fp16 = const()[name = tensor("denom_109_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_109_cast_fp16 = rsqrt(epsilon = denom_109_epsilon_0_to_fp16, x = var_3384_cast_fp16)[name = tensor("denom_109_cast_fp16")]; + tensor out_109_cast_fp16 = mul(x = zero_mean_109_cast_fp16, y = denom_109_cast_fp16)[name = tensor("out_109_cast_fp16")]; + tensor obj_109_gamma_0_to_fp16 = const()[name = tensor("obj_109_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1076834112)))]; + tensor obj_109_beta_0_to_fp16 = const()[name = tensor("obj_109_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1076836736)))]; + tensor obj_109_epsilon_0_to_fp16 = const()[name = tensor("obj_109_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_109_cast_fp16 = batch_norm(beta = obj_109_beta_0_to_fp16, epsilon = obj_109_epsilon_0_to_fp16, gamma = obj_109_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_109_cast_fp16)[name = tensor("obj_109_cast_fp16")]; + tensor var_3399 = const()[name = tensor("op_3399"), val = tensor([1, 1])]; + tensor var_3401 = const()[name = tensor("op_3401"), val = tensor([1, 1])]; + tensor query_55_pad_type_0 = const()[name = tensor("query_55_pad_type_0"), val = tensor("custom")]; + tensor query_55_pad_0 = const()[name = tensor("query_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_27_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_27_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1076839360)))]; + tensor layers_27_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_27_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080116224)))]; + tensor query_55_cast_fp16 = conv(bias = layers_27_self_attn_q_proj_bias_to_fp16, dilations = var_3401, groups = var_3366, pad = query_55_pad_0, pad_type = query_55_pad_type_0, strides = var_3399, weight = layers_27_self_attn_q_proj_weight_to_fp16, x = obj_109_cast_fp16)[name = tensor("query_55_cast_fp16")]; + tensor var_3405 = const()[name = tensor("op_3405"), val = tensor([1, 1])]; + tensor var_3407 = const()[name = tensor("op_3407"), val = tensor([1, 1])]; + tensor key_55_pad_type_0 = const()[name = tensor("key_55_pad_type_0"), val = tensor("custom")]; + tensor key_55_pad_0 = const()[name = tensor("key_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_27_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_27_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080118848)))]; + tensor key_55_cast_fp16 = conv(dilations = var_3407, groups = var_3366, pad = key_55_pad_0, pad_type = key_55_pad_type_0, strides = var_3405, weight = layers_27_self_attn_k_proj_weight_to_fp16, x = obj_109_cast_fp16)[name = tensor("key_55_cast_fp16")]; + tensor var_3412 = const()[name = tensor("op_3412"), val = tensor([1, 
1])]; + tensor var_3414 = const()[name = tensor("op_3414"), val = tensor([1, 1])]; + tensor value_55_pad_type_0 = const()[name = tensor("value_55_pad_type_0"), val = tensor("custom")]; + tensor value_55_pad_0 = const()[name = tensor("value_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_27_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_27_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1083395712)))]; + tensor layers_27_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_27_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1086672576)))]; + tensor value_55_cast_fp16 = conv(bias = layers_27_self_attn_v_proj_bias_to_fp16, dilations = var_3414, groups = var_3366, pad = value_55_pad_0, pad_type = value_55_pad_type_0, strides = var_3412, weight = layers_27_self_attn_v_proj_weight_to_fp16, x = obj_109_cast_fp16)[name = tensor("value_55_cast_fp16")]; + tensor var_3418 = const()[name = tensor("op_3418"), val = tensor([1, 20, 64, -1])]; + tensor var_3419_cast_fp16 = reshape(shape = var_3418, x = query_55_cast_fp16)[name = tensor("op_3419_cast_fp16")]; + tensor var_3420_to_fp16 = const()[name = tensor("op_3420_to_fp16"), val = tensor(0x1p-3)]; + tensor var_3421_cast_fp16 = mul(x = var_3419_cast_fp16, y = var_3420_to_fp16)[name = tensor("op_3421_cast_fp16")]; + tensor var_3422 = const()[name = tensor("op_3422"), val = tensor([1, 20, 64, -1])]; + tensor var_3423_cast_fp16 = reshape(shape = var_3422, x = key_55_cast_fp16)[name = tensor("op_3423_cast_fp16")]; + tensor mh_w_55_transpose_x_0 = const()[name = tensor("mh_w_55_transpose_x_0"), val = tensor(true)]; + tensor mh_w_55_transpose_y_0 = const()[name = tensor("mh_w_55_transpose_y_0"), val = tensor(false)]; + tensor mh_w_55_cast_fp16 = matmul(transpose_x = mh_w_55_transpose_x_0, transpose_y = mh_w_55_transpose_y_0, x = var_3421_cast_fp16, y = var_3423_cast_fp16)[name = tensor("mh_w_55_cast_fp16")]; + tensor var_3426_cast_fp16 = softmax(axis = var_3364, x = mh_w_55_cast_fp16)[name = tensor("op_3426_cast_fp16")]; + tensor var_3427 = const()[name = tensor("op_3427"), val = tensor([1, 20, 64, -1])]; + tensor var_3428_cast_fp16 = reshape(shape = var_3427, x = value_55_cast_fp16)[name = tensor("op_3428_cast_fp16")]; + tensor attn_55_transpose_x_0 = const()[name = tensor("attn_55_transpose_x_0"), val = tensor(false)]; + tensor attn_55_transpose_y_0 = const()[name = tensor("attn_55_transpose_y_0"), val = tensor(true)]; + tensor attn_55_cast_fp16 = matmul(transpose_x = attn_55_transpose_x_0, transpose_y = attn_55_transpose_y_0, x = var_3428_cast_fp16, y = var_3426_cast_fp16)[name = tensor("attn_55_cast_fp16")]; + tensor var_3431 = const()[name = tensor("op_3431"), val = tensor([1, 1280, 1, -1])]; + tensor input_217_cast_fp16 = reshape(shape = var_3431, x = attn_55_cast_fp16)[name = tensor("input_217_cast_fp16")]; + tensor var_3435 = const()[name = tensor("op_3435"), val = tensor([1, 1])]; + tensor var_3437 = const()[name = tensor("op_3437"), val = tensor([1, 1])]; + tensor obj_111_pad_type_0 = const()[name = tensor("obj_111_pad_type_0"), val = tensor("custom")]; + tensor obj_111_pad_0 = const()[name = tensor("obj_111_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_27_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_27_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1086675200)))]; + tensor 
layers_27_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_27_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1089952064)))]; + tensor obj_111_cast_fp16 = conv(bias = layers_27_self_attn_o_proj_bias_to_fp16, dilations = var_3437, groups = var_3366, pad = obj_111_pad_0, pad_type = obj_111_pad_type_0, strides = var_3435, weight = layers_27_self_attn_o_proj_weight_to_fp16, x = input_217_cast_fp16)[name = tensor("obj_111_cast_fp16")]; + tensor inputs_111_cast_fp16 = add(x = inputs_109_cast_fp16, y = obj_111_cast_fp16)[name = tensor("inputs_111_cast_fp16")]; + tensor var_3443 = const()[name = tensor("op_3443"), val = tensor([1])]; + tensor channels_mean_111_cast_fp16 = reduce_mean(axes = var_3443, keep_dims = var_3367, x = inputs_111_cast_fp16)[name = tensor("channels_mean_111_cast_fp16")]; + tensor zero_mean_111_cast_fp16 = sub(x = inputs_111_cast_fp16, y = channels_mean_111_cast_fp16)[name = tensor("zero_mean_111_cast_fp16")]; + tensor zero_mean_sq_111_cast_fp16 = mul(x = zero_mean_111_cast_fp16, y = zero_mean_111_cast_fp16)[name = tensor("zero_mean_sq_111_cast_fp16")]; + tensor var_3447 = const()[name = tensor("op_3447"), val = tensor([1])]; + tensor var_3448_cast_fp16 = reduce_mean(axes = var_3447, keep_dims = var_3367, x = zero_mean_sq_111_cast_fp16)[name = tensor("op_3448_cast_fp16")]; + tensor var_3449_to_fp16 = const()[name = tensor("op_3449_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3450_cast_fp16 = add(x = var_3448_cast_fp16, y = var_3449_to_fp16)[name = tensor("op_3450_cast_fp16")]; + tensor denom_111_epsilon_0_to_fp16 = const()[name = tensor("denom_111_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_111_cast_fp16 = rsqrt(epsilon = denom_111_epsilon_0_to_fp16, x = var_3450_cast_fp16)[name = tensor("denom_111_cast_fp16")]; + tensor out_111_cast_fp16 = mul(x = zero_mean_111_cast_fp16, y = denom_111_cast_fp16)[name = tensor("out_111_cast_fp16")]; + tensor input_219_gamma_0_to_fp16 = const()[name = tensor("input_219_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1089954688)))]; + tensor input_219_beta_0_to_fp16 = const()[name = tensor("input_219_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1089957312)))]; + tensor input_219_epsilon_0_to_fp16 = const()[name = tensor("input_219_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_219_cast_fp16 = batch_norm(beta = input_219_beta_0_to_fp16, epsilon = input_219_epsilon_0_to_fp16, gamma = input_219_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_111_cast_fp16)[name = tensor("input_219_cast_fp16")]; + tensor var_3461 = const()[name = tensor("op_3461"), val = tensor([1, 1])]; + tensor var_3463 = const()[name = tensor("op_3463"), val = tensor([1, 1])]; + tensor input_221_pad_type_0 = const()[name = tensor("input_221_pad_type_0"), val = tensor("custom")]; + tensor input_221_pad_0 = const()[name = tensor("input_221_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_27_fc1_weight_to_fp16 = const()[name = tensor("layers_27_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1089959936)))]; + tensor layers_27_fc1_bias_to_fp16 = const()[name = tensor("layers_27_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1103067200)))]; + tensor input_221_cast_fp16 = conv(bias = 
layers_27_fc1_bias_to_fp16, dilations = var_3463, groups = var_3366, pad = input_221_pad_0, pad_type = input_221_pad_type_0, strides = var_3461, weight = layers_27_fc1_weight_to_fp16, x = input_219_cast_fp16)[name = tensor("input_221_cast_fp16")]; + tensor input_223_mode_0 = const()[name = tensor("input_223_mode_0"), val = tensor("EXACT")]; + tensor input_223_cast_fp16 = gelu(mode = input_223_mode_0, x = input_221_cast_fp16)[name = tensor("input_223_cast_fp16")]; + tensor var_3469 = const()[name = tensor("op_3469"), val = tensor([1, 1])]; + tensor var_3471 = const()[name = tensor("op_3471"), val = tensor([1, 1])]; + tensor hidden_states_59_pad_type_0 = const()[name = tensor("hidden_states_59_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_59_pad_0 = const()[name = tensor("hidden_states_59_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_27_fc2_weight_to_fp16 = const()[name = tensor("layers_27_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1103077504)))]; + tensor layers_27_fc2_bias_to_fp16 = const()[name = tensor("layers_27_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1116184768)))]; + tensor hidden_states_59_cast_fp16 = conv(bias = layers_27_fc2_bias_to_fp16, dilations = var_3471, groups = var_3366, pad = hidden_states_59_pad_0, pad_type = hidden_states_59_pad_type_0, strides = var_3469, weight = layers_27_fc2_weight_to_fp16, x = input_223_cast_fp16)[name = tensor("hidden_states_59_cast_fp16")]; + tensor inputs_113_cast_fp16 = add(x = inputs_111_cast_fp16, y = hidden_states_59_cast_fp16)[name = tensor("inputs_113_cast_fp16")]; + tensor var_3482 = const()[name = tensor("op_3482"), val = tensor(3)]; + tensor var_3484 = const()[name = tensor("op_3484"), val = tensor(1)]; + tensor var_3485 = const()[name = tensor("op_3485"), val = tensor(true)]; + tensor var_3495 = const()[name = tensor("op_3495"), val = tensor([1])]; + tensor channels_mean_113_cast_fp16 = reduce_mean(axes = var_3495, keep_dims = var_3485, x = inputs_113_cast_fp16)[name = tensor("channels_mean_113_cast_fp16")]; + tensor zero_mean_113_cast_fp16 = sub(x = inputs_113_cast_fp16, y = channels_mean_113_cast_fp16)[name = tensor("zero_mean_113_cast_fp16")]; + tensor zero_mean_sq_113_cast_fp16 = mul(x = zero_mean_113_cast_fp16, y = zero_mean_113_cast_fp16)[name = tensor("zero_mean_sq_113_cast_fp16")]; + tensor var_3499 = const()[name = tensor("op_3499"), val = tensor([1])]; + tensor var_3500_cast_fp16 = reduce_mean(axes = var_3499, keep_dims = var_3485, x = zero_mean_sq_113_cast_fp16)[name = tensor("op_3500_cast_fp16")]; + tensor var_3501_to_fp16 = const()[name = tensor("op_3501_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3502_cast_fp16 = add(x = var_3500_cast_fp16, y = var_3501_to_fp16)[name = tensor("op_3502_cast_fp16")]; + tensor denom_113_epsilon_0_to_fp16 = const()[name = tensor("denom_113_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_113_cast_fp16 = rsqrt(epsilon = denom_113_epsilon_0_to_fp16, x = var_3502_cast_fp16)[name = tensor("denom_113_cast_fp16")]; + tensor out_113_cast_fp16 = mul(x = zero_mean_113_cast_fp16, y = denom_113_cast_fp16)[name = tensor("out_113_cast_fp16")]; + tensor obj_113_gamma_0_to_fp16 = const()[name = tensor("obj_113_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1116187392)))]; + tensor obj_113_beta_0_to_fp16 = const()[name = tensor("obj_113_beta_0_to_fp16"), val = tensor(BLOBFILE(path 
= tensor("@model_path/weights/weight.bin"), offset = tensor(1116190016)))]; + tensor obj_113_epsilon_0_to_fp16 = const()[name = tensor("obj_113_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_113_cast_fp16 = batch_norm(beta = obj_113_beta_0_to_fp16, epsilon = obj_113_epsilon_0_to_fp16, gamma = obj_113_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_113_cast_fp16)[name = tensor("obj_113_cast_fp16")]; + tensor var_3517 = const()[name = tensor("op_3517"), val = tensor([1, 1])]; + tensor var_3519 = const()[name = tensor("op_3519"), val = tensor([1, 1])]; + tensor query_57_pad_type_0 = const()[name = tensor("query_57_pad_type_0"), val = tensor("custom")]; + tensor query_57_pad_0 = const()[name = tensor("query_57_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_28_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_28_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1116192640)))]; + tensor layers_28_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_28_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1119469504)))]; + tensor query_57_cast_fp16 = conv(bias = layers_28_self_attn_q_proj_bias_to_fp16, dilations = var_3519, groups = var_3484, pad = query_57_pad_0, pad_type = query_57_pad_type_0, strides = var_3517, weight = layers_28_self_attn_q_proj_weight_to_fp16, x = obj_113_cast_fp16)[name = tensor("query_57_cast_fp16")]; + tensor var_3523 = const()[name = tensor("op_3523"), val = tensor([1, 1])]; + tensor var_3525 = const()[name = tensor("op_3525"), val = tensor([1, 1])]; + tensor key_57_pad_type_0 = const()[name = tensor("key_57_pad_type_0"), val = tensor("custom")]; + tensor key_57_pad_0 = const()[name = tensor("key_57_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_28_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_28_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1119472128)))]; + tensor key_57_cast_fp16 = conv(dilations = var_3525, groups = var_3484, pad = key_57_pad_0, pad_type = key_57_pad_type_0, strides = var_3523, weight = layers_28_self_attn_k_proj_weight_to_fp16, x = obj_113_cast_fp16)[name = tensor("key_57_cast_fp16")]; + tensor var_3530 = const()[name = tensor("op_3530"), val = tensor([1, 1])]; + tensor var_3532 = const()[name = tensor("op_3532"), val = tensor([1, 1])]; + tensor value_57_pad_type_0 = const()[name = tensor("value_57_pad_type_0"), val = tensor("custom")]; + tensor value_57_pad_0 = const()[name = tensor("value_57_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_28_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_28_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1122748992)))]; + tensor layers_28_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_28_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1126025856)))]; + tensor value_57_cast_fp16 = conv(bias = layers_28_self_attn_v_proj_bias_to_fp16, dilations = var_3532, groups = var_3484, pad = value_57_pad_0, pad_type = value_57_pad_type_0, strides = var_3530, weight = layers_28_self_attn_v_proj_weight_to_fp16, x = obj_113_cast_fp16)[name = tensor("value_57_cast_fp16")]; + tensor var_3536 = const()[name = tensor("op_3536"), val = 
tensor([1, 20, 64, -1])]; + tensor var_3537_cast_fp16 = reshape(shape = var_3536, x = query_57_cast_fp16)[name = tensor("op_3537_cast_fp16")]; + tensor var_3538_to_fp16 = const()[name = tensor("op_3538_to_fp16"), val = tensor(0x1p-3)]; + tensor var_3539_cast_fp16 = mul(x = var_3537_cast_fp16, y = var_3538_to_fp16)[name = tensor("op_3539_cast_fp16")]; + tensor var_3540 = const()[name = tensor("op_3540"), val = tensor([1, 20, 64, -1])]; + tensor var_3541_cast_fp16 = reshape(shape = var_3540, x = key_57_cast_fp16)[name = tensor("op_3541_cast_fp16")]; + tensor mh_w_57_transpose_x_0 = const()[name = tensor("mh_w_57_transpose_x_0"), val = tensor(true)]; + tensor mh_w_57_transpose_y_0 = const()[name = tensor("mh_w_57_transpose_y_0"), val = tensor(false)]; + tensor mh_w_57_cast_fp16 = matmul(transpose_x = mh_w_57_transpose_x_0, transpose_y = mh_w_57_transpose_y_0, x = var_3539_cast_fp16, y = var_3541_cast_fp16)[name = tensor("mh_w_57_cast_fp16")]; + tensor var_3544_cast_fp16 = softmax(axis = var_3482, x = mh_w_57_cast_fp16)[name = tensor("op_3544_cast_fp16")]; + tensor var_3545 = const()[name = tensor("op_3545"), val = tensor([1, 20, 64, -1])]; + tensor var_3546_cast_fp16 = reshape(shape = var_3545, x = value_57_cast_fp16)[name = tensor("op_3546_cast_fp16")]; + tensor attn_57_transpose_x_0 = const()[name = tensor("attn_57_transpose_x_0"), val = tensor(false)]; + tensor attn_57_transpose_y_0 = const()[name = tensor("attn_57_transpose_y_0"), val = tensor(true)]; + tensor attn_57_cast_fp16 = matmul(transpose_x = attn_57_transpose_x_0, transpose_y = attn_57_transpose_y_0, x = var_3546_cast_fp16, y = var_3544_cast_fp16)[name = tensor("attn_57_cast_fp16")]; + tensor var_3549 = const()[name = tensor("op_3549"), val = tensor([1, 1280, 1, -1])]; + tensor input_225_cast_fp16 = reshape(shape = var_3549, x = attn_57_cast_fp16)[name = tensor("input_225_cast_fp16")]; + tensor var_3553 = const()[name = tensor("op_3553"), val = tensor([1, 1])]; + tensor var_3555 = const()[name = tensor("op_3555"), val = tensor([1, 1])]; + tensor obj_115_pad_type_0 = const()[name = tensor("obj_115_pad_type_0"), val = tensor("custom")]; + tensor obj_115_pad_0 = const()[name = tensor("obj_115_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_28_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_28_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1126028480)))]; + tensor layers_28_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_28_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1129305344)))]; + tensor obj_115_cast_fp16 = conv(bias = layers_28_self_attn_o_proj_bias_to_fp16, dilations = var_3555, groups = var_3484, pad = obj_115_pad_0, pad_type = obj_115_pad_type_0, strides = var_3553, weight = layers_28_self_attn_o_proj_weight_to_fp16, x = input_225_cast_fp16)[name = tensor("obj_115_cast_fp16")]; + tensor inputs_115_cast_fp16 = add(x = inputs_113_cast_fp16, y = obj_115_cast_fp16)[name = tensor("inputs_115_cast_fp16")]; + tensor var_3561 = const()[name = tensor("op_3561"), val = tensor([1])]; + tensor channels_mean_115_cast_fp16 = reduce_mean(axes = var_3561, keep_dims = var_3485, x = inputs_115_cast_fp16)[name = tensor("channels_mean_115_cast_fp16")]; + tensor zero_mean_115_cast_fp16 = sub(x = inputs_115_cast_fp16, y = channels_mean_115_cast_fp16)[name = tensor("zero_mean_115_cast_fp16")]; + tensor zero_mean_sq_115_cast_fp16 = mul(x = zero_mean_115_cast_fp16, y 
= zero_mean_115_cast_fp16)[name = tensor("zero_mean_sq_115_cast_fp16")]; + tensor var_3565 = const()[name = tensor("op_3565"), val = tensor([1])]; + tensor var_3566_cast_fp16 = reduce_mean(axes = var_3565, keep_dims = var_3485, x = zero_mean_sq_115_cast_fp16)[name = tensor("op_3566_cast_fp16")]; + tensor var_3567_to_fp16 = const()[name = tensor("op_3567_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3568_cast_fp16 = add(x = var_3566_cast_fp16, y = var_3567_to_fp16)[name = tensor("op_3568_cast_fp16")]; + tensor denom_115_epsilon_0_to_fp16 = const()[name = tensor("denom_115_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_115_cast_fp16 = rsqrt(epsilon = denom_115_epsilon_0_to_fp16, x = var_3568_cast_fp16)[name = tensor("denom_115_cast_fp16")]; + tensor out_115_cast_fp16 = mul(x = zero_mean_115_cast_fp16, y = denom_115_cast_fp16)[name = tensor("out_115_cast_fp16")]; + tensor input_227_gamma_0_to_fp16 = const()[name = tensor("input_227_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1129307968)))]; + tensor input_227_beta_0_to_fp16 = const()[name = tensor("input_227_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1129310592)))]; + tensor input_227_epsilon_0_to_fp16 = const()[name = tensor("input_227_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_227_cast_fp16 = batch_norm(beta = input_227_beta_0_to_fp16, epsilon = input_227_epsilon_0_to_fp16, gamma = input_227_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_115_cast_fp16)[name = tensor("input_227_cast_fp16")]; + tensor var_3579 = const()[name = tensor("op_3579"), val = tensor([1, 1])]; + tensor var_3581 = const()[name = tensor("op_3581"), val = tensor([1, 1])]; + tensor input_229_pad_type_0 = const()[name = tensor("input_229_pad_type_0"), val = tensor("custom")]; + tensor input_229_pad_0 = const()[name = tensor("input_229_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_28_fc1_weight_to_fp16 = const()[name = tensor("layers_28_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1129313216)))]; + tensor layers_28_fc1_bias_to_fp16 = const()[name = tensor("layers_28_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1142420480)))]; + tensor input_229_cast_fp16 = conv(bias = layers_28_fc1_bias_to_fp16, dilations = var_3581, groups = var_3484, pad = input_229_pad_0, pad_type = input_229_pad_type_0, strides = var_3579, weight = layers_28_fc1_weight_to_fp16, x = input_227_cast_fp16)[name = tensor("input_229_cast_fp16")]; + tensor input_231_mode_0 = const()[name = tensor("input_231_mode_0"), val = tensor("EXACT")]; + tensor input_231_cast_fp16 = gelu(mode = input_231_mode_0, x = input_229_cast_fp16)[name = tensor("input_231_cast_fp16")]; + tensor var_3587 = const()[name = tensor("op_3587"), val = tensor([1, 1])]; + tensor var_3589 = const()[name = tensor("op_3589"), val = tensor([1, 1])]; + tensor hidden_states_61_pad_type_0 = const()[name = tensor("hidden_states_61_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_61_pad_0 = const()[name = tensor("hidden_states_61_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_28_fc2_weight_to_fp16 = const()[name = tensor("layers_28_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1142430784)))]; + tensor layers_28_fc2_bias_to_fp16 = 
const()[name = tensor("layers_28_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1155538048)))]; + tensor hidden_states_61_cast_fp16 = conv(bias = layers_28_fc2_bias_to_fp16, dilations = var_3589, groups = var_3484, pad = hidden_states_61_pad_0, pad_type = hidden_states_61_pad_type_0, strides = var_3587, weight = layers_28_fc2_weight_to_fp16, x = input_231_cast_fp16)[name = tensor("hidden_states_61_cast_fp16")]; + tensor inputs_117_cast_fp16 = add(x = inputs_115_cast_fp16, y = hidden_states_61_cast_fp16)[name = tensor("inputs_117_cast_fp16")]; + tensor var_3600 = const()[name = tensor("op_3600"), val = tensor(3)]; + tensor var_3602 = const()[name = tensor("op_3602"), val = tensor(1)]; + tensor var_3603 = const()[name = tensor("op_3603"), val = tensor(true)]; + tensor var_3613 = const()[name = tensor("op_3613"), val = tensor([1])]; + tensor channels_mean_117_cast_fp16 = reduce_mean(axes = var_3613, keep_dims = var_3603, x = inputs_117_cast_fp16)[name = tensor("channels_mean_117_cast_fp16")]; + tensor zero_mean_117_cast_fp16 = sub(x = inputs_117_cast_fp16, y = channels_mean_117_cast_fp16)[name = tensor("zero_mean_117_cast_fp16")]; + tensor zero_mean_sq_117_cast_fp16 = mul(x = zero_mean_117_cast_fp16, y = zero_mean_117_cast_fp16)[name = tensor("zero_mean_sq_117_cast_fp16")]; + tensor var_3617 = const()[name = tensor("op_3617"), val = tensor([1])]; + tensor var_3618_cast_fp16 = reduce_mean(axes = var_3617, keep_dims = var_3603, x = zero_mean_sq_117_cast_fp16)[name = tensor("op_3618_cast_fp16")]; + tensor var_3619_to_fp16 = const()[name = tensor("op_3619_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3620_cast_fp16 = add(x = var_3618_cast_fp16, y = var_3619_to_fp16)[name = tensor("op_3620_cast_fp16")]; + tensor denom_117_epsilon_0_to_fp16 = const()[name = tensor("denom_117_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_117_cast_fp16 = rsqrt(epsilon = denom_117_epsilon_0_to_fp16, x = var_3620_cast_fp16)[name = tensor("denom_117_cast_fp16")]; + tensor out_117_cast_fp16 = mul(x = zero_mean_117_cast_fp16, y = denom_117_cast_fp16)[name = tensor("out_117_cast_fp16")]; + tensor obj_117_gamma_0_to_fp16 = const()[name = tensor("obj_117_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1155540672)))]; + tensor obj_117_beta_0_to_fp16 = const()[name = tensor("obj_117_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1155543296)))]; + tensor obj_117_epsilon_0_to_fp16 = const()[name = tensor("obj_117_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_117_cast_fp16 = batch_norm(beta = obj_117_beta_0_to_fp16, epsilon = obj_117_epsilon_0_to_fp16, gamma = obj_117_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_117_cast_fp16)[name = tensor("obj_117_cast_fp16")]; + tensor var_3635 = const()[name = tensor("op_3635"), val = tensor([1, 1])]; + tensor var_3637 = const()[name = tensor("op_3637"), val = tensor([1, 1])]; + tensor query_59_pad_type_0 = const()[name = tensor("query_59_pad_type_0"), val = tensor("custom")]; + tensor query_59_pad_0 = const()[name = tensor("query_59_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_29_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_29_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1155545920)))]; + tensor layers_29_self_attn_q_proj_bias_to_fp16 = 
const()[name = tensor("layers_29_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1158822784)))]; + tensor query_59_cast_fp16 = conv(bias = layers_29_self_attn_q_proj_bias_to_fp16, dilations = var_3637, groups = var_3602, pad = query_59_pad_0, pad_type = query_59_pad_type_0, strides = var_3635, weight = layers_29_self_attn_q_proj_weight_to_fp16, x = obj_117_cast_fp16)[name = tensor("query_59_cast_fp16")]; + tensor var_3641 = const()[name = tensor("op_3641"), val = tensor([1, 1])]; + tensor var_3643 = const()[name = tensor("op_3643"), val = tensor([1, 1])]; + tensor key_59_pad_type_0 = const()[name = tensor("key_59_pad_type_0"), val = tensor("custom")]; + tensor key_59_pad_0 = const()[name = tensor("key_59_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_29_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_29_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1158825408)))]; + tensor key_59_cast_fp16 = conv(dilations = var_3643, groups = var_3602, pad = key_59_pad_0, pad_type = key_59_pad_type_0, strides = var_3641, weight = layers_29_self_attn_k_proj_weight_to_fp16, x = obj_117_cast_fp16)[name = tensor("key_59_cast_fp16")]; + tensor var_3648 = const()[name = tensor("op_3648"), val = tensor([1, 1])]; + tensor var_3650 = const()[name = tensor("op_3650"), val = tensor([1, 1])]; + tensor value_59_pad_type_0 = const()[name = tensor("value_59_pad_type_0"), val = tensor("custom")]; + tensor value_59_pad_0 = const()[name = tensor("value_59_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_29_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_29_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1162102272)))]; + tensor layers_29_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_29_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1165379136)))]; + tensor value_59_cast_fp16 = conv(bias = layers_29_self_attn_v_proj_bias_to_fp16, dilations = var_3650, groups = var_3602, pad = value_59_pad_0, pad_type = value_59_pad_type_0, strides = var_3648, weight = layers_29_self_attn_v_proj_weight_to_fp16, x = obj_117_cast_fp16)[name = tensor("value_59_cast_fp16")]; + tensor var_3654 = const()[name = tensor("op_3654"), val = tensor([1, 20, 64, -1])]; + tensor var_3655_cast_fp16 = reshape(shape = var_3654, x = query_59_cast_fp16)[name = tensor("op_3655_cast_fp16")]; + tensor var_3656_to_fp16 = const()[name = tensor("op_3656_to_fp16"), val = tensor(0x1p-3)]; + tensor var_3657_cast_fp16 = mul(x = var_3655_cast_fp16, y = var_3656_to_fp16)[name = tensor("op_3657_cast_fp16")]; + tensor var_3658 = const()[name = tensor("op_3658"), val = tensor([1, 20, 64, -1])]; + tensor var_3659_cast_fp16 = reshape(shape = var_3658, x = key_59_cast_fp16)[name = tensor("op_3659_cast_fp16")]; + tensor mh_w_59_transpose_x_0 = const()[name = tensor("mh_w_59_transpose_x_0"), val = tensor(true)]; + tensor mh_w_59_transpose_y_0 = const()[name = tensor("mh_w_59_transpose_y_0"), val = tensor(false)]; + tensor mh_w_59_cast_fp16 = matmul(transpose_x = mh_w_59_transpose_x_0, transpose_y = mh_w_59_transpose_y_0, x = var_3657_cast_fp16, y = var_3659_cast_fp16)[name = tensor("mh_w_59_cast_fp16")]; + tensor var_3662_cast_fp16 = softmax(axis = var_3600, x = mh_w_59_cast_fp16)[name = tensor("op_3662_cast_fp16")]; + tensor 
var_3663 = const()[name = tensor("op_3663"), val = tensor([1, 20, 64, -1])]; + tensor var_3664_cast_fp16 = reshape(shape = var_3663, x = value_59_cast_fp16)[name = tensor("op_3664_cast_fp16")]; + tensor attn_59_transpose_x_0 = const()[name = tensor("attn_59_transpose_x_0"), val = tensor(false)]; + tensor attn_59_transpose_y_0 = const()[name = tensor("attn_59_transpose_y_0"), val = tensor(true)]; + tensor attn_59_cast_fp16 = matmul(transpose_x = attn_59_transpose_x_0, transpose_y = attn_59_transpose_y_0, x = var_3664_cast_fp16, y = var_3662_cast_fp16)[name = tensor("attn_59_cast_fp16")]; + tensor var_3667 = const()[name = tensor("op_3667"), val = tensor([1, 1280, 1, -1])]; + tensor input_233_cast_fp16 = reshape(shape = var_3667, x = attn_59_cast_fp16)[name = tensor("input_233_cast_fp16")]; + tensor var_3671 = const()[name = tensor("op_3671"), val = tensor([1, 1])]; + tensor var_3673 = const()[name = tensor("op_3673"), val = tensor([1, 1])]; + tensor obj_119_pad_type_0 = const()[name = tensor("obj_119_pad_type_0"), val = tensor("custom")]; + tensor obj_119_pad_0 = const()[name = tensor("obj_119_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_29_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_29_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1165381760)))]; + tensor layers_29_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_29_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1168658624)))]; + tensor obj_119_cast_fp16 = conv(bias = layers_29_self_attn_o_proj_bias_to_fp16, dilations = var_3673, groups = var_3602, pad = obj_119_pad_0, pad_type = obj_119_pad_type_0, strides = var_3671, weight = layers_29_self_attn_o_proj_weight_to_fp16, x = input_233_cast_fp16)[name = tensor("obj_119_cast_fp16")]; + tensor inputs_119_cast_fp16 = add(x = inputs_117_cast_fp16, y = obj_119_cast_fp16)[name = tensor("inputs_119_cast_fp16")]; + tensor var_3679 = const()[name = tensor("op_3679"), val = tensor([1])]; + tensor channels_mean_119_cast_fp16 = reduce_mean(axes = var_3679, keep_dims = var_3603, x = inputs_119_cast_fp16)[name = tensor("channels_mean_119_cast_fp16")]; + tensor zero_mean_119_cast_fp16 = sub(x = inputs_119_cast_fp16, y = channels_mean_119_cast_fp16)[name = tensor("zero_mean_119_cast_fp16")]; + tensor zero_mean_sq_119_cast_fp16 = mul(x = zero_mean_119_cast_fp16, y = zero_mean_119_cast_fp16)[name = tensor("zero_mean_sq_119_cast_fp16")]; + tensor var_3683 = const()[name = tensor("op_3683"), val = tensor([1])]; + tensor var_3684_cast_fp16 = reduce_mean(axes = var_3683, keep_dims = var_3603, x = zero_mean_sq_119_cast_fp16)[name = tensor("op_3684_cast_fp16")]; + tensor var_3685_to_fp16 = const()[name = tensor("op_3685_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3686_cast_fp16 = add(x = var_3684_cast_fp16, y = var_3685_to_fp16)[name = tensor("op_3686_cast_fp16")]; + tensor denom_119_epsilon_0_to_fp16 = const()[name = tensor("denom_119_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_119_cast_fp16 = rsqrt(epsilon = denom_119_epsilon_0_to_fp16, x = var_3686_cast_fp16)[name = tensor("denom_119_cast_fp16")]; + tensor out_119_cast_fp16 = mul(x = zero_mean_119_cast_fp16, y = denom_119_cast_fp16)[name = tensor("out_119_cast_fp16")]; + tensor input_235_gamma_0_to_fp16 = const()[name = tensor("input_235_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1168661248)))]; + tensor input_235_beta_0_to_fp16 = const()[name = tensor("input_235_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1168663872)))]; + tensor input_235_epsilon_0_to_fp16 = const()[name = tensor("input_235_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_235_cast_fp16 = batch_norm(beta = input_235_beta_0_to_fp16, epsilon = input_235_epsilon_0_to_fp16, gamma = input_235_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_119_cast_fp16)[name = tensor("input_235_cast_fp16")]; + tensor var_3697 = const()[name = tensor("op_3697"), val = tensor([1, 1])]; + tensor var_3699 = const()[name = tensor("op_3699"), val = tensor([1, 1])]; + tensor input_237_pad_type_0 = const()[name = tensor("input_237_pad_type_0"), val = tensor("custom")]; + tensor input_237_pad_0 = const()[name = tensor("input_237_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_29_fc1_weight_to_fp16 = const()[name = tensor("layers_29_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1168666496)))]; + tensor layers_29_fc1_bias_to_fp16 = const()[name = tensor("layers_29_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1181773760)))]; + tensor input_237_cast_fp16 = conv(bias = layers_29_fc1_bias_to_fp16, dilations = var_3699, groups = var_3602, pad = input_237_pad_0, pad_type = input_237_pad_type_0, strides = var_3697, weight = layers_29_fc1_weight_to_fp16, x = input_235_cast_fp16)[name = tensor("input_237_cast_fp16")]; + tensor input_239_mode_0 = const()[name = tensor("input_239_mode_0"), val = tensor("EXACT")]; + tensor input_239_cast_fp16 = gelu(mode = input_239_mode_0, x = input_237_cast_fp16)[name = tensor("input_239_cast_fp16")]; + tensor var_3705 = const()[name = tensor("op_3705"), val = tensor([1, 1])]; + tensor var_3707 = const()[name = tensor("op_3707"), val = tensor([1, 1])]; + tensor hidden_states_63_pad_type_0 = const()[name = tensor("hidden_states_63_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_63_pad_0 = const()[name = tensor("hidden_states_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_29_fc2_weight_to_fp16 = const()[name = tensor("layers_29_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1181784064)))]; + tensor layers_29_fc2_bias_to_fp16 = const()[name = tensor("layers_29_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1194891328)))]; + tensor hidden_states_63_cast_fp16 = conv(bias = layers_29_fc2_bias_to_fp16, dilations = var_3707, groups = var_3602, pad = hidden_states_63_pad_0, pad_type = hidden_states_63_pad_type_0, strides = var_3705, weight = layers_29_fc2_weight_to_fp16, x = input_239_cast_fp16)[name = tensor("hidden_states_63_cast_fp16")]; + tensor inputs_121_cast_fp16 = add(x = inputs_119_cast_fp16, y = hidden_states_63_cast_fp16)[name = tensor("inputs_121_cast_fp16")]; + tensor var_3718 = const()[name = tensor("op_3718"), val = tensor(3)]; + tensor var_3720 = const()[name = tensor("op_3720"), val = tensor(1)]; + tensor var_3721 = const()[name = tensor("op_3721"), val = tensor(true)]; + tensor var_3731 = const()[name = tensor("op_3731"), val = tensor([1])]; + tensor channels_mean_121_cast_fp16 = reduce_mean(axes = var_3731, keep_dims = var_3721, x = inputs_121_cast_fp16)[name = tensor("channels_mean_121_cast_fp16")]; + 
tensor zero_mean_121_cast_fp16 = sub(x = inputs_121_cast_fp16, y = channels_mean_121_cast_fp16)[name = tensor("zero_mean_121_cast_fp16")]; + tensor zero_mean_sq_121_cast_fp16 = mul(x = zero_mean_121_cast_fp16, y = zero_mean_121_cast_fp16)[name = tensor("zero_mean_sq_121_cast_fp16")]; + tensor var_3735 = const()[name = tensor("op_3735"), val = tensor([1])]; + tensor var_3736_cast_fp16 = reduce_mean(axes = var_3735, keep_dims = var_3721, x = zero_mean_sq_121_cast_fp16)[name = tensor("op_3736_cast_fp16")]; + tensor var_3737_to_fp16 = const()[name = tensor("op_3737_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3738_cast_fp16 = add(x = var_3736_cast_fp16, y = var_3737_to_fp16)[name = tensor("op_3738_cast_fp16")]; + tensor denom_121_epsilon_0_to_fp16 = const()[name = tensor("denom_121_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_121_cast_fp16 = rsqrt(epsilon = denom_121_epsilon_0_to_fp16, x = var_3738_cast_fp16)[name = tensor("denom_121_cast_fp16")]; + tensor out_121_cast_fp16 = mul(x = zero_mean_121_cast_fp16, y = denom_121_cast_fp16)[name = tensor("out_121_cast_fp16")]; + tensor obj_121_gamma_0_to_fp16 = const()[name = tensor("obj_121_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1194893952)))]; + tensor obj_121_beta_0_to_fp16 = const()[name = tensor("obj_121_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1194896576)))]; + tensor obj_121_epsilon_0_to_fp16 = const()[name = tensor("obj_121_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_121_cast_fp16 = batch_norm(beta = obj_121_beta_0_to_fp16, epsilon = obj_121_epsilon_0_to_fp16, gamma = obj_121_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_121_cast_fp16)[name = tensor("obj_121_cast_fp16")]; + tensor var_3753 = const()[name = tensor("op_3753"), val = tensor([1, 1])]; + tensor var_3755 = const()[name = tensor("op_3755"), val = tensor([1, 1])]; + tensor query_61_pad_type_0 = const()[name = tensor("query_61_pad_type_0"), val = tensor("custom")]; + tensor query_61_pad_0 = const()[name = tensor("query_61_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_30_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_30_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1194899200)))]; + tensor layers_30_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_30_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1198176064)))]; + tensor query_61_cast_fp16 = conv(bias = layers_30_self_attn_q_proj_bias_to_fp16, dilations = var_3755, groups = var_3720, pad = query_61_pad_0, pad_type = query_61_pad_type_0, strides = var_3753, weight = layers_30_self_attn_q_proj_weight_to_fp16, x = obj_121_cast_fp16)[name = tensor("query_61_cast_fp16")]; + tensor var_3759 = const()[name = tensor("op_3759"), val = tensor([1, 1])]; + tensor var_3761 = const()[name = tensor("op_3761"), val = tensor([1, 1])]; + tensor key_61_pad_type_0 = const()[name = tensor("key_61_pad_type_0"), val = tensor("custom")]; + tensor key_61_pad_0 = const()[name = tensor("key_61_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_30_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_30_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1198178688)))]; + tensor 
key_61_cast_fp16 = conv(dilations = var_3761, groups = var_3720, pad = key_61_pad_0, pad_type = key_61_pad_type_0, strides = var_3759, weight = layers_30_self_attn_k_proj_weight_to_fp16, x = obj_121_cast_fp16)[name = tensor("key_61_cast_fp16")]; + tensor var_3766 = const()[name = tensor("op_3766"), val = tensor([1, 1])]; + tensor var_3768 = const()[name = tensor("op_3768"), val = tensor([1, 1])]; + tensor value_61_pad_type_0 = const()[name = tensor("value_61_pad_type_0"), val = tensor("custom")]; + tensor value_61_pad_0 = const()[name = tensor("value_61_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_30_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_30_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1201455552)))]; + tensor layers_30_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_30_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1204732416)))]; + tensor value_61_cast_fp16 = conv(bias = layers_30_self_attn_v_proj_bias_to_fp16, dilations = var_3768, groups = var_3720, pad = value_61_pad_0, pad_type = value_61_pad_type_0, strides = var_3766, weight = layers_30_self_attn_v_proj_weight_to_fp16, x = obj_121_cast_fp16)[name = tensor("value_61_cast_fp16")]; + tensor var_3772 = const()[name = tensor("op_3772"), val = tensor([1, 20, 64, -1])]; + tensor var_3773_cast_fp16 = reshape(shape = var_3772, x = query_61_cast_fp16)[name = tensor("op_3773_cast_fp16")]; + tensor var_3774_to_fp16 = const()[name = tensor("op_3774_to_fp16"), val = tensor(0x1p-3)]; + tensor var_3775_cast_fp16 = mul(x = var_3773_cast_fp16, y = var_3774_to_fp16)[name = tensor("op_3775_cast_fp16")]; + tensor var_3776 = const()[name = tensor("op_3776"), val = tensor([1, 20, 64, -1])]; + tensor var_3777_cast_fp16 = reshape(shape = var_3776, x = key_61_cast_fp16)[name = tensor("op_3777_cast_fp16")]; + tensor mh_w_61_transpose_x_0 = const()[name = tensor("mh_w_61_transpose_x_0"), val = tensor(true)]; + tensor mh_w_61_transpose_y_0 = const()[name = tensor("mh_w_61_transpose_y_0"), val = tensor(false)]; + tensor mh_w_61_cast_fp16 = matmul(transpose_x = mh_w_61_transpose_x_0, transpose_y = mh_w_61_transpose_y_0, x = var_3775_cast_fp16, y = var_3777_cast_fp16)[name = tensor("mh_w_61_cast_fp16")]; + tensor var_3780_cast_fp16 = softmax(axis = var_3718, x = mh_w_61_cast_fp16)[name = tensor("op_3780_cast_fp16")]; + tensor var_3781 = const()[name = tensor("op_3781"), val = tensor([1, 20, 64, -1])]; + tensor var_3782_cast_fp16 = reshape(shape = var_3781, x = value_61_cast_fp16)[name = tensor("op_3782_cast_fp16")]; + tensor attn_61_transpose_x_0 = const()[name = tensor("attn_61_transpose_x_0"), val = tensor(false)]; + tensor attn_61_transpose_y_0 = const()[name = tensor("attn_61_transpose_y_0"), val = tensor(true)]; + tensor attn_61_cast_fp16 = matmul(transpose_x = attn_61_transpose_x_0, transpose_y = attn_61_transpose_y_0, x = var_3782_cast_fp16, y = var_3780_cast_fp16)[name = tensor("attn_61_cast_fp16")]; + tensor var_3785 = const()[name = tensor("op_3785"), val = tensor([1, 1280, 1, -1])]; + tensor input_241_cast_fp16 = reshape(shape = var_3785, x = attn_61_cast_fp16)[name = tensor("input_241_cast_fp16")]; + tensor var_3789 = const()[name = tensor("op_3789"), val = tensor([1, 1])]; + tensor var_3791 = const()[name = tensor("op_3791"), val = tensor([1, 1])]; + tensor obj_123_pad_type_0 = const()[name = tensor("obj_123_pad_type_0"), val = tensor("custom")]; + tensor 
obj_123_pad_0 = const()[name = tensor("obj_123_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_30_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_30_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1204735040)))]; + tensor layers_30_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_30_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1208011904)))]; + tensor obj_123_cast_fp16 = conv(bias = layers_30_self_attn_o_proj_bias_to_fp16, dilations = var_3791, groups = var_3720, pad = obj_123_pad_0, pad_type = obj_123_pad_type_0, strides = var_3789, weight = layers_30_self_attn_o_proj_weight_to_fp16, x = input_241_cast_fp16)[name = tensor("obj_123_cast_fp16")]; + tensor inputs_123_cast_fp16 = add(x = inputs_121_cast_fp16, y = obj_123_cast_fp16)[name = tensor("inputs_123_cast_fp16")]; + tensor var_3797 = const()[name = tensor("op_3797"), val = tensor([1])]; + tensor channels_mean_123_cast_fp16 = reduce_mean(axes = var_3797, keep_dims = var_3721, x = inputs_123_cast_fp16)[name = tensor("channels_mean_123_cast_fp16")]; + tensor zero_mean_123_cast_fp16 = sub(x = inputs_123_cast_fp16, y = channels_mean_123_cast_fp16)[name = tensor("zero_mean_123_cast_fp16")]; + tensor zero_mean_sq_123_cast_fp16 = mul(x = zero_mean_123_cast_fp16, y = zero_mean_123_cast_fp16)[name = tensor("zero_mean_sq_123_cast_fp16")]; + tensor var_3801 = const()[name = tensor("op_3801"), val = tensor([1])]; + tensor var_3802_cast_fp16 = reduce_mean(axes = var_3801, keep_dims = var_3721, x = zero_mean_sq_123_cast_fp16)[name = tensor("op_3802_cast_fp16")]; + tensor var_3803_to_fp16 = const()[name = tensor("op_3803_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3804_cast_fp16 = add(x = var_3802_cast_fp16, y = var_3803_to_fp16)[name = tensor("op_3804_cast_fp16")]; + tensor denom_123_epsilon_0_to_fp16 = const()[name = tensor("denom_123_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_123_cast_fp16 = rsqrt(epsilon = denom_123_epsilon_0_to_fp16, x = var_3804_cast_fp16)[name = tensor("denom_123_cast_fp16")]; + tensor out_123_cast_fp16 = mul(x = zero_mean_123_cast_fp16, y = denom_123_cast_fp16)[name = tensor("out_123_cast_fp16")]; + tensor input_243_gamma_0_to_fp16 = const()[name = tensor("input_243_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1208014528)))]; + tensor input_243_beta_0_to_fp16 = const()[name = tensor("input_243_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1208017152)))]; + tensor input_243_epsilon_0_to_fp16 = const()[name = tensor("input_243_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_243_cast_fp16 = batch_norm(beta = input_243_beta_0_to_fp16, epsilon = input_243_epsilon_0_to_fp16, gamma = input_243_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_123_cast_fp16)[name = tensor("input_243_cast_fp16")]; + tensor var_3815 = const()[name = tensor("op_3815"), val = tensor([1, 1])]; + tensor var_3817 = const()[name = tensor("op_3817"), val = tensor([1, 1])]; + tensor input_245_pad_type_0 = const()[name = tensor("input_245_pad_type_0"), val = tensor("custom")]; + tensor input_245_pad_0 = const()[name = tensor("input_245_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_30_fc1_weight_to_fp16 = const()[name = tensor("layers_30_fc1_weight_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1208019776)))]; + tensor layers_30_fc1_bias_to_fp16 = const()[name = tensor("layers_30_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1221127040)))]; + tensor input_245_cast_fp16 = conv(bias = layers_30_fc1_bias_to_fp16, dilations = var_3817, groups = var_3720, pad = input_245_pad_0, pad_type = input_245_pad_type_0, strides = var_3815, weight = layers_30_fc1_weight_to_fp16, x = input_243_cast_fp16)[name = tensor("input_245_cast_fp16")]; + tensor input_247_mode_0 = const()[name = tensor("input_247_mode_0"), val = tensor("EXACT")]; + tensor input_247_cast_fp16 = gelu(mode = input_247_mode_0, x = input_245_cast_fp16)[name = tensor("input_247_cast_fp16")]; + tensor var_3823 = const()[name = tensor("op_3823"), val = tensor([1, 1])]; + tensor var_3825 = const()[name = tensor("op_3825"), val = tensor([1, 1])]; + tensor hidden_states_65_pad_type_0 = const()[name = tensor("hidden_states_65_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_65_pad_0 = const()[name = tensor("hidden_states_65_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_30_fc2_weight_to_fp16 = const()[name = tensor("layers_30_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1221137344)))]; + tensor layers_30_fc2_bias_to_fp16 = const()[name = tensor("layers_30_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1234244608)))]; + tensor hidden_states_65_cast_fp16 = conv(bias = layers_30_fc2_bias_to_fp16, dilations = var_3825, groups = var_3720, pad = hidden_states_65_pad_0, pad_type = hidden_states_65_pad_type_0, strides = var_3823, weight = layers_30_fc2_weight_to_fp16, x = input_247_cast_fp16)[name = tensor("hidden_states_65_cast_fp16")]; + tensor inputs_125_cast_fp16 = add(x = inputs_123_cast_fp16, y = hidden_states_65_cast_fp16)[name = tensor("inputs_125_cast_fp16")]; + tensor var_3836 = const()[name = tensor("op_3836"), val = tensor(3)]; + tensor var_3838 = const()[name = tensor("op_3838"), val = tensor(1)]; + tensor var_3839 = const()[name = tensor("op_3839"), val = tensor(true)]; + tensor var_3849 = const()[name = tensor("op_3849"), val = tensor([1])]; + tensor channels_mean_125_cast_fp16 = reduce_mean(axes = var_3849, keep_dims = var_3839, x = inputs_125_cast_fp16)[name = tensor("channels_mean_125_cast_fp16")]; + tensor zero_mean_125_cast_fp16 = sub(x = inputs_125_cast_fp16, y = channels_mean_125_cast_fp16)[name = tensor("zero_mean_125_cast_fp16")]; + tensor zero_mean_sq_125_cast_fp16 = mul(x = zero_mean_125_cast_fp16, y = zero_mean_125_cast_fp16)[name = tensor("zero_mean_sq_125_cast_fp16")]; + tensor var_3853 = const()[name = tensor("op_3853"), val = tensor([1])]; + tensor var_3854_cast_fp16 = reduce_mean(axes = var_3853, keep_dims = var_3839, x = zero_mean_sq_125_cast_fp16)[name = tensor("op_3854_cast_fp16")]; + tensor var_3855_to_fp16 = const()[name = tensor("op_3855_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3856_cast_fp16 = add(x = var_3854_cast_fp16, y = var_3855_to_fp16)[name = tensor("op_3856_cast_fp16")]; + tensor denom_125_epsilon_0_to_fp16 = const()[name = tensor("denom_125_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_125_cast_fp16 = rsqrt(epsilon = denom_125_epsilon_0_to_fp16, x = var_3856_cast_fp16)[name = tensor("denom_125_cast_fp16")]; + tensor out_125_cast_fp16 = mul(x = zero_mean_125_cast_fp16, y = 
denom_125_cast_fp16)[name = tensor("out_125_cast_fp16")]; + tensor obj_125_gamma_0_to_fp16 = const()[name = tensor("obj_125_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1234247232)))]; + tensor obj_125_beta_0_to_fp16 = const()[name = tensor("obj_125_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1234249856)))]; + tensor obj_125_epsilon_0_to_fp16 = const()[name = tensor("obj_125_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor obj_125_cast_fp16 = batch_norm(beta = obj_125_beta_0_to_fp16, epsilon = obj_125_epsilon_0_to_fp16, gamma = obj_125_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_125_cast_fp16)[name = tensor("obj_125_cast_fp16")]; + tensor var_3871 = const()[name = tensor("op_3871"), val = tensor([1, 1])]; + tensor var_3873 = const()[name = tensor("op_3873"), val = tensor([1, 1])]; + tensor query_pad_type_0 = const()[name = tensor("query_pad_type_0"), val = tensor("custom")]; + tensor query_pad_0 = const()[name = tensor("query_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_31_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_31_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1234252480)))]; + tensor layers_31_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_31_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1237529344)))]; + tensor query_cast_fp16 = conv(bias = layers_31_self_attn_q_proj_bias_to_fp16, dilations = var_3873, groups = var_3838, pad = query_pad_0, pad_type = query_pad_type_0, strides = var_3871, weight = layers_31_self_attn_q_proj_weight_to_fp16, x = obj_125_cast_fp16)[name = tensor("query_cast_fp16")]; + tensor var_3877 = const()[name = tensor("op_3877"), val = tensor([1, 1])]; + tensor var_3879 = const()[name = tensor("op_3879"), val = tensor([1, 1])]; + tensor key_pad_type_0 = const()[name = tensor("key_pad_type_0"), val = tensor("custom")]; + tensor key_pad_0 = const()[name = tensor("key_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_31_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_31_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1237531968)))]; + tensor key_cast_fp16 = conv(dilations = var_3879, groups = var_3838, pad = key_pad_0, pad_type = key_pad_type_0, strides = var_3877, weight = layers_31_self_attn_k_proj_weight_to_fp16, x = obj_125_cast_fp16)[name = tensor("key_cast_fp16")]; + tensor var_3884 = const()[name = tensor("op_3884"), val = tensor([1, 1])]; + tensor var_3886 = const()[name = tensor("op_3886"), val = tensor([1, 1])]; + tensor value_pad_type_0 = const()[name = tensor("value_pad_type_0"), val = tensor("custom")]; + tensor value_pad_0 = const()[name = tensor("value_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_31_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_31_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1240808832)))]; + tensor layers_31_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_31_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1244085696)))]; + tensor value_cast_fp16 = conv(bias = 
layers_31_self_attn_v_proj_bias_to_fp16, dilations = var_3886, groups = var_3838, pad = value_pad_0, pad_type = value_pad_type_0, strides = var_3884, weight = layers_31_self_attn_v_proj_weight_to_fp16, x = obj_125_cast_fp16)[name = tensor("value_cast_fp16")]; + tensor var_3890 = const()[name = tensor("op_3890"), val = tensor([1, 20, 64, -1])]; + tensor var_3891_cast_fp16 = reshape(shape = var_3890, x = query_cast_fp16)[name = tensor("op_3891_cast_fp16")]; + tensor var_3892_to_fp16 = const()[name = tensor("op_3892_to_fp16"), val = tensor(0x1p-3)]; + tensor var_3893_cast_fp16 = mul(x = var_3891_cast_fp16, y = var_3892_to_fp16)[name = tensor("op_3893_cast_fp16")]; + tensor var_3894 = const()[name = tensor("op_3894"), val = tensor([1, 20, 64, -1])]; + tensor var_3895_cast_fp16 = reshape(shape = var_3894, x = key_cast_fp16)[name = tensor("op_3895_cast_fp16")]; + tensor mh_w_transpose_x_0 = const()[name = tensor("mh_w_transpose_x_0"), val = tensor(true)]; + tensor mh_w_transpose_y_0 = const()[name = tensor("mh_w_transpose_y_0"), val = tensor(false)]; + tensor mh_w_cast_fp16 = matmul(transpose_x = mh_w_transpose_x_0, transpose_y = mh_w_transpose_y_0, x = var_3893_cast_fp16, y = var_3895_cast_fp16)[name = tensor("mh_w_cast_fp16")]; + tensor var_3898_cast_fp16 = softmax(axis = var_3836, x = mh_w_cast_fp16)[name = tensor("op_3898_cast_fp16")]; + tensor var_3899 = const()[name = tensor("op_3899"), val = tensor([1, 20, 64, -1])]; + tensor var_3900_cast_fp16 = reshape(shape = var_3899, x = value_cast_fp16)[name = tensor("op_3900_cast_fp16")]; + tensor attn_transpose_x_0 = const()[name = tensor("attn_transpose_x_0"), val = tensor(false)]; + tensor attn_transpose_y_0 = const()[name = tensor("attn_transpose_y_0"), val = tensor(true)]; + tensor attn_cast_fp16 = matmul(transpose_x = attn_transpose_x_0, transpose_y = attn_transpose_y_0, x = var_3900_cast_fp16, y = var_3898_cast_fp16)[name = tensor("attn_cast_fp16")]; + tensor var_3903 = const()[name = tensor("op_3903"), val = tensor([1, 1280, 1, -1])]; + tensor input_249_cast_fp16 = reshape(shape = var_3903, x = attn_cast_fp16)[name = tensor("input_249_cast_fp16")]; + tensor var_3907 = const()[name = tensor("op_3907"), val = tensor([1, 1])]; + tensor var_3909 = const()[name = tensor("op_3909"), val = tensor([1, 1])]; + tensor obj_pad_type_0 = const()[name = tensor("obj_pad_type_0"), val = tensor("custom")]; + tensor obj_pad_0 = const()[name = tensor("obj_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_31_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_31_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1244088320)))]; + tensor layers_31_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_31_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1247365184)))]; + tensor obj_cast_fp16 = conv(bias = layers_31_self_attn_o_proj_bias_to_fp16, dilations = var_3909, groups = var_3838, pad = obj_pad_0, pad_type = obj_pad_type_0, strides = var_3907, weight = layers_31_self_attn_o_proj_weight_to_fp16, x = input_249_cast_fp16)[name = tensor("obj_cast_fp16")]; + tensor inputs_127_cast_fp16 = add(x = inputs_125_cast_fp16, y = obj_cast_fp16)[name = tensor("inputs_127_cast_fp16")]; + tensor var_3915 = const()[name = tensor("op_3915"), val = tensor([1])]; + tensor channels_mean_127_cast_fp16 = reduce_mean(axes = var_3915, keep_dims = var_3839, x = inputs_127_cast_fp16)[name = 
tensor("channels_mean_127_cast_fp16")]; + tensor zero_mean_127_cast_fp16 = sub(x = inputs_127_cast_fp16, y = channels_mean_127_cast_fp16)[name = tensor("zero_mean_127_cast_fp16")]; + tensor zero_mean_sq_127_cast_fp16 = mul(x = zero_mean_127_cast_fp16, y = zero_mean_127_cast_fp16)[name = tensor("zero_mean_sq_127_cast_fp16")]; + tensor var_3919 = const()[name = tensor("op_3919"), val = tensor([1])]; + tensor var_3920_cast_fp16 = reduce_mean(axes = var_3919, keep_dims = var_3839, x = zero_mean_sq_127_cast_fp16)[name = tensor("op_3920_cast_fp16")]; + tensor var_3921_to_fp16 = const()[name = tensor("op_3921_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3922_cast_fp16 = add(x = var_3920_cast_fp16, y = var_3921_to_fp16)[name = tensor("op_3922_cast_fp16")]; + tensor denom_127_epsilon_0_to_fp16 = const()[name = tensor("denom_127_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_127_cast_fp16 = rsqrt(epsilon = denom_127_epsilon_0_to_fp16, x = var_3922_cast_fp16)[name = tensor("denom_127_cast_fp16")]; + tensor out_127_cast_fp16 = mul(x = zero_mean_127_cast_fp16, y = denom_127_cast_fp16)[name = tensor("out_127_cast_fp16")]; + tensor input_251_gamma_0_to_fp16 = const()[name = tensor("input_251_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1247367808)))]; + tensor input_251_beta_0_to_fp16 = const()[name = tensor("input_251_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1247370432)))]; + tensor input_251_epsilon_0_to_fp16 = const()[name = tensor("input_251_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor input_251_cast_fp16 = batch_norm(beta = input_251_beta_0_to_fp16, epsilon = input_251_epsilon_0_to_fp16, gamma = input_251_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_127_cast_fp16)[name = tensor("input_251_cast_fp16")]; + tensor var_3933 = const()[name = tensor("op_3933"), val = tensor([1, 1])]; + tensor var_3935 = const()[name = tensor("op_3935"), val = tensor([1, 1])]; + tensor input_253_pad_type_0 = const()[name = tensor("input_253_pad_type_0"), val = tensor("custom")]; + tensor input_253_pad_0 = const()[name = tensor("input_253_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor layers_31_fc1_weight_to_fp16 = const()[name = tensor("layers_31_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1247373056)))]; + tensor layers_31_fc1_bias_to_fp16 = const()[name = tensor("layers_31_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1260480320)))]; + tensor input_253_cast_fp16 = conv(bias = layers_31_fc1_bias_to_fp16, dilations = var_3935, groups = var_3838, pad = input_253_pad_0, pad_type = input_253_pad_type_0, strides = var_3933, weight = layers_31_fc1_weight_to_fp16, x = input_251_cast_fp16)[name = tensor("input_253_cast_fp16")]; + tensor input_mode_0 = const()[name = tensor("input_mode_0"), val = tensor("EXACT")]; + tensor input_cast_fp16 = gelu(mode = input_mode_0, x = input_253_cast_fp16)[name = tensor("input_cast_fp16")]; + tensor var_3941 = const()[name = tensor("op_3941"), val = tensor([1, 1])]; + tensor var_3943 = const()[name = tensor("op_3943"), val = tensor([1, 1])]; + tensor hidden_states_pad_type_0 = const()[name = tensor("hidden_states_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_pad_0 = const()[name = tensor("hidden_states_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
layers_31_fc2_weight_to_fp16 = const()[name = tensor("layers_31_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1260490624)))]; + tensor layers_31_fc2_bias_to_fp16 = const()[name = tensor("layers_31_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1273597888)))]; + tensor hidden_states_cast_fp16 = conv(bias = layers_31_fc2_bias_to_fp16, dilations = var_3943, groups = var_3838, pad = hidden_states_pad_0, pad_type = hidden_states_pad_type_0, strides = var_3941, weight = layers_31_fc2_weight_to_fp16, x = input_cast_fp16)[name = tensor("hidden_states_cast_fp16")]; + tensor inputs_cast_fp16 = add(x = inputs_127_cast_fp16, y = hidden_states_cast_fp16)[name = tensor("inputs_cast_fp16")]; + tensor var_3949 = const()[name = tensor("op_3949"), val = tensor(true)]; + tensor var_3953 = const()[name = tensor("op_3953"), val = tensor([1])]; + tensor channels_mean_cast_fp16 = reduce_mean(axes = var_3953, keep_dims = var_3949, x = inputs_cast_fp16)[name = tensor("channels_mean_cast_fp16")]; + tensor zero_mean_cast_fp16 = sub(x = inputs_cast_fp16, y = channels_mean_cast_fp16)[name = tensor("zero_mean_cast_fp16")]; + tensor zero_mean_sq_cast_fp16 = mul(x = zero_mean_cast_fp16, y = zero_mean_cast_fp16)[name = tensor("zero_mean_sq_cast_fp16")]; + tensor var_3957 = const()[name = tensor("op_3957"), val = tensor([1])]; + tensor var_3958_cast_fp16 = reduce_mean(axes = var_3957, keep_dims = var_3949, x = zero_mean_sq_cast_fp16)[name = tensor("op_3958_cast_fp16")]; + tensor var_3959_to_fp16 = const()[name = tensor("op_3959_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3960_cast_fp16 = add(x = var_3958_cast_fp16, y = var_3959_to_fp16)[name = tensor("op_3960_cast_fp16")]; + tensor denom_epsilon_0_to_fp16 = const()[name = tensor("denom_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_cast_fp16 = rsqrt(epsilon = denom_epsilon_0_to_fp16, x = var_3960_cast_fp16)[name = tensor("denom_cast_fp16")]; + tensor out_cast_fp16 = mul(x = zero_mean_cast_fp16, y = denom_cast_fp16)[name = tensor("out_cast_fp16")]; + tensor encoder_output_embeds_type_fp32_gamma_0_to_fp16 = const()[name = tensor("encoder_output_embeds_type_fp32_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1273600512)))]; + tensor encoder_output_embeds_type_fp32_beta_0_to_fp16 = const()[name = tensor("encoder_output_embeds_type_fp32_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1273603136)))]; + tensor encoder_output_embeds_type_fp32_epsilon_0_to_fp16 = const()[name = tensor("encoder_output_embeds_type_fp32_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor encoder_output_embeds = batch_norm(beta = encoder_output_embeds_type_fp32_beta_0_to_fp16, epsilon = encoder_output_embeds_type_fp32_epsilon_0_to_fp16, gamma = encoder_output_embeds_type_fp32_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_cast_fp16)[name = tensor("encoder_output_embeds_type_fp32_cast_fp16")]; + } -> (encoder_output_embeds); +} \ No newline at end of file