diff --git "a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder2.mlmodelc/model.mil" "b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder2.mlmodelc/model.mil" new file mode 100644--- /dev/null +++ "b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder2.mlmodelc/model.mil" @@ -0,0 +1,2275 @@ +program(1.0) +[buildInfo = dict, tensor>({{"coremlc-component-MIL", "5.33.4"}, {"coremlc-version", "1839.0.0"}, {"coremltools-component-torch", "2.0.1+cu117"}, {"coremltools-version", "7.0b1"}})] +{ + func main(tensor input_ids) { + tensor var_5 = const()[name = tensor("op_5"), val = tensor(-1)]; + tensor var_6 = const()[name = tensor("op_6"), val = tensor(false)]; + tensor cast_1_dtype_0 = const()[name = tensor("cast_1_dtype_0"), val = tensor("int32")]; + tensor inputs_embeds_axis_0 = const()[name = tensor("inputs_embeds_axis_0"), val = tensor(0)]; + tensor inputs_embeds_batch_dims_0 = const()[name = tensor("inputs_embeds_batch_dims_0"), val = tensor(0)]; + tensor text_encoder_text_model_embeddings_token_embedding_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_embeddings_token_embedding_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(64)))]; + tensor cast_1322 = cast(dtype = cast_1_dtype_0, x = input_ids)[name = tensor("cast_1322")]; + tensor inputs_embeds_cast = gather(axis = inputs_embeds_axis_0, batch_dims = inputs_embeds_batch_dims_0, indices = cast_1322, x = text_encoder_text_model_embeddings_token_embedding_weight_to_fp16)[name = tensor("inputs_embeds_cast")]; + tensor position_embeddings_to_fp16 = const()[name = tensor("position_embeddings_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126484608)))]; + tensor input_3_cast = add(x = inputs_embeds_cast, y = position_embeddings_to_fp16)[name = tensor("input_3_cast")]; + tensor hidden_states_1_axes_0 = const()[name = tensor("hidden_states_1_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_0_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126681792)))]; + tensor text_encoder_text_model_encoder_layers_0_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126684416)))]; + tensor var_12_to_fp16 = const()[name = tensor("op_12_to_fp16"), val = tensor(0x1.5p-17)]; + tensor hidden_states_1_cast = layer_norm(axes = hidden_states_1_axes_0, beta = text_encoder_text_model_encoder_layers_0_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_0_layer_norm1_weight_to_fp16, x = input_3_cast)[name = tensor("hidden_states_1_cast")]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126687040)))]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(129963904)))]; + 
tensor var_128_cast = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight_to_fp16, x = hidden_states_1_cast)[name = tensor("op_128_cast")]; + tensor var_129_to_fp16 = const()[name = tensor("op_129_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_5_cast = mul(x = var_128_cast, y = var_129_to_fp16)[name = tensor("tensor_5_cast")]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(129966528)))]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(133243392)))]; + tensor tensor_1_cast = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight_to_fp16, x = hidden_states_1_cast)[name = tensor("tensor_1_cast")]; + tensor var_134 = const()[name = tensor("op_134"), val = tensor([1, -1, 20, 64])]; + tensor var_135_cast = reshape(shape = var_134, x = tensor_1_cast)[name = tensor("op_135_cast")]; + tensor var_136_perm_0 = const()[name = tensor("op_136_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(133246016)))]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(136522880)))]; + tensor tensor_3_cast = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight_to_fp16, x = hidden_states_1_cast)[name = tensor("tensor_3_cast")]; + tensor var_141 = const()[name = tensor("op_141"), val = tensor([1, -1, 20, 64])]; + tensor var_142_cast = reshape(shape = var_141, x = tensor_3_cast)[name = tensor("op_142_cast")]; + tensor var_143_perm_0 = const()[name = tensor("op_143_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_150 = const()[name = tensor("op_150"), val = tensor([1, 77, 20, 64])]; + tensor var_151_cast = reshape(shape = var_150, x = tensor_5_cast)[name = tensor("op_151_cast")]; + tensor var_152_perm_0 = const()[name = tensor("op_152_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_154 = const()[name = tensor("op_154"), val = tensor([20, -1, 64])]; + tensor transpose_158 = transpose(perm = var_152_perm_0, x = var_151_cast)[name = tensor("transpose_158")]; + tensor query_states_1_cast = reshape(shape = var_154, x = transpose_158)[name = tensor("query_states_1_cast")]; + tensor var_156 = const()[name = tensor("op_156"), val = tensor([20, -1, 64])]; + tensor transpose_160 = transpose(perm = var_136_perm_0, x = var_135_cast)[name = tensor("transpose_160")]; + tensor key_states_3_cast = reshape(shape = var_156, x = transpose_160)[name = tensor("key_states_3_cast")]; + tensor var_158 = const()[name = 
tensor("op_158"), val = tensor([20, -1, 64])]; + tensor transpose_159 = transpose(perm = var_143_perm_0, x = var_142_cast)[name = tensor("transpose_159")]; + tensor value_states_3_cast = reshape(shape = var_158, x = transpose_159)[name = tensor("value_states_3_cast")]; + tensor var_161_perm_0 = const()[name = tensor("op_161_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_1_transpose_x_0 = const()[name = tensor("attn_weights_1_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_1_transpose_y_0 = const()[name = tensor("attn_weights_1_transpose_y_0"), val = tensor(false)]; + tensor transpose_157 = transpose(perm = var_161_perm_0, x = key_states_3_cast)[name = tensor("transpose_157")]; + tensor attn_weights_1_cast = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = query_states_1_cast, y = transpose_157)[name = tensor("attn_weights_1_cast")]; + tensor var_163 = const()[name = tensor("op_163"), val = tensor([1, 20, 77, 77])]; + tensor var_164_cast = reshape(shape = var_163, x = attn_weights_1_cast)[name = tensor("op_164_cast")]; + tensor causal_attention_mask_to_fp16 = const()[name = tensor("causal_attention_mask_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(136525504)))]; + tensor attn_weights_3_cast = add(x = var_164_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_3_cast")]; + tensor var_169 = const()[name = tensor("op_169"), val = tensor([20, 77, 77])]; + tensor input_5_cast = reshape(shape = var_169, x = attn_weights_3_cast)[name = tensor("input_5_cast")]; + tensor input_7_cast = softmax(axis = var_5, x = input_5_cast)[name = tensor("input_7_cast")]; + tensor attn_output_1_transpose_x_0 = const()[name = tensor("attn_output_1_transpose_x_0"), val = tensor(false)]; + tensor attn_output_1_transpose_y_0 = const()[name = tensor("attn_output_1_transpose_y_0"), val = tensor(false)]; + tensor attn_output_1_cast = matmul(transpose_x = attn_output_1_transpose_x_0, transpose_y = attn_output_1_transpose_y_0, x = input_7_cast, y = value_states_3_cast)[name = tensor("attn_output_1_cast")]; + tensor var_174 = const()[name = tensor("op_174"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_3_cast = reshape(shape = var_174, x = attn_output_1_cast)[name = tensor("attn_output_3_cast")]; + tensor attn_output_5_perm_0 = const()[name = tensor("attn_output_5_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_177 = const()[name = tensor("op_177"), val = tensor([1, 77, 1280])]; + tensor transpose_156 = transpose(perm = attn_output_5_perm_0, x = attn_output_3_cast)[name = tensor("transpose_156")]; + tensor input_9_cast = reshape(shape = var_177, x = transpose_156)[name = tensor("input_9_cast")]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(136537472)))]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139814336)))]; + tensor hidden_states_3_cast = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight_to_fp16, x = 
input_9_cast)[name = tensor("hidden_states_3_cast")]; + tensor input_11_cast = add(x = input_3_cast, y = hidden_states_3_cast)[name = tensor("input_11_cast")]; + tensor input_13_axes_0 = const()[name = tensor("input_13_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_0_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139816960)))]; + tensor text_encoder_text_model_encoder_layers_0_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139819584)))]; + tensor input_13_cast = layer_norm(axes = input_13_axes_0, beta = text_encoder_text_model_encoder_layers_0_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_0_layer_norm2_weight_to_fp16, x = input_11_cast)[name = tensor("input_13_cast")]; + tensor text_encoder_text_model_encoder_layers_0_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139822208)))]; + tensor text_encoder_text_model_encoder_layers_0_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152929472)))]; + tensor input_15_cast = linear(bias = text_encoder_text_model_encoder_layers_0_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_mlp_fc1_weight_to_fp16, x = input_13_cast)[name = tensor("input_15_cast")]; + tensor input_17_mode_0 = const()[name = tensor("input_17_mode_0"), val = tensor("EXACT")]; + tensor input_17_cast = gelu(mode = input_17_mode_0, x = input_15_cast)[name = tensor("input_17_cast")]; + tensor text_encoder_text_model_encoder_layers_0_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152939776)))]; + tensor text_encoder_text_model_encoder_layers_0_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(166047040)))]; + tensor hidden_states_5_cast = linear(bias = text_encoder_text_model_encoder_layers_0_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_mlp_fc2_weight_to_fp16, x = input_17_cast)[name = tensor("hidden_states_5_cast")]; + tensor input_19_cast = add(x = input_11_cast, y = hidden_states_5_cast)[name = tensor("input_19_cast")]; + tensor hidden_states_7_axes_0 = const()[name = tensor("hidden_states_7_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_1_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(166049664)))]; + tensor text_encoder_text_model_encoder_layers_1_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(166052288)))]; + tensor hidden_states_7_cast = layer_norm(axes = hidden_states_7_axes_0, beta = text_encoder_text_model_encoder_layers_1_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_1_layer_norm1_weight_to_fp16, x = input_19_cast)[name = tensor("hidden_states_7_cast")]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(166054912)))]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(169331776)))]; + tensor var_215_cast = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight_to_fp16, x = hidden_states_7_cast)[name = tensor("op_215_cast")]; + tensor var_216_to_fp16 = const()[name = tensor("op_216_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_11_cast = mul(x = var_215_cast, y = var_216_to_fp16)[name = tensor("tensor_11_cast")]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(169334400)))]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(172611264)))]; + tensor tensor_7_cast = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight_to_fp16, x = hidden_states_7_cast)[name = tensor("tensor_7_cast")]; + tensor var_221 = const()[name = tensor("op_221"), val = tensor([1, -1, 20, 64])]; + tensor var_222_cast = reshape(shape = var_221, x = tensor_7_cast)[name = tensor("op_222_cast")]; + tensor var_223_perm_0 = const()[name = tensor("op_223_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(172613888)))]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(175890752)))]; + tensor tensor_9_cast = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight_to_fp16, x = hidden_states_7_cast)[name = tensor("tensor_9_cast")]; + tensor var_228 = const()[name = tensor("op_228"), val = tensor([1, -1, 20, 64])]; + tensor var_229_cast = reshape(shape = var_228, x = tensor_9_cast)[name = tensor("op_229_cast")]; + tensor var_230_perm_0 = const()[name = tensor("op_230_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_237 = 
const()[name = tensor("op_237"), val = tensor([1, 77, 20, 64])]; + tensor var_238_cast = reshape(shape = var_237, x = tensor_11_cast)[name = tensor("op_238_cast")]; + tensor var_239_perm_0 = const()[name = tensor("op_239_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_241 = const()[name = tensor("op_241"), val = tensor([20, -1, 64])]; + tensor transpose_153 = transpose(perm = var_239_perm_0, x = var_238_cast)[name = tensor("transpose_153")]; + tensor query_states_3_cast = reshape(shape = var_241, x = transpose_153)[name = tensor("query_states_3_cast")]; + tensor var_243 = const()[name = tensor("op_243"), val = tensor([20, -1, 64])]; + tensor transpose_155 = transpose(perm = var_223_perm_0, x = var_222_cast)[name = tensor("transpose_155")]; + tensor key_states_7_cast = reshape(shape = var_243, x = transpose_155)[name = tensor("key_states_7_cast")]; + tensor var_245 = const()[name = tensor("op_245"), val = tensor([20, -1, 64])]; + tensor transpose_154 = transpose(perm = var_230_perm_0, x = var_229_cast)[name = tensor("transpose_154")]; + tensor value_states_7_cast = reshape(shape = var_245, x = transpose_154)[name = tensor("value_states_7_cast")]; + tensor var_248_perm_0 = const()[name = tensor("op_248_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_7_transpose_x_0 = const()[name = tensor("attn_weights_7_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_7_transpose_y_0 = const()[name = tensor("attn_weights_7_transpose_y_0"), val = tensor(false)]; + tensor transpose_152 = transpose(perm = var_248_perm_0, x = key_states_7_cast)[name = tensor("transpose_152")]; + tensor attn_weights_7_cast = matmul(transpose_x = attn_weights_7_transpose_x_0, transpose_y = attn_weights_7_transpose_y_0, x = query_states_3_cast, y = transpose_152)[name = tensor("attn_weights_7_cast")]; + tensor var_250 = const()[name = tensor("op_250"), val = tensor([1, 20, 77, 77])]; + tensor var_251_cast = reshape(shape = var_250, x = attn_weights_7_cast)[name = tensor("op_251_cast")]; + tensor attn_weights_9_cast = add(x = var_251_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_9_cast")]; + tensor var_256 = const()[name = tensor("op_256"), val = tensor([20, 77, 77])]; + tensor input_21_cast = reshape(shape = var_256, x = attn_weights_9_cast)[name = tensor("input_21_cast")]; + tensor input_23_cast = softmax(axis = var_5, x = input_21_cast)[name = tensor("input_23_cast")]; + tensor attn_output_7_transpose_x_0 = const()[name = tensor("attn_output_7_transpose_x_0"), val = tensor(false)]; + tensor attn_output_7_transpose_y_0 = const()[name = tensor("attn_output_7_transpose_y_0"), val = tensor(false)]; + tensor attn_output_7_cast = matmul(transpose_x = attn_output_7_transpose_x_0, transpose_y = attn_output_7_transpose_y_0, x = input_23_cast, y = value_states_7_cast)[name = tensor("attn_output_7_cast")]; + tensor var_261 = const()[name = tensor("op_261"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_9_cast = reshape(shape = var_261, x = attn_output_7_cast)[name = tensor("attn_output_9_cast")]; + tensor attn_output_11_perm_0 = const()[name = tensor("attn_output_11_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_264 = const()[name = tensor("op_264"), val = tensor([1, 77, 1280])]; + tensor transpose_151 = transpose(perm = attn_output_11_perm_0, x = attn_output_9_cast)[name = tensor("transpose_151")]; + tensor input_25_cast = reshape(shape = var_264, x = transpose_151)[name = tensor("input_25_cast")]; + tensor 
text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(175893376)))]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179170240)))]; + tensor hidden_states_9_cast = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight_to_fp16, x = input_25_cast)[name = tensor("hidden_states_9_cast")]; + tensor input_27_cast = add(x = input_19_cast, y = hidden_states_9_cast)[name = tensor("input_27_cast")]; + tensor input_29_axes_0 = const()[name = tensor("input_29_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_1_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179172864)))]; + tensor text_encoder_text_model_encoder_layers_1_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179175488)))]; + tensor input_29_cast = layer_norm(axes = input_29_axes_0, beta = text_encoder_text_model_encoder_layers_1_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_1_layer_norm2_weight_to_fp16, x = input_27_cast)[name = tensor("input_29_cast")]; + tensor text_encoder_text_model_encoder_layers_1_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179178112)))]; + tensor text_encoder_text_model_encoder_layers_1_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(192285376)))]; + tensor input_31_cast = linear(bias = text_encoder_text_model_encoder_layers_1_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_mlp_fc1_weight_to_fp16, x = input_29_cast)[name = tensor("input_31_cast")]; + tensor input_33_mode_0 = const()[name = tensor("input_33_mode_0"), val = tensor("EXACT")]; + tensor input_33_cast = gelu(mode = input_33_mode_0, x = input_31_cast)[name = tensor("input_33_cast")]; + tensor text_encoder_text_model_encoder_layers_1_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(192295680)))]; + tensor text_encoder_text_model_encoder_layers_1_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(205402944)))]; + tensor hidden_states_11_cast = linear(bias = text_encoder_text_model_encoder_layers_1_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_mlp_fc2_weight_to_fp16, x = 
input_33_cast)[name = tensor("hidden_states_11_cast")]; + tensor input_35_cast = add(x = input_27_cast, y = hidden_states_11_cast)[name = tensor("input_35_cast")]; + tensor hidden_states_13_axes_0 = const()[name = tensor("hidden_states_13_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_2_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(205405568)))]; + tensor text_encoder_text_model_encoder_layers_2_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(205408192)))]; + tensor hidden_states_13_cast = layer_norm(axes = hidden_states_13_axes_0, beta = text_encoder_text_model_encoder_layers_2_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_2_layer_norm1_weight_to_fp16, x = input_35_cast)[name = tensor("hidden_states_13_cast")]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(205410816)))]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(208687680)))]; + tensor var_302_cast = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight_to_fp16, x = hidden_states_13_cast)[name = tensor("op_302_cast")]; + tensor var_303_to_fp16 = const()[name = tensor("op_303_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_17_cast = mul(x = var_302_cast, y = var_303_to_fp16)[name = tensor("tensor_17_cast")]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(208690304)))]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(211967168)))]; + tensor tensor_13_cast = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight_to_fp16, x = hidden_states_13_cast)[name = tensor("tensor_13_cast")]; + tensor var_308 = const()[name = tensor("op_308"), val = tensor([1, -1, 20, 64])]; + tensor var_309_cast = reshape(shape = var_308, x = tensor_13_cast)[name = tensor("op_309_cast")]; + tensor var_310_perm_0 = const()[name = tensor("op_310_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(211969792)))]; + tensor 
text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(215246656)))]; + tensor tensor_15_cast = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight_to_fp16, x = hidden_states_13_cast)[name = tensor("tensor_15_cast")]; + tensor var_315 = const()[name = tensor("op_315"), val = tensor([1, -1, 20, 64])]; + tensor var_316_cast = reshape(shape = var_315, x = tensor_15_cast)[name = tensor("op_316_cast")]; + tensor var_317_perm_0 = const()[name = tensor("op_317_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_324 = const()[name = tensor("op_324"), val = tensor([1, 77, 20, 64])]; + tensor var_325_cast = reshape(shape = var_324, x = tensor_17_cast)[name = tensor("op_325_cast")]; + tensor var_326_perm_0 = const()[name = tensor("op_326_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_328 = const()[name = tensor("op_328"), val = tensor([20, -1, 64])]; + tensor transpose_148 = transpose(perm = var_326_perm_0, x = var_325_cast)[name = tensor("transpose_148")]; + tensor query_states_5_cast = reshape(shape = var_328, x = transpose_148)[name = tensor("query_states_5_cast")]; + tensor var_330 = const()[name = tensor("op_330"), val = tensor([20, -1, 64])]; + tensor transpose_150 = transpose(perm = var_310_perm_0, x = var_309_cast)[name = tensor("transpose_150")]; + tensor key_states_11_cast = reshape(shape = var_330, x = transpose_150)[name = tensor("key_states_11_cast")]; + tensor var_332 = const()[name = tensor("op_332"), val = tensor([20, -1, 64])]; + tensor transpose_149 = transpose(perm = var_317_perm_0, x = var_316_cast)[name = tensor("transpose_149")]; + tensor value_states_11_cast = reshape(shape = var_332, x = transpose_149)[name = tensor("value_states_11_cast")]; + tensor var_335_perm_0 = const()[name = tensor("op_335_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_13_transpose_x_0 = const()[name = tensor("attn_weights_13_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_13_transpose_y_0 = const()[name = tensor("attn_weights_13_transpose_y_0"), val = tensor(false)]; + tensor transpose_147 = transpose(perm = var_335_perm_0, x = key_states_11_cast)[name = tensor("transpose_147")]; + tensor attn_weights_13_cast = matmul(transpose_x = attn_weights_13_transpose_x_0, transpose_y = attn_weights_13_transpose_y_0, x = query_states_5_cast, y = transpose_147)[name = tensor("attn_weights_13_cast")]; + tensor var_337 = const()[name = tensor("op_337"), val = tensor([1, 20, 77, 77])]; + tensor var_338_cast = reshape(shape = var_337, x = attn_weights_13_cast)[name = tensor("op_338_cast")]; + tensor attn_weights_15_cast = add(x = var_338_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_15_cast")]; + tensor var_343 = const()[name = tensor("op_343"), val = tensor([20, 77, 77])]; + tensor input_37_cast = reshape(shape = var_343, x = attn_weights_15_cast)[name = tensor("input_37_cast")]; + tensor input_39_cast = softmax(axis = var_5, x = input_37_cast)[name = tensor("input_39_cast")]; + tensor attn_output_13_transpose_x_0 = const()[name = tensor("attn_output_13_transpose_x_0"), val = tensor(false)]; + tensor attn_output_13_transpose_y_0 = const()[name = tensor("attn_output_13_transpose_y_0"), val = tensor(false)]; + tensor attn_output_13_cast = matmul(transpose_x = 
attn_output_13_transpose_x_0, transpose_y = attn_output_13_transpose_y_0, x = input_39_cast, y = value_states_11_cast)[name = tensor("attn_output_13_cast")]; + tensor var_348 = const()[name = tensor("op_348"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_15_cast = reshape(shape = var_348, x = attn_output_13_cast)[name = tensor("attn_output_15_cast")]; + tensor attn_output_17_perm_0 = const()[name = tensor("attn_output_17_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_351 = const()[name = tensor("op_351"), val = tensor([1, 77, 1280])]; + tensor transpose_146 = transpose(perm = attn_output_17_perm_0, x = attn_output_15_cast)[name = tensor("transpose_146")]; + tensor input_41_cast = reshape(shape = var_351, x = transpose_146)[name = tensor("input_41_cast")]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(215249280)))]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(218526144)))]; + tensor hidden_states_15_cast = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight_to_fp16, x = input_41_cast)[name = tensor("hidden_states_15_cast")]; + tensor input_43_cast = add(x = input_35_cast, y = hidden_states_15_cast)[name = tensor("input_43_cast")]; + tensor input_45_axes_0 = const()[name = tensor("input_45_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_2_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(218528768)))]; + tensor text_encoder_text_model_encoder_layers_2_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(218531392)))]; + tensor input_45_cast = layer_norm(axes = input_45_axes_0, beta = text_encoder_text_model_encoder_layers_2_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_2_layer_norm2_weight_to_fp16, x = input_43_cast)[name = tensor("input_45_cast")]; + tensor text_encoder_text_model_encoder_layers_2_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(218534016)))]; + tensor text_encoder_text_model_encoder_layers_2_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(231641280)))]; + tensor input_47_cast = linear(bias = text_encoder_text_model_encoder_layers_2_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_mlp_fc1_weight_to_fp16, x = input_45_cast)[name = tensor("input_47_cast")]; + tensor input_49_mode_0 = const()[name = tensor("input_49_mode_0"), val = tensor("EXACT")]; + tensor input_49_cast = gelu(mode = 
input_49_mode_0, x = input_47_cast)[name = tensor("input_49_cast")]; + tensor text_encoder_text_model_encoder_layers_2_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(231651584)))]; + tensor text_encoder_text_model_encoder_layers_2_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244758848)))]; + tensor hidden_states_17_cast = linear(bias = text_encoder_text_model_encoder_layers_2_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_mlp_fc2_weight_to_fp16, x = input_49_cast)[name = tensor("hidden_states_17_cast")]; + tensor input_51_cast = add(x = input_43_cast, y = hidden_states_17_cast)[name = tensor("input_51_cast")]; + tensor hidden_states_19_axes_0 = const()[name = tensor("hidden_states_19_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_3_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244761472)))]; + tensor text_encoder_text_model_encoder_layers_3_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244764096)))]; + tensor hidden_states_19_cast = layer_norm(axes = hidden_states_19_axes_0, beta = text_encoder_text_model_encoder_layers_3_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_3_layer_norm1_weight_to_fp16, x = input_51_cast)[name = tensor("hidden_states_19_cast")]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244766720)))]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(248043584)))]; + tensor var_389_cast = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight_to_fp16, x = hidden_states_19_cast)[name = tensor("op_389_cast")]; + tensor var_390_to_fp16 = const()[name = tensor("op_390_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_23_cast = mul(x = var_389_cast, y = var_390_to_fp16)[name = tensor("tensor_23_cast")]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(248046208)))]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(251323072)))]; + tensor tensor_19_cast = linear(bias = 
text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight_to_fp16, x = hidden_states_19_cast)[name = tensor("tensor_19_cast")]; + tensor var_395 = const()[name = tensor("op_395"), val = tensor([1, -1, 20, 64])]; + tensor var_396_cast = reshape(shape = var_395, x = tensor_19_cast)[name = tensor("op_396_cast")]; + tensor var_397_perm_0 = const()[name = tensor("op_397_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(251325696)))]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(254602560)))]; + tensor tensor_21_cast = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight_to_fp16, x = hidden_states_19_cast)[name = tensor("tensor_21_cast")]; + tensor var_402 = const()[name = tensor("op_402"), val = tensor([1, -1, 20, 64])]; + tensor var_403_cast = reshape(shape = var_402, x = tensor_21_cast)[name = tensor("op_403_cast")]; + tensor var_404_perm_0 = const()[name = tensor("op_404_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_411 = const()[name = tensor("op_411"), val = tensor([1, 77, 20, 64])]; + tensor var_412_cast = reshape(shape = var_411, x = tensor_23_cast)[name = tensor("op_412_cast")]; + tensor var_413_perm_0 = const()[name = tensor("op_413_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_415 = const()[name = tensor("op_415"), val = tensor([20, -1, 64])]; + tensor transpose_143 = transpose(perm = var_413_perm_0, x = var_412_cast)[name = tensor("transpose_143")]; + tensor query_states_7_cast = reshape(shape = var_415, x = transpose_143)[name = tensor("query_states_7_cast")]; + tensor var_417 = const()[name = tensor("op_417"), val = tensor([20, -1, 64])]; + tensor transpose_145 = transpose(perm = var_397_perm_0, x = var_396_cast)[name = tensor("transpose_145")]; + tensor key_states_15_cast = reshape(shape = var_417, x = transpose_145)[name = tensor("key_states_15_cast")]; + tensor var_419 = const()[name = tensor("op_419"), val = tensor([20, -1, 64])]; + tensor transpose_144 = transpose(perm = var_404_perm_0, x = var_403_cast)[name = tensor("transpose_144")]; + tensor value_states_15_cast = reshape(shape = var_419, x = transpose_144)[name = tensor("value_states_15_cast")]; + tensor var_422_perm_0 = const()[name = tensor("op_422_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_19_transpose_x_0 = const()[name = tensor("attn_weights_19_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_19_transpose_y_0 = const()[name = tensor("attn_weights_19_transpose_y_0"), val = tensor(false)]; + tensor transpose_142 = transpose(perm = var_422_perm_0, x = key_states_15_cast)[name = tensor("transpose_142")]; + tensor attn_weights_19_cast = matmul(transpose_x = attn_weights_19_transpose_x_0, transpose_y = attn_weights_19_transpose_y_0, x = query_states_7_cast, y = transpose_142)[name = tensor("attn_weights_19_cast")]; + tensor var_424 = const()[name = tensor("op_424"), val = tensor([1, 20, 77, 77])]; + tensor var_425_cast = 
reshape(shape = var_424, x = attn_weights_19_cast)[name = tensor("op_425_cast")]; + tensor attn_weights_21_cast = add(x = var_425_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_21_cast")]; + tensor var_430 = const()[name = tensor("op_430"), val = tensor([20, 77, 77])]; + tensor input_53_cast = reshape(shape = var_430, x = attn_weights_21_cast)[name = tensor("input_53_cast")]; + tensor input_55_cast = softmax(axis = var_5, x = input_53_cast)[name = tensor("input_55_cast")]; + tensor attn_output_19_transpose_x_0 = const()[name = tensor("attn_output_19_transpose_x_0"), val = tensor(false)]; + tensor attn_output_19_transpose_y_0 = const()[name = tensor("attn_output_19_transpose_y_0"), val = tensor(false)]; + tensor attn_output_19_cast = matmul(transpose_x = attn_output_19_transpose_x_0, transpose_y = attn_output_19_transpose_y_0, x = input_55_cast, y = value_states_15_cast)[name = tensor("attn_output_19_cast")]; + tensor var_435 = const()[name = tensor("op_435"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_21_cast = reshape(shape = var_435, x = attn_output_19_cast)[name = tensor("attn_output_21_cast")]; + tensor attn_output_23_perm_0 = const()[name = tensor("attn_output_23_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_438 = const()[name = tensor("op_438"), val = tensor([1, 77, 1280])]; + tensor transpose_141 = transpose(perm = attn_output_23_perm_0, x = attn_output_21_cast)[name = tensor("transpose_141")]; + tensor input_57_cast = reshape(shape = var_438, x = transpose_141)[name = tensor("input_57_cast")]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(254605184)))]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(257882048)))]; + tensor hidden_states_21_cast = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight_to_fp16, x = input_57_cast)[name = tensor("hidden_states_21_cast")]; + tensor input_59_cast = add(x = input_51_cast, y = hidden_states_21_cast)[name = tensor("input_59_cast")]; + tensor input_61_axes_0 = const()[name = tensor("input_61_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_3_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(257884672)))]; + tensor text_encoder_text_model_encoder_layers_3_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(257887296)))]; + tensor input_61_cast = layer_norm(axes = input_61_axes_0, beta = text_encoder_text_model_encoder_layers_3_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_3_layer_norm2_weight_to_fp16, x = input_59_cast)[name = tensor("input_61_cast")]; + tensor text_encoder_text_model_encoder_layers_3_mlp_fc1_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_3_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(257889920)))]; + tensor text_encoder_text_model_encoder_layers_3_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(270997184)))]; + tensor input_63_cast = linear(bias = text_encoder_text_model_encoder_layers_3_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_mlp_fc1_weight_to_fp16, x = input_61_cast)[name = tensor("input_63_cast")]; + tensor input_65_mode_0 = const()[name = tensor("input_65_mode_0"), val = tensor("EXACT")]; + tensor input_65_cast = gelu(mode = input_65_mode_0, x = input_63_cast)[name = tensor("input_65_cast")]; + tensor text_encoder_text_model_encoder_layers_3_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(271007488)))]; + tensor text_encoder_text_model_encoder_layers_3_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(284114752)))]; + tensor hidden_states_23_cast = linear(bias = text_encoder_text_model_encoder_layers_3_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_mlp_fc2_weight_to_fp16, x = input_65_cast)[name = tensor("hidden_states_23_cast")]; + tensor input_67_cast = add(x = input_59_cast, y = hidden_states_23_cast)[name = tensor("input_67_cast")]; + tensor hidden_states_25_axes_0 = const()[name = tensor("hidden_states_25_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_4_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(284117376)))]; + tensor text_encoder_text_model_encoder_layers_4_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(284120000)))]; + tensor hidden_states_25_cast = layer_norm(axes = hidden_states_25_axes_0, beta = text_encoder_text_model_encoder_layers_4_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_4_layer_norm1_weight_to_fp16, x = input_67_cast)[name = tensor("hidden_states_25_cast")]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(284122624)))]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(287399488)))]; + tensor var_476_cast = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight_to_fp16, x = hidden_states_25_cast)[name = tensor("op_476_cast")]; + tensor 
var_477_to_fp16 = const()[name = tensor("op_477_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_29_cast = mul(x = var_476_cast, y = var_477_to_fp16)[name = tensor("tensor_29_cast")]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(287402112)))]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(290678976)))]; + tensor tensor_25_cast = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight_to_fp16, x = hidden_states_25_cast)[name = tensor("tensor_25_cast")]; + tensor var_482 = const()[name = tensor("op_482"), val = tensor([1, -1, 20, 64])]; + tensor var_483_cast = reshape(shape = var_482, x = tensor_25_cast)[name = tensor("op_483_cast")]; + tensor var_484_perm_0 = const()[name = tensor("op_484_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(290681600)))]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293958464)))]; + tensor tensor_27_cast = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight_to_fp16, x = hidden_states_25_cast)[name = tensor("tensor_27_cast")]; + tensor var_489 = const()[name = tensor("op_489"), val = tensor([1, -1, 20, 64])]; + tensor var_490_cast = reshape(shape = var_489, x = tensor_27_cast)[name = tensor("op_490_cast")]; + tensor var_491_perm_0 = const()[name = tensor("op_491_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_498 = const()[name = tensor("op_498"), val = tensor([1, 77, 20, 64])]; + tensor var_499_cast = reshape(shape = var_498, x = tensor_29_cast)[name = tensor("op_499_cast")]; + tensor var_500_perm_0 = const()[name = tensor("op_500_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_502 = const()[name = tensor("op_502"), val = tensor([20, -1, 64])]; + tensor transpose_138 = transpose(perm = var_500_perm_0, x = var_499_cast)[name = tensor("transpose_138")]; + tensor query_states_9_cast = reshape(shape = var_502, x = transpose_138)[name = tensor("query_states_9_cast")]; + tensor var_504 = const()[name = tensor("op_504"), val = tensor([20, -1, 64])]; + tensor transpose_140 = transpose(perm = var_484_perm_0, x = var_483_cast)[name = tensor("transpose_140")]; + tensor key_states_19_cast = reshape(shape = var_504, x = transpose_140)[name = tensor("key_states_19_cast")]; + tensor var_506 = const()[name = tensor("op_506"), val = tensor([20, -1, 64])]; + tensor transpose_139 = transpose(perm = var_491_perm_0, x = var_490_cast)[name = tensor("transpose_139")]; + tensor value_states_19_cast = reshape(shape = var_506, x = transpose_139)[name = 
tensor("value_states_19_cast")]; + tensor var_509_perm_0 = const()[name = tensor("op_509_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_25_transpose_x_0 = const()[name = tensor("attn_weights_25_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_25_transpose_y_0 = const()[name = tensor("attn_weights_25_transpose_y_0"), val = tensor(false)]; + tensor transpose_137 = transpose(perm = var_509_perm_0, x = key_states_19_cast)[name = tensor("transpose_137")]; + tensor attn_weights_25_cast = matmul(transpose_x = attn_weights_25_transpose_x_0, transpose_y = attn_weights_25_transpose_y_0, x = query_states_9_cast, y = transpose_137)[name = tensor("attn_weights_25_cast")]; + tensor var_511 = const()[name = tensor("op_511"), val = tensor([1, 20, 77, 77])]; + tensor var_512_cast = reshape(shape = var_511, x = attn_weights_25_cast)[name = tensor("op_512_cast")]; + tensor attn_weights_27_cast = add(x = var_512_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_27_cast")]; + tensor var_517 = const()[name = tensor("op_517"), val = tensor([20, 77, 77])]; + tensor input_69_cast = reshape(shape = var_517, x = attn_weights_27_cast)[name = tensor("input_69_cast")]; + tensor input_71_cast = softmax(axis = var_5, x = input_69_cast)[name = tensor("input_71_cast")]; + tensor attn_output_25_transpose_x_0 = const()[name = tensor("attn_output_25_transpose_x_0"), val = tensor(false)]; + tensor attn_output_25_transpose_y_0 = const()[name = tensor("attn_output_25_transpose_y_0"), val = tensor(false)]; + tensor attn_output_25_cast = matmul(transpose_x = attn_output_25_transpose_x_0, transpose_y = attn_output_25_transpose_y_0, x = input_71_cast, y = value_states_19_cast)[name = tensor("attn_output_25_cast")]; + tensor var_522 = const()[name = tensor("op_522"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_27_cast = reshape(shape = var_522, x = attn_output_25_cast)[name = tensor("attn_output_27_cast")]; + tensor attn_output_29_perm_0 = const()[name = tensor("attn_output_29_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_525 = const()[name = tensor("op_525"), val = tensor([1, 77, 1280])]; + tensor transpose_136 = transpose(perm = attn_output_29_perm_0, x = attn_output_27_cast)[name = tensor("transpose_136")]; + tensor input_73_cast = reshape(shape = var_525, x = transpose_136)[name = tensor("input_73_cast")]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293961088)))]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(297237952)))]; + tensor hidden_states_27_cast = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight_to_fp16, x = input_73_cast)[name = tensor("hidden_states_27_cast")]; + tensor input_75_cast = add(x = input_67_cast, y = hidden_states_27_cast)[name = tensor("input_75_cast")]; + tensor input_77_axes_0 = const()[name = tensor("input_77_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_4_layer_norm2_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_4_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(297240576)))]; + tensor text_encoder_text_model_encoder_layers_4_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(297243200)))]; + tensor input_77_cast = layer_norm(axes = input_77_axes_0, beta = text_encoder_text_model_encoder_layers_4_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_4_layer_norm2_weight_to_fp16, x = input_75_cast)[name = tensor("input_77_cast")]; + tensor text_encoder_text_model_encoder_layers_4_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(297245824)))]; + tensor text_encoder_text_model_encoder_layers_4_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(310353088)))]; + tensor input_79_cast = linear(bias = text_encoder_text_model_encoder_layers_4_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_mlp_fc1_weight_to_fp16, x = input_77_cast)[name = tensor("input_79_cast")]; + tensor input_81_mode_0 = const()[name = tensor("input_81_mode_0"), val = tensor("EXACT")]; + tensor input_81_cast = gelu(mode = input_81_mode_0, x = input_79_cast)[name = tensor("input_81_cast")]; + tensor text_encoder_text_model_encoder_layers_4_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(310363392)))]; + tensor text_encoder_text_model_encoder_layers_4_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323470656)))]; + tensor hidden_states_29_cast = linear(bias = text_encoder_text_model_encoder_layers_4_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_mlp_fc2_weight_to_fp16, x = input_81_cast)[name = tensor("hidden_states_29_cast")]; + tensor input_83_cast = add(x = input_75_cast, y = hidden_states_29_cast)[name = tensor("input_83_cast")]; + tensor hidden_states_31_axes_0 = const()[name = tensor("hidden_states_31_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_5_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323473280)))]; + tensor text_encoder_text_model_encoder_layers_5_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323475904)))]; + tensor hidden_states_31_cast = layer_norm(axes = hidden_states_31_axes_0, beta = text_encoder_text_model_encoder_layers_5_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_5_layer_norm1_weight_to_fp16, x = input_83_cast)[name = tensor("hidden_states_31_cast")]; + tensor 
text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323478528)))]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(326755392)))]; + tensor var_563_cast = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight_to_fp16, x = hidden_states_31_cast)[name = tensor("op_563_cast")]; + tensor var_564_to_fp16 = const()[name = tensor("op_564_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_35_cast = mul(x = var_563_cast, y = var_564_to_fp16)[name = tensor("tensor_35_cast")]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(326758016)))]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(330034880)))]; + tensor tensor_31_cast = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight_to_fp16, x = hidden_states_31_cast)[name = tensor("tensor_31_cast")]; + tensor var_569 = const()[name = tensor("op_569"), val = tensor([1, -1, 20, 64])]; + tensor var_570_cast = reshape(shape = var_569, x = tensor_31_cast)[name = tensor("op_570_cast")]; + tensor var_571_perm_0 = const()[name = tensor("op_571_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(330037504)))]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(333314368)))]; + tensor tensor_33_cast = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight_to_fp16, x = hidden_states_31_cast)[name = tensor("tensor_33_cast")]; + tensor var_576 = const()[name = tensor("op_576"), val = tensor([1, -1, 20, 64])]; + tensor var_577_cast = reshape(shape = var_576, x = tensor_33_cast)[name = tensor("op_577_cast")]; + tensor var_578_perm_0 = const()[name = tensor("op_578_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_585 = const()[name = tensor("op_585"), val = tensor([1, 77, 20, 64])]; + tensor var_586_cast = reshape(shape = var_585, x = tensor_35_cast)[name = tensor("op_586_cast")]; + tensor var_587_perm_0 = const()[name = tensor("op_587_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_589 = const()[name = tensor("op_589"), val = tensor([20, 
-1, 64])]; + tensor transpose_133 = transpose(perm = var_587_perm_0, x = var_586_cast)[name = tensor("transpose_133")]; + tensor query_states_11_cast = reshape(shape = var_589, x = transpose_133)[name = tensor("query_states_11_cast")]; + tensor var_591 = const()[name = tensor("op_591"), val = tensor([20, -1, 64])]; + tensor transpose_135 = transpose(perm = var_571_perm_0, x = var_570_cast)[name = tensor("transpose_135")]; + tensor key_states_23_cast = reshape(shape = var_591, x = transpose_135)[name = tensor("key_states_23_cast")]; + tensor var_593 = const()[name = tensor("op_593"), val = tensor([20, -1, 64])]; + tensor transpose_134 = transpose(perm = var_578_perm_0, x = var_577_cast)[name = tensor("transpose_134")]; + tensor value_states_23_cast = reshape(shape = var_593, x = transpose_134)[name = tensor("value_states_23_cast")]; + tensor var_596_perm_0 = const()[name = tensor("op_596_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_31_transpose_x_0 = const()[name = tensor("attn_weights_31_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_31_transpose_y_0 = const()[name = tensor("attn_weights_31_transpose_y_0"), val = tensor(false)]; + tensor transpose_132 = transpose(perm = var_596_perm_0, x = key_states_23_cast)[name = tensor("transpose_132")]; + tensor attn_weights_31_cast = matmul(transpose_x = attn_weights_31_transpose_x_0, transpose_y = attn_weights_31_transpose_y_0, x = query_states_11_cast, y = transpose_132)[name = tensor("attn_weights_31_cast")]; + tensor var_598 = const()[name = tensor("op_598"), val = tensor([1, 20, 77, 77])]; + tensor var_599_cast = reshape(shape = var_598, x = attn_weights_31_cast)[name = tensor("op_599_cast")]; + tensor attn_weights_33_cast = add(x = var_599_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_33_cast")]; + tensor var_604 = const()[name = tensor("op_604"), val = tensor([20, 77, 77])]; + tensor input_85_cast = reshape(shape = var_604, x = attn_weights_33_cast)[name = tensor("input_85_cast")]; + tensor input_87_cast = softmax(axis = var_5, x = input_85_cast)[name = tensor("input_87_cast")]; + tensor attn_output_31_transpose_x_0 = const()[name = tensor("attn_output_31_transpose_x_0"), val = tensor(false)]; + tensor attn_output_31_transpose_y_0 = const()[name = tensor("attn_output_31_transpose_y_0"), val = tensor(false)]; + tensor attn_output_31_cast = matmul(transpose_x = attn_output_31_transpose_x_0, transpose_y = attn_output_31_transpose_y_0, x = input_87_cast, y = value_states_23_cast)[name = tensor("attn_output_31_cast")]; + tensor var_609 = const()[name = tensor("op_609"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_33_cast = reshape(shape = var_609, x = attn_output_31_cast)[name = tensor("attn_output_33_cast")]; + tensor attn_output_35_perm_0 = const()[name = tensor("attn_output_35_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_612 = const()[name = tensor("op_612"), val = tensor([1, 77, 1280])]; + tensor transpose_131 = transpose(perm = attn_output_35_perm_0, x = attn_output_33_cast)[name = tensor("transpose_131")]; + tensor input_89_cast = reshape(shape = var_612, x = transpose_131)[name = tensor("input_89_cast")]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(333316992)))]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias_to_fp16 = 
const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336593856)))]; + tensor hidden_states_33_cast = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight_to_fp16, x = input_89_cast)[name = tensor("hidden_states_33_cast")]; + tensor input_91_cast = add(x = input_83_cast, y = hidden_states_33_cast)[name = tensor("input_91_cast")]; + tensor input_93_axes_0 = const()[name = tensor("input_93_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_5_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336596480)))]; + tensor text_encoder_text_model_encoder_layers_5_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336599104)))]; + tensor input_93_cast = layer_norm(axes = input_93_axes_0, beta = text_encoder_text_model_encoder_layers_5_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_5_layer_norm2_weight_to_fp16, x = input_91_cast)[name = tensor("input_93_cast")]; + tensor text_encoder_text_model_encoder_layers_5_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336601728)))]; + tensor text_encoder_text_model_encoder_layers_5_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349708992)))]; + tensor input_95_cast = linear(bias = text_encoder_text_model_encoder_layers_5_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_mlp_fc1_weight_to_fp16, x = input_93_cast)[name = tensor("input_95_cast")]; + tensor input_97_mode_0 = const()[name = tensor("input_97_mode_0"), val = tensor("EXACT")]; + tensor input_97_cast = gelu(mode = input_97_mode_0, x = input_95_cast)[name = tensor("input_97_cast")]; + tensor text_encoder_text_model_encoder_layers_5_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349719296)))]; + tensor text_encoder_text_model_encoder_layers_5_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(362826560)))]; + tensor hidden_states_35_cast = linear(bias = text_encoder_text_model_encoder_layers_5_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_mlp_fc2_weight_to_fp16, x = input_97_cast)[name = tensor("hidden_states_35_cast")]; + tensor input_99_cast = add(x = input_91_cast, y = hidden_states_35_cast)[name = tensor("input_99_cast")]; + tensor hidden_states_37_axes_0 = const()[name = tensor("hidden_states_37_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_6_layer_norm1_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_6_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(362829184)))]; + tensor text_encoder_text_model_encoder_layers_6_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(362831808)))]; + tensor hidden_states_37_cast = layer_norm(axes = hidden_states_37_axes_0, beta = text_encoder_text_model_encoder_layers_6_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_6_layer_norm1_weight_to_fp16, x = input_99_cast)[name = tensor("hidden_states_37_cast")]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(362834432)))]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(366111296)))]; + tensor var_650_cast = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight_to_fp16, x = hidden_states_37_cast)[name = tensor("op_650_cast")]; + tensor var_651_to_fp16 = const()[name = tensor("op_651_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_41_cast = mul(x = var_650_cast, y = var_651_to_fp16)[name = tensor("tensor_41_cast")]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(366113920)))]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(369390784)))]; + tensor tensor_37_cast = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight_to_fp16, x = hidden_states_37_cast)[name = tensor("tensor_37_cast")]; + tensor var_656 = const()[name = tensor("op_656"), val = tensor([1, -1, 20, 64])]; + tensor var_657_cast = reshape(shape = var_656, x = tensor_37_cast)[name = tensor("op_657_cast")]; + tensor var_658_perm_0 = const()[name = tensor("op_658_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(369393408)))]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372670272)))]; + tensor tensor_39_cast = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias_to_fp16, 
weight = text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight_to_fp16, x = hidden_states_37_cast)[name = tensor("tensor_39_cast")]; + tensor var_663 = const()[name = tensor("op_663"), val = tensor([1, -1, 20, 64])]; + tensor var_664_cast = reshape(shape = var_663, x = tensor_39_cast)[name = tensor("op_664_cast")]; + tensor var_665_perm_0 = const()[name = tensor("op_665_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_672 = const()[name = tensor("op_672"), val = tensor([1, 77, 20, 64])]; + tensor var_673_cast = reshape(shape = var_672, x = tensor_41_cast)[name = tensor("op_673_cast")]; + tensor var_674_perm_0 = const()[name = tensor("op_674_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_676 = const()[name = tensor("op_676"), val = tensor([20, -1, 64])]; + tensor transpose_128 = transpose(perm = var_674_perm_0, x = var_673_cast)[name = tensor("transpose_128")]; + tensor query_states_13_cast = reshape(shape = var_676, x = transpose_128)[name = tensor("query_states_13_cast")]; + tensor var_678 = const()[name = tensor("op_678"), val = tensor([20, -1, 64])]; + tensor transpose_130 = transpose(perm = var_658_perm_0, x = var_657_cast)[name = tensor("transpose_130")]; + tensor key_states_27_cast = reshape(shape = var_678, x = transpose_130)[name = tensor("key_states_27_cast")]; + tensor var_680 = const()[name = tensor("op_680"), val = tensor([20, -1, 64])]; + tensor transpose_129 = transpose(perm = var_665_perm_0, x = var_664_cast)[name = tensor("transpose_129")]; + tensor value_states_27_cast = reshape(shape = var_680, x = transpose_129)[name = tensor("value_states_27_cast")]; + tensor var_683_perm_0 = const()[name = tensor("op_683_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_37_transpose_x_0 = const()[name = tensor("attn_weights_37_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_37_transpose_y_0 = const()[name = tensor("attn_weights_37_transpose_y_0"), val = tensor(false)]; + tensor transpose_127 = transpose(perm = var_683_perm_0, x = key_states_27_cast)[name = tensor("transpose_127")]; + tensor attn_weights_37_cast = matmul(transpose_x = attn_weights_37_transpose_x_0, transpose_y = attn_weights_37_transpose_y_0, x = query_states_13_cast, y = transpose_127)[name = tensor("attn_weights_37_cast")]; + tensor var_685 = const()[name = tensor("op_685"), val = tensor([1, 20, 77, 77])]; + tensor var_686_cast = reshape(shape = var_685, x = attn_weights_37_cast)[name = tensor("op_686_cast")]; + tensor attn_weights_39_cast = add(x = var_686_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_39_cast")]; + tensor var_691 = const()[name = tensor("op_691"), val = tensor([20, 77, 77])]; + tensor input_101_cast = reshape(shape = var_691, x = attn_weights_39_cast)[name = tensor("input_101_cast")]; + tensor input_103_cast = softmax(axis = var_5, x = input_101_cast)[name = tensor("input_103_cast")]; + tensor attn_output_37_transpose_x_0 = const()[name = tensor("attn_output_37_transpose_x_0"), val = tensor(false)]; + tensor attn_output_37_transpose_y_0 = const()[name = tensor("attn_output_37_transpose_y_0"), val = tensor(false)]; + tensor attn_output_37_cast = matmul(transpose_x = attn_output_37_transpose_x_0, transpose_y = attn_output_37_transpose_y_0, x = input_103_cast, y = value_states_27_cast)[name = tensor("attn_output_37_cast")]; + tensor var_696 = const()[name = tensor("op_696"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_39_cast = reshape(shape = var_696, x = attn_output_37_cast)[name = tensor("attn_output_39_cast")]; + tensor 
attn_output_41_perm_0 = const()[name = tensor("attn_output_41_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_699 = const()[name = tensor("op_699"), val = tensor([1, 77, 1280])]; + tensor transpose_126 = transpose(perm = attn_output_41_perm_0, x = attn_output_39_cast)[name = tensor("transpose_126")]; + tensor input_105_cast = reshape(shape = var_699, x = transpose_126)[name = tensor("input_105_cast")]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372672896)))]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(375949760)))]; + tensor hidden_states_39_cast = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_out_proj_weight_to_fp16, x = input_105_cast)[name = tensor("hidden_states_39_cast")]; + tensor input_107_cast = add(x = input_99_cast, y = hidden_states_39_cast)[name = tensor("input_107_cast")]; + tensor input_109_axes_0 = const()[name = tensor("input_109_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_6_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(375952384)))]; + tensor text_encoder_text_model_encoder_layers_6_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(375955008)))]; + tensor input_109_cast = layer_norm(axes = input_109_axes_0, beta = text_encoder_text_model_encoder_layers_6_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_6_layer_norm2_weight_to_fp16, x = input_107_cast)[name = tensor("input_109_cast")]; + tensor text_encoder_text_model_encoder_layers_6_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(375957632)))]; + tensor text_encoder_text_model_encoder_layers_6_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(389064896)))]; + tensor input_111_cast = linear(bias = text_encoder_text_model_encoder_layers_6_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_mlp_fc1_weight_to_fp16, x = input_109_cast)[name = tensor("input_111_cast")]; + tensor input_113_mode_0 = const()[name = tensor("input_113_mode_0"), val = tensor("EXACT")]; + tensor input_113_cast = gelu(mode = input_113_mode_0, x = input_111_cast)[name = tensor("input_113_cast")]; + tensor text_encoder_text_model_encoder_layers_6_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(389075200)))]; + tensor 
text_encoder_text_model_encoder_layers_6_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(402182464)))]; + tensor hidden_states_41_cast = linear(bias = text_encoder_text_model_encoder_layers_6_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_mlp_fc2_weight_to_fp16, x = input_113_cast)[name = tensor("hidden_states_41_cast")]; + tensor input_115_cast = add(x = input_107_cast, y = hidden_states_41_cast)[name = tensor("input_115_cast")]; + tensor hidden_states_43_axes_0 = const()[name = tensor("hidden_states_43_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_7_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(402185088)))]; + tensor text_encoder_text_model_encoder_layers_7_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(402187712)))]; + tensor hidden_states_43_cast = layer_norm(axes = hidden_states_43_axes_0, beta = text_encoder_text_model_encoder_layers_7_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_7_layer_norm1_weight_to_fp16, x = input_115_cast)[name = tensor("hidden_states_43_cast")]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(402190336)))]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(405467200)))]; + tensor var_737_cast = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_self_attn_q_proj_weight_to_fp16, x = hidden_states_43_cast)[name = tensor("op_737_cast")]; + tensor var_738_to_fp16 = const()[name = tensor("op_738_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_47_cast = mul(x = var_737_cast, y = var_738_to_fp16)[name = tensor("tensor_47_cast")]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(405469824)))]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(408746688)))]; + tensor tensor_43_cast = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_self_attn_k_proj_weight_to_fp16, x = hidden_states_43_cast)[name = tensor("tensor_43_cast")]; + tensor var_743 = const()[name = tensor("op_743"), val = tensor([1, -1, 20, 64])]; + tensor var_744_cast = reshape(shape = var_743, x = 
tensor_43_cast)[name = tensor("op_744_cast")]; + tensor var_745_perm_0 = const()[name = tensor("op_745_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(408749312)))]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(412026176)))]; + tensor tensor_45_cast = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_self_attn_v_proj_weight_to_fp16, x = hidden_states_43_cast)[name = tensor("tensor_45_cast")]; + tensor var_750 = const()[name = tensor("op_750"), val = tensor([1, -1, 20, 64])]; + tensor var_751_cast = reshape(shape = var_750, x = tensor_45_cast)[name = tensor("op_751_cast")]; + tensor var_752_perm_0 = const()[name = tensor("op_752_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_759 = const()[name = tensor("op_759"), val = tensor([1, 77, 20, 64])]; + tensor var_760_cast = reshape(shape = var_759, x = tensor_47_cast)[name = tensor("op_760_cast")]; + tensor var_761_perm_0 = const()[name = tensor("op_761_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_763 = const()[name = tensor("op_763"), val = tensor([20, -1, 64])]; + tensor transpose_123 = transpose(perm = var_761_perm_0, x = var_760_cast)[name = tensor("transpose_123")]; + tensor query_states_15_cast = reshape(shape = var_763, x = transpose_123)[name = tensor("query_states_15_cast")]; + tensor var_765 = const()[name = tensor("op_765"), val = tensor([20, -1, 64])]; + tensor transpose_125 = transpose(perm = var_745_perm_0, x = var_744_cast)[name = tensor("transpose_125")]; + tensor key_states_31_cast = reshape(shape = var_765, x = transpose_125)[name = tensor("key_states_31_cast")]; + tensor var_767 = const()[name = tensor("op_767"), val = tensor([20, -1, 64])]; + tensor transpose_124 = transpose(perm = var_752_perm_0, x = var_751_cast)[name = tensor("transpose_124")]; + tensor value_states_31_cast = reshape(shape = var_767, x = transpose_124)[name = tensor("value_states_31_cast")]; + tensor var_770_perm_0 = const()[name = tensor("op_770_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_43_transpose_x_0 = const()[name = tensor("attn_weights_43_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_43_transpose_y_0 = const()[name = tensor("attn_weights_43_transpose_y_0"), val = tensor(false)]; + tensor transpose_122 = transpose(perm = var_770_perm_0, x = key_states_31_cast)[name = tensor("transpose_122")]; + tensor attn_weights_43_cast = matmul(transpose_x = attn_weights_43_transpose_x_0, transpose_y = attn_weights_43_transpose_y_0, x = query_states_15_cast, y = transpose_122)[name = tensor("attn_weights_43_cast")]; + tensor var_772 = const()[name = tensor("op_772"), val = tensor([1, 20, 77, 77])]; + tensor var_773_cast = reshape(shape = var_772, x = attn_weights_43_cast)[name = tensor("op_773_cast")]; + tensor attn_weights_45_cast = add(x = var_773_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_45_cast")]; + tensor var_778 = const()[name = tensor("op_778"), val = tensor([20, 77, 77])]; + tensor input_117_cast = reshape(shape = var_778, x = 
attn_weights_45_cast)[name = tensor("input_117_cast")]; + tensor input_119_cast = softmax(axis = var_5, x = input_117_cast)[name = tensor("input_119_cast")]; + tensor attn_output_43_transpose_x_0 = const()[name = tensor("attn_output_43_transpose_x_0"), val = tensor(false)]; + tensor attn_output_43_transpose_y_0 = const()[name = tensor("attn_output_43_transpose_y_0"), val = tensor(false)]; + tensor attn_output_43_cast = matmul(transpose_x = attn_output_43_transpose_x_0, transpose_y = attn_output_43_transpose_y_0, x = input_119_cast, y = value_states_31_cast)[name = tensor("attn_output_43_cast")]; + tensor var_783 = const()[name = tensor("op_783"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_45_cast = reshape(shape = var_783, x = attn_output_43_cast)[name = tensor("attn_output_45_cast")]; + tensor attn_output_47_perm_0 = const()[name = tensor("attn_output_47_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_786 = const()[name = tensor("op_786"), val = tensor([1, 77, 1280])]; + tensor transpose_121 = transpose(perm = attn_output_47_perm_0, x = attn_output_45_cast)[name = tensor("transpose_121")]; + tensor input_121_cast = reshape(shape = var_786, x = transpose_121)[name = tensor("input_121_cast")]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(412028800)))]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(415305664)))]; + tensor hidden_states_45_cast = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_self_attn_out_proj_weight_to_fp16, x = input_121_cast)[name = tensor("hidden_states_45_cast")]; + tensor input_123_cast = add(x = input_115_cast, y = hidden_states_45_cast)[name = tensor("input_123_cast")]; + tensor input_125_axes_0 = const()[name = tensor("input_125_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_7_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(415308288)))]; + tensor text_encoder_text_model_encoder_layers_7_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(415310912)))]; + tensor input_125_cast = layer_norm(axes = input_125_axes_0, beta = text_encoder_text_model_encoder_layers_7_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_7_layer_norm2_weight_to_fp16, x = input_123_cast)[name = tensor("input_125_cast")]; + tensor text_encoder_text_model_encoder_layers_7_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(415313536)))]; + tensor text_encoder_text_model_encoder_layers_7_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_mlp_fc1_bias_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(428420800)))]; + tensor input_127_cast = linear(bias = text_encoder_text_model_encoder_layers_7_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_mlp_fc1_weight_to_fp16, x = input_125_cast)[name = tensor("input_127_cast")]; + tensor input_129_mode_0 = const()[name = tensor("input_129_mode_0"), val = tensor("EXACT")]; + tensor input_129_cast = gelu(mode = input_129_mode_0, x = input_127_cast)[name = tensor("input_129_cast")]; + tensor text_encoder_text_model_encoder_layers_7_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(428431104)))]; + tensor text_encoder_text_model_encoder_layers_7_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(441538368)))]; + tensor hidden_states_47_cast = linear(bias = text_encoder_text_model_encoder_layers_7_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_mlp_fc2_weight_to_fp16, x = input_129_cast)[name = tensor("hidden_states_47_cast")]; + tensor input_131_cast = add(x = input_123_cast, y = hidden_states_47_cast)[name = tensor("input_131_cast")]; + tensor hidden_states_49_axes_0 = const()[name = tensor("hidden_states_49_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_8_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(441540992)))]; + tensor text_encoder_text_model_encoder_layers_8_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(441543616)))]; + tensor hidden_states_49_cast = layer_norm(axes = hidden_states_49_axes_0, beta = text_encoder_text_model_encoder_layers_8_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_8_layer_norm1_weight_to_fp16, x = input_131_cast)[name = tensor("hidden_states_49_cast")]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(441546240)))]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(444823104)))]; + tensor var_824_cast = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_self_attn_q_proj_weight_to_fp16, x = hidden_states_49_cast)[name = tensor("op_824_cast")]; + tensor var_825_to_fp16 = const()[name = tensor("op_825_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_53_cast = mul(x = var_824_cast, y = var_825_to_fp16)[name = tensor("tensor_53_cast")]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_k_proj_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_8_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(444825728)))]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(448102592)))]; + tensor tensor_49_cast = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_self_attn_k_proj_weight_to_fp16, x = hidden_states_49_cast)[name = tensor("tensor_49_cast")]; + tensor var_830 = const()[name = tensor("op_830"), val = tensor([1, -1, 20, 64])]; + tensor var_831_cast = reshape(shape = var_830, x = tensor_49_cast)[name = tensor("op_831_cast")]; + tensor var_832_perm_0 = const()[name = tensor("op_832_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(448105216)))]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(451382080)))]; + tensor tensor_51_cast = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_self_attn_v_proj_weight_to_fp16, x = hidden_states_49_cast)[name = tensor("tensor_51_cast")]; + tensor var_837 = const()[name = tensor("op_837"), val = tensor([1, -1, 20, 64])]; + tensor var_838_cast = reshape(shape = var_837, x = tensor_51_cast)[name = tensor("op_838_cast")]; + tensor var_839_perm_0 = const()[name = tensor("op_839_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_846 = const()[name = tensor("op_846"), val = tensor([1, 77, 20, 64])]; + tensor var_847_cast = reshape(shape = var_846, x = tensor_53_cast)[name = tensor("op_847_cast")]; + tensor var_848_perm_0 = const()[name = tensor("op_848_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_850 = const()[name = tensor("op_850"), val = tensor([20, -1, 64])]; + tensor transpose_118 = transpose(perm = var_848_perm_0, x = var_847_cast)[name = tensor("transpose_118")]; + tensor query_states_17_cast = reshape(shape = var_850, x = transpose_118)[name = tensor("query_states_17_cast")]; + tensor var_852 = const()[name = tensor("op_852"), val = tensor([20, -1, 64])]; + tensor transpose_120 = transpose(perm = var_832_perm_0, x = var_831_cast)[name = tensor("transpose_120")]; + tensor key_states_35_cast = reshape(shape = var_852, x = transpose_120)[name = tensor("key_states_35_cast")]; + tensor var_854 = const()[name = tensor("op_854"), val = tensor([20, -1, 64])]; + tensor transpose_119 = transpose(perm = var_839_perm_0, x = var_838_cast)[name = tensor("transpose_119")]; + tensor value_states_35_cast = reshape(shape = var_854, x = transpose_119)[name = tensor("value_states_35_cast")]; + tensor var_857_perm_0 = const()[name = tensor("op_857_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_49_transpose_x_0 = const()[name = tensor("attn_weights_49_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_49_transpose_y_0 = 
const()[name = tensor("attn_weights_49_transpose_y_0"), val = tensor(false)]; + tensor transpose_117 = transpose(perm = var_857_perm_0, x = key_states_35_cast)[name = tensor("transpose_117")]; + tensor attn_weights_49_cast = matmul(transpose_x = attn_weights_49_transpose_x_0, transpose_y = attn_weights_49_transpose_y_0, x = query_states_17_cast, y = transpose_117)[name = tensor("attn_weights_49_cast")]; + tensor var_859 = const()[name = tensor("op_859"), val = tensor([1, 20, 77, 77])]; + tensor var_860_cast = reshape(shape = var_859, x = attn_weights_49_cast)[name = tensor("op_860_cast")]; + tensor attn_weights_51_cast = add(x = var_860_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_51_cast")]; + tensor var_865 = const()[name = tensor("op_865"), val = tensor([20, 77, 77])]; + tensor input_133_cast = reshape(shape = var_865, x = attn_weights_51_cast)[name = tensor("input_133_cast")]; + tensor input_135_cast = softmax(axis = var_5, x = input_133_cast)[name = tensor("input_135_cast")]; + tensor attn_output_49_transpose_x_0 = const()[name = tensor("attn_output_49_transpose_x_0"), val = tensor(false)]; + tensor attn_output_49_transpose_y_0 = const()[name = tensor("attn_output_49_transpose_y_0"), val = tensor(false)]; + tensor attn_output_49_cast = matmul(transpose_x = attn_output_49_transpose_x_0, transpose_y = attn_output_49_transpose_y_0, x = input_135_cast, y = value_states_35_cast)[name = tensor("attn_output_49_cast")]; + tensor var_870 = const()[name = tensor("op_870"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_51_cast = reshape(shape = var_870, x = attn_output_49_cast)[name = tensor("attn_output_51_cast")]; + tensor attn_output_53_perm_0 = const()[name = tensor("attn_output_53_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_873 = const()[name = tensor("op_873"), val = tensor([1, 77, 1280])]; + tensor transpose_116 = transpose(perm = attn_output_53_perm_0, x = attn_output_51_cast)[name = tensor("transpose_116")]; + tensor input_137_cast = reshape(shape = var_873, x = transpose_116)[name = tensor("input_137_cast")]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(451384704)))]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(454661568)))]; + tensor hidden_states_51_cast = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_self_attn_out_proj_weight_to_fp16, x = input_137_cast)[name = tensor("hidden_states_51_cast")]; + tensor input_139_cast = add(x = input_131_cast, y = hidden_states_51_cast)[name = tensor("input_139_cast")]; + tensor input_141_axes_0 = const()[name = tensor("input_141_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_8_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(454664192)))]; + tensor text_encoder_text_model_encoder_layers_8_layer_norm2_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_8_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(454666816)))]; + tensor input_141_cast = layer_norm(axes = input_141_axes_0, beta = text_encoder_text_model_encoder_layers_8_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_8_layer_norm2_weight_to_fp16, x = input_139_cast)[name = tensor("input_141_cast")]; + tensor text_encoder_text_model_encoder_layers_8_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(454669440)))]; + tensor text_encoder_text_model_encoder_layers_8_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(467776704)))]; + tensor input_143_cast = linear(bias = text_encoder_text_model_encoder_layers_8_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_mlp_fc1_weight_to_fp16, x = input_141_cast)[name = tensor("input_143_cast")]; + tensor input_145_mode_0 = const()[name = tensor("input_145_mode_0"), val = tensor("EXACT")]; + tensor input_145_cast = gelu(mode = input_145_mode_0, x = input_143_cast)[name = tensor("input_145_cast")]; + tensor text_encoder_text_model_encoder_layers_8_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(467787008)))]; + tensor text_encoder_text_model_encoder_layers_8_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(480894272)))]; + tensor hidden_states_53_cast = linear(bias = text_encoder_text_model_encoder_layers_8_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_mlp_fc2_weight_to_fp16, x = input_145_cast)[name = tensor("hidden_states_53_cast")]; + tensor input_147_cast = add(x = input_139_cast, y = hidden_states_53_cast)[name = tensor("input_147_cast")]; + tensor hidden_states_55_axes_0 = const()[name = tensor("hidden_states_55_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_9_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(480896896)))]; + tensor text_encoder_text_model_encoder_layers_9_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(480899520)))]; + tensor hidden_states_55_cast = layer_norm(axes = hidden_states_55_axes_0, beta = text_encoder_text_model_encoder_layers_9_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_9_layer_norm1_weight_to_fp16, x = input_147_cast)[name = tensor("hidden_states_55_cast")]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(480902144)))]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(484179008)))]; + tensor var_911_cast = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_self_attn_q_proj_weight_to_fp16, x = hidden_states_55_cast)[name = tensor("op_911_cast")]; + tensor var_912_to_fp16 = const()[name = tensor("op_912_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_59_cast = mul(x = var_911_cast, y = var_912_to_fp16)[name = tensor("tensor_59_cast")]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(484181632)))]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(487458496)))]; + tensor tensor_55_cast = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_self_attn_k_proj_weight_to_fp16, x = hidden_states_55_cast)[name = tensor("tensor_55_cast")]; + tensor var_917 = const()[name = tensor("op_917"), val = tensor([1, -1, 20, 64])]; + tensor var_918_cast = reshape(shape = var_917, x = tensor_55_cast)[name = tensor("op_918_cast")]; + tensor var_919_perm_0 = const()[name = tensor("op_919_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(487461120)))]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(490737984)))]; + tensor tensor_57_cast = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_self_attn_v_proj_weight_to_fp16, x = hidden_states_55_cast)[name = tensor("tensor_57_cast")]; + tensor var_924 = const()[name = tensor("op_924"), val = tensor([1, -1, 20, 64])]; + tensor var_925_cast = reshape(shape = var_924, x = tensor_57_cast)[name = tensor("op_925_cast")]; + tensor var_926_perm_0 = const()[name = tensor("op_926_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_933 = const()[name = tensor("op_933"), val = tensor([1, 77, 20, 64])]; + tensor var_934_cast = reshape(shape = var_933, x = tensor_59_cast)[name = tensor("op_934_cast")]; + tensor var_935_perm_0 = const()[name = tensor("op_935_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_937 = const()[name = tensor("op_937"), val = tensor([20, -1, 64])]; + tensor transpose_113 = transpose(perm = var_935_perm_0, x = var_934_cast)[name = tensor("transpose_113")]; + tensor query_states_19_cast = reshape(shape = var_937, x = transpose_113)[name = tensor("query_states_19_cast")]; + tensor var_939 = 
const()[name = tensor("op_939"), val = tensor([20, -1, 64])]; + tensor transpose_115 = transpose(perm = var_919_perm_0, x = var_918_cast)[name = tensor("transpose_115")]; + tensor key_states_39_cast = reshape(shape = var_939, x = transpose_115)[name = tensor("key_states_39_cast")]; + tensor var_941 = const()[name = tensor("op_941"), val = tensor([20, -1, 64])]; + tensor transpose_114 = transpose(perm = var_926_perm_0, x = var_925_cast)[name = tensor("transpose_114")]; + tensor value_states_39_cast = reshape(shape = var_941, x = transpose_114)[name = tensor("value_states_39_cast")]; + tensor var_944_perm_0 = const()[name = tensor("op_944_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_55_transpose_x_0 = const()[name = tensor("attn_weights_55_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_55_transpose_y_0 = const()[name = tensor("attn_weights_55_transpose_y_0"), val = tensor(false)]; + tensor transpose_112 = transpose(perm = var_944_perm_0, x = key_states_39_cast)[name = tensor("transpose_112")]; + tensor attn_weights_55_cast = matmul(transpose_x = attn_weights_55_transpose_x_0, transpose_y = attn_weights_55_transpose_y_0, x = query_states_19_cast, y = transpose_112)[name = tensor("attn_weights_55_cast")]; + tensor var_946 = const()[name = tensor("op_946"), val = tensor([1, 20, 77, 77])]; + tensor var_947_cast = reshape(shape = var_946, x = attn_weights_55_cast)[name = tensor("op_947_cast")]; + tensor attn_weights_57_cast = add(x = var_947_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_57_cast")]; + tensor var_952 = const()[name = tensor("op_952"), val = tensor([20, 77, 77])]; + tensor input_149_cast = reshape(shape = var_952, x = attn_weights_57_cast)[name = tensor("input_149_cast")]; + tensor input_151_cast = softmax(axis = var_5, x = input_149_cast)[name = tensor("input_151_cast")]; + tensor attn_output_55_transpose_x_0 = const()[name = tensor("attn_output_55_transpose_x_0"), val = tensor(false)]; + tensor attn_output_55_transpose_y_0 = const()[name = tensor("attn_output_55_transpose_y_0"), val = tensor(false)]; + tensor attn_output_55_cast = matmul(transpose_x = attn_output_55_transpose_x_0, transpose_y = attn_output_55_transpose_y_0, x = input_151_cast, y = value_states_39_cast)[name = tensor("attn_output_55_cast")]; + tensor var_957 = const()[name = tensor("op_957"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_57_cast = reshape(shape = var_957, x = attn_output_55_cast)[name = tensor("attn_output_57_cast")]; + tensor attn_output_59_perm_0 = const()[name = tensor("attn_output_59_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_960 = const()[name = tensor("op_960"), val = tensor([1, 77, 1280])]; + tensor transpose_111 = transpose(perm = attn_output_59_perm_0, x = attn_output_57_cast)[name = tensor("transpose_111")]; + tensor input_153_cast = reshape(shape = var_960, x = transpose_111)[name = tensor("input_153_cast")]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(490740608)))]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494017472)))]; + tensor hidden_states_57_cast = linear(bias = 
text_encoder_text_model_encoder_layers_9_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_self_attn_out_proj_weight_to_fp16, x = input_153_cast)[name = tensor("hidden_states_57_cast")]; + tensor input_155_cast = add(x = input_147_cast, y = hidden_states_57_cast)[name = tensor("input_155_cast")]; + tensor input_157_axes_0 = const()[name = tensor("input_157_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_9_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494020096)))]; + tensor text_encoder_text_model_encoder_layers_9_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494022720)))]; + tensor input_157_cast = layer_norm(axes = input_157_axes_0, beta = text_encoder_text_model_encoder_layers_9_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_9_layer_norm2_weight_to_fp16, x = input_155_cast)[name = tensor("input_157_cast")]; + tensor text_encoder_text_model_encoder_layers_9_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494025344)))]; + tensor text_encoder_text_model_encoder_layers_9_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(507132608)))]; + tensor input_159_cast = linear(bias = text_encoder_text_model_encoder_layers_9_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_mlp_fc1_weight_to_fp16, x = input_157_cast)[name = tensor("input_159_cast")]; + tensor input_161_mode_0 = const()[name = tensor("input_161_mode_0"), val = tensor("EXACT")]; + tensor input_161_cast = gelu(mode = input_161_mode_0, x = input_159_cast)[name = tensor("input_161_cast")]; + tensor text_encoder_text_model_encoder_layers_9_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(507142912)))]; + tensor text_encoder_text_model_encoder_layers_9_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(520250176)))]; + tensor hidden_states_59_cast = linear(bias = text_encoder_text_model_encoder_layers_9_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_mlp_fc2_weight_to_fp16, x = input_161_cast)[name = tensor("hidden_states_59_cast")]; + tensor input_163_cast = add(x = input_155_cast, y = hidden_states_59_cast)[name = tensor("input_163_cast")]; + tensor hidden_states_61_axes_0 = const()[name = tensor("hidden_states_61_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_10_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(520252800)))]; + tensor 
text_encoder_text_model_encoder_layers_10_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(520255424)))]; + tensor hidden_states_61_cast = layer_norm(axes = hidden_states_61_axes_0, beta = text_encoder_text_model_encoder_layers_10_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_10_layer_norm1_weight_to_fp16, x = input_163_cast)[name = tensor("hidden_states_61_cast")]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(520258048)))]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(523534912)))]; + tensor var_998_cast = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_q_proj_weight_to_fp16, x = hidden_states_61_cast)[name = tensor("op_998_cast")]; + tensor var_999_to_fp16 = const()[name = tensor("op_999_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_65_cast = mul(x = var_998_cast, y = var_999_to_fp16)[name = tensor("tensor_65_cast")]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(523537536)))]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526814400)))]; + tensor tensor_61_cast = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight_to_fp16, x = hidden_states_61_cast)[name = tensor("tensor_61_cast")]; + tensor var_1004 = const()[name = tensor("op_1004"), val = tensor([1, -1, 20, 64])]; + tensor var_1005_cast = reshape(shape = var_1004, x = tensor_61_cast)[name = tensor("op_1005_cast")]; + tensor var_1006_perm_0 = const()[name = tensor("op_1006_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526817024)))]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(530093888)))]; + tensor tensor_63_cast = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight_to_fp16, x = hidden_states_61_cast)[name = tensor("tensor_63_cast")]; + tensor 
var_1011 = const()[name = tensor("op_1011"), val = tensor([1, -1, 20, 64])]; + tensor var_1012_cast = reshape(shape = var_1011, x = tensor_63_cast)[name = tensor("op_1012_cast")]; + tensor var_1013_perm_0 = const()[name = tensor("op_1013_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1020 = const()[name = tensor("op_1020"), val = tensor([1, 77, 20, 64])]; + tensor var_1021_cast = reshape(shape = var_1020, x = tensor_65_cast)[name = tensor("op_1021_cast")]; + tensor var_1022_perm_0 = const()[name = tensor("op_1022_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1024 = const()[name = tensor("op_1024"), val = tensor([20, -1, 64])]; + tensor transpose_108 = transpose(perm = var_1022_perm_0, x = var_1021_cast)[name = tensor("transpose_108")]; + tensor query_states_21_cast = reshape(shape = var_1024, x = transpose_108)[name = tensor("query_states_21_cast")]; + tensor var_1026 = const()[name = tensor("op_1026"), val = tensor([20, -1, 64])]; + tensor transpose_110 = transpose(perm = var_1006_perm_0, x = var_1005_cast)[name = tensor("transpose_110")]; + tensor key_states_43_cast = reshape(shape = var_1026, x = transpose_110)[name = tensor("key_states_43_cast")]; + tensor var_1028 = const()[name = tensor("op_1028"), val = tensor([20, -1, 64])]; + tensor transpose_109 = transpose(perm = var_1013_perm_0, x = var_1012_cast)[name = tensor("transpose_109")]; + tensor value_states_43_cast = reshape(shape = var_1028, x = transpose_109)[name = tensor("value_states_43_cast")]; + tensor var_1031_perm_0 = const()[name = tensor("op_1031_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_61_transpose_x_0 = const()[name = tensor("attn_weights_61_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_61_transpose_y_0 = const()[name = tensor("attn_weights_61_transpose_y_0"), val = tensor(false)]; + tensor transpose_107 = transpose(perm = var_1031_perm_0, x = key_states_43_cast)[name = tensor("transpose_107")]; + tensor attn_weights_61_cast = matmul(transpose_x = attn_weights_61_transpose_x_0, transpose_y = attn_weights_61_transpose_y_0, x = query_states_21_cast, y = transpose_107)[name = tensor("attn_weights_61_cast")]; + tensor var_1033 = const()[name = tensor("op_1033"), val = tensor([1, 20, 77, 77])]; + tensor var_1034_cast = reshape(shape = var_1033, x = attn_weights_61_cast)[name = tensor("op_1034_cast")]; + tensor attn_weights_63_cast = add(x = var_1034_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_63_cast")]; + tensor var_1039 = const()[name = tensor("op_1039"), val = tensor([20, 77, 77])]; + tensor input_165_cast = reshape(shape = var_1039, x = attn_weights_63_cast)[name = tensor("input_165_cast")]; + tensor input_167_cast = softmax(axis = var_5, x = input_165_cast)[name = tensor("input_167_cast")]; + tensor attn_output_61_transpose_x_0 = const()[name = tensor("attn_output_61_transpose_x_0"), val = tensor(false)]; + tensor attn_output_61_transpose_y_0 = const()[name = tensor("attn_output_61_transpose_y_0"), val = tensor(false)]; + tensor attn_output_61_cast = matmul(transpose_x = attn_output_61_transpose_x_0, transpose_y = attn_output_61_transpose_y_0, x = input_167_cast, y = value_states_43_cast)[name = tensor("attn_output_61_cast")]; + tensor var_1044 = const()[name = tensor("op_1044"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_63_cast = reshape(shape = var_1044, x = attn_output_61_cast)[name = tensor("attn_output_63_cast")]; + tensor attn_output_65_perm_0 = const()[name = tensor("attn_output_65_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor 
var_1047 = const()[name = tensor("op_1047"), val = tensor([1, 77, 1280])]; + tensor transpose_106 = transpose(perm = attn_output_65_perm_0, x = attn_output_63_cast)[name = tensor("transpose_106")]; + tensor input_169_cast = reshape(shape = var_1047, x = transpose_106)[name = tensor("input_169_cast")]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(530096512)))]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533373376)))]; + tensor hidden_states_63_cast = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight_to_fp16, x = input_169_cast)[name = tensor("hidden_states_63_cast")]; + tensor input_171_cast = add(x = input_163_cast, y = hidden_states_63_cast)[name = tensor("input_171_cast")]; + tensor input_173_axes_0 = const()[name = tensor("input_173_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_10_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533376000)))]; + tensor text_encoder_text_model_encoder_layers_10_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533378624)))]; + tensor input_173_cast = layer_norm(axes = input_173_axes_0, beta = text_encoder_text_model_encoder_layers_10_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_10_layer_norm2_weight_to_fp16, x = input_171_cast)[name = tensor("input_173_cast")]; + tensor text_encoder_text_model_encoder_layers_10_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533381248)))]; + tensor text_encoder_text_model_encoder_layers_10_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(546488512)))]; + tensor input_175_cast = linear(bias = text_encoder_text_model_encoder_layers_10_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_mlp_fc1_weight_to_fp16, x = input_173_cast)[name = tensor("input_175_cast")]; + tensor input_177_mode_0 = const()[name = tensor("input_177_mode_0"), val = tensor("EXACT")]; + tensor input_177_cast = gelu(mode = input_177_mode_0, x = input_175_cast)[name = tensor("input_177_cast")]; + tensor text_encoder_text_model_encoder_layers_10_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(546498816)))]; + tensor text_encoder_text_model_encoder_layers_10_mlp_fc2_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_10_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(559606080)))]; + tensor hidden_states_65_cast = linear(bias = text_encoder_text_model_encoder_layers_10_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_mlp_fc2_weight_to_fp16, x = input_177_cast)[name = tensor("hidden_states_65_cast")]; + tensor input_179_cast = add(x = input_171_cast, y = hidden_states_65_cast)[name = tensor("input_179_cast")]; + tensor hidden_states_67_axes_0 = const()[name = tensor("hidden_states_67_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_11_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(559608704)))]; + tensor text_encoder_text_model_encoder_layers_11_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(559611328)))]; + tensor hidden_states_67_cast = layer_norm(axes = hidden_states_67_axes_0, beta = text_encoder_text_model_encoder_layers_11_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_11_layer_norm1_weight_to_fp16, x = input_179_cast)[name = tensor("hidden_states_67_cast")]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(559613952)))]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(562890816)))]; + tensor var_1085_cast = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight_to_fp16, x = hidden_states_67_cast)[name = tensor("op_1085_cast")]; + tensor var_1086_to_fp16 = const()[name = tensor("op_1086_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_71_cast = mul(x = var_1085_cast, y = var_1086_to_fp16)[name = tensor("tensor_71_cast")]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(562893440)))]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(566170304)))]; + tensor tensor_67_cast = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight_to_fp16, x = hidden_states_67_cast)[name = tensor("tensor_67_cast")]; + tensor var_1091 = const()[name = tensor("op_1091"), val = tensor([1, -1, 20, 64])]; + tensor var_1092_cast = reshape(shape = var_1091, x = tensor_67_cast)[name = tensor("op_1092_cast")]; + tensor 
var_1093_perm_0 = const()[name = tensor("op_1093_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(566172928)))]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(569449792)))]; + tensor tensor_69_cast = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight_to_fp16, x = hidden_states_67_cast)[name = tensor("tensor_69_cast")]; + tensor var_1098 = const()[name = tensor("op_1098"), val = tensor([1, -1, 20, 64])]; + tensor var_1099_cast = reshape(shape = var_1098, x = tensor_69_cast)[name = tensor("op_1099_cast")]; + tensor var_1100_perm_0 = const()[name = tensor("op_1100_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1107 = const()[name = tensor("op_1107"), val = tensor([1, 77, 20, 64])]; + tensor var_1108_cast = reshape(shape = var_1107, x = tensor_71_cast)[name = tensor("op_1108_cast")]; + tensor var_1109_perm_0 = const()[name = tensor("op_1109_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1111 = const()[name = tensor("op_1111"), val = tensor([20, -1, 64])]; + tensor transpose_103 = transpose(perm = var_1109_perm_0, x = var_1108_cast)[name = tensor("transpose_103")]; + tensor query_states_23_cast = reshape(shape = var_1111, x = transpose_103)[name = tensor("query_states_23_cast")]; + tensor var_1113 = const()[name = tensor("op_1113"), val = tensor([20, -1, 64])]; + tensor transpose_105 = transpose(perm = var_1093_perm_0, x = var_1092_cast)[name = tensor("transpose_105")]; + tensor key_states_47_cast = reshape(shape = var_1113, x = transpose_105)[name = tensor("key_states_47_cast")]; + tensor var_1115 = const()[name = tensor("op_1115"), val = tensor([20, -1, 64])]; + tensor transpose_104 = transpose(perm = var_1100_perm_0, x = var_1099_cast)[name = tensor("transpose_104")]; + tensor value_states_47_cast = reshape(shape = var_1115, x = transpose_104)[name = tensor("value_states_47_cast")]; + tensor var_1118_perm_0 = const()[name = tensor("op_1118_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_67_transpose_x_0 = const()[name = tensor("attn_weights_67_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_67_transpose_y_0 = const()[name = tensor("attn_weights_67_transpose_y_0"), val = tensor(false)]; + tensor transpose_102 = transpose(perm = var_1118_perm_0, x = key_states_47_cast)[name = tensor("transpose_102")]; + tensor attn_weights_67_cast = matmul(transpose_x = attn_weights_67_transpose_x_0, transpose_y = attn_weights_67_transpose_y_0, x = query_states_23_cast, y = transpose_102)[name = tensor("attn_weights_67_cast")]; + tensor var_1120 = const()[name = tensor("op_1120"), val = tensor([1, 20, 77, 77])]; + tensor var_1121_cast = reshape(shape = var_1120, x = attn_weights_67_cast)[name = tensor("op_1121_cast")]; + tensor attn_weights_69_cast = add(x = var_1121_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_69_cast")]; + tensor var_1126 = const()[name = tensor("op_1126"), val = tensor([20, 77, 77])]; + tensor input_181_cast = reshape(shape = var_1126, x = 
attn_weights_69_cast)[name = tensor("input_181_cast")]; + tensor input_183_cast = softmax(axis = var_5, x = input_181_cast)[name = tensor("input_183_cast")]; + tensor attn_output_67_transpose_x_0 = const()[name = tensor("attn_output_67_transpose_x_0"), val = tensor(false)]; + tensor attn_output_67_transpose_y_0 = const()[name = tensor("attn_output_67_transpose_y_0"), val = tensor(false)]; + tensor attn_output_67_cast = matmul(transpose_x = attn_output_67_transpose_x_0, transpose_y = attn_output_67_transpose_y_0, x = input_183_cast, y = value_states_47_cast)[name = tensor("attn_output_67_cast")]; + tensor var_1131 = const()[name = tensor("op_1131"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_69_cast = reshape(shape = var_1131, x = attn_output_67_cast)[name = tensor("attn_output_69_cast")]; + tensor attn_output_71_perm_0 = const()[name = tensor("attn_output_71_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1134 = const()[name = tensor("op_1134"), val = tensor([1, 77, 1280])]; + tensor transpose_101 = transpose(perm = attn_output_71_perm_0, x = attn_output_69_cast)[name = tensor("transpose_101")]; + tensor input_185_cast = reshape(shape = var_1134, x = transpose_101)[name = tensor("input_185_cast")]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(569452416)))]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(572729280)))]; + tensor hidden_states_69_cast = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight_to_fp16, x = input_185_cast)[name = tensor("hidden_states_69_cast")]; + tensor input_187_cast = add(x = input_179_cast, y = hidden_states_69_cast)[name = tensor("input_187_cast")]; + tensor input_189_axes_0 = const()[name = tensor("input_189_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_11_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(572731904)))]; + tensor text_encoder_text_model_encoder_layers_11_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(572734528)))]; + tensor input_189_cast = layer_norm(axes = input_189_axes_0, beta = text_encoder_text_model_encoder_layers_11_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_11_layer_norm2_weight_to_fp16, x = input_187_cast)[name = tensor("input_189_cast")]; + tensor text_encoder_text_model_encoder_layers_11_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(572737152)))]; + tensor text_encoder_text_model_encoder_layers_11_mlp_fc1_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_11_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(585844416)))]; + tensor input_191_cast = linear(bias = text_encoder_text_model_encoder_layers_11_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_mlp_fc1_weight_to_fp16, x = input_189_cast)[name = tensor("input_191_cast")]; + tensor input_193_mode_0 = const()[name = tensor("input_193_mode_0"), val = tensor("EXACT")]; + tensor input_193_cast = gelu(mode = input_193_mode_0, x = input_191_cast)[name = tensor("input_193_cast")]; + tensor text_encoder_text_model_encoder_layers_11_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(585854720)))]; + tensor text_encoder_text_model_encoder_layers_11_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(598961984)))]; + tensor hidden_states_71_cast = linear(bias = text_encoder_text_model_encoder_layers_11_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_mlp_fc2_weight_to_fp16, x = input_193_cast)[name = tensor("hidden_states_71_cast")]; + tensor input_195_cast = add(x = input_187_cast, y = hidden_states_71_cast)[name = tensor("input_195_cast")]; + tensor hidden_states_73_axes_0 = const()[name = tensor("hidden_states_73_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_12_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(598964608)))]; + tensor text_encoder_text_model_encoder_layers_12_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(598967232)))]; + tensor hidden_states_73_cast = layer_norm(axes = hidden_states_73_axes_0, beta = text_encoder_text_model_encoder_layers_12_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_12_layer_norm1_weight_to_fp16, x = input_195_cast)[name = tensor("hidden_states_73_cast")]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(598969856)))]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(602246720)))]; + tensor var_1172_cast = linear(bias = text_encoder_text_model_encoder_layers_12_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_self_attn_q_proj_weight_to_fp16, x = hidden_states_73_cast)[name = tensor("op_1172_cast")]; + tensor var_1173_to_fp16 = const()[name = tensor("op_1173_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_77_cast = mul(x = var_1172_cast, y = var_1173_to_fp16)[name = tensor("tensor_77_cast")]; + tensor 
text_encoder_text_model_encoder_layers_12_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(602249344)))]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(605526208)))]; + tensor tensor_73_cast = linear(bias = text_encoder_text_model_encoder_layers_12_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_self_attn_k_proj_weight_to_fp16, x = hidden_states_73_cast)[name = tensor("tensor_73_cast")]; + tensor var_1178 = const()[name = tensor("op_1178"), val = tensor([1, -1, 20, 64])]; + tensor var_1179_cast = reshape(shape = var_1178, x = tensor_73_cast)[name = tensor("op_1179_cast")]; + tensor var_1180_perm_0 = const()[name = tensor("op_1180_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(605528832)))]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(608805696)))]; + tensor tensor_75_cast = linear(bias = text_encoder_text_model_encoder_layers_12_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_self_attn_v_proj_weight_to_fp16, x = hidden_states_73_cast)[name = tensor("tensor_75_cast")]; + tensor var_1185 = const()[name = tensor("op_1185"), val = tensor([1, -1, 20, 64])]; + tensor var_1186_cast = reshape(shape = var_1185, x = tensor_75_cast)[name = tensor("op_1186_cast")]; + tensor var_1187_perm_0 = const()[name = tensor("op_1187_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1194 = const()[name = tensor("op_1194"), val = tensor([1, 77, 20, 64])]; + tensor var_1195_cast = reshape(shape = var_1194, x = tensor_77_cast)[name = tensor("op_1195_cast")]; + tensor var_1196_perm_0 = const()[name = tensor("op_1196_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1198 = const()[name = tensor("op_1198"), val = tensor([20, -1, 64])]; + tensor transpose_98 = transpose(perm = var_1196_perm_0, x = var_1195_cast)[name = tensor("transpose_98")]; + tensor query_states_25_cast = reshape(shape = var_1198, x = transpose_98)[name = tensor("query_states_25_cast")]; + tensor var_1200 = const()[name = tensor("op_1200"), val = tensor([20, -1, 64])]; + tensor transpose_100 = transpose(perm = var_1180_perm_0, x = var_1179_cast)[name = tensor("transpose_100")]; + tensor key_states_51_cast = reshape(shape = var_1200, x = transpose_100)[name = tensor("key_states_51_cast")]; + tensor var_1202 = const()[name = tensor("op_1202"), val = tensor([20, -1, 64])]; + tensor transpose_99 = transpose(perm = var_1187_perm_0, x = var_1186_cast)[name = tensor("transpose_99")]; + tensor value_states_51_cast = reshape(shape = var_1202, x = transpose_99)[name = tensor("value_states_51_cast")]; + tensor var_1205_perm_0 = const()[name = tensor("op_1205_perm_0"), val = tensor([0, 2, 1])]; + tensor 
attn_weights_73_transpose_x_0 = const()[name = tensor("attn_weights_73_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_73_transpose_y_0 = const()[name = tensor("attn_weights_73_transpose_y_0"), val = tensor(false)]; + tensor transpose_97 = transpose(perm = var_1205_perm_0, x = key_states_51_cast)[name = tensor("transpose_97")]; + tensor attn_weights_73_cast = matmul(transpose_x = attn_weights_73_transpose_x_0, transpose_y = attn_weights_73_transpose_y_0, x = query_states_25_cast, y = transpose_97)[name = tensor("attn_weights_73_cast")]; + tensor var_1207 = const()[name = tensor("op_1207"), val = tensor([1, 20, 77, 77])]; + tensor var_1208_cast = reshape(shape = var_1207, x = attn_weights_73_cast)[name = tensor("op_1208_cast")]; + tensor attn_weights_75_cast = add(x = var_1208_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_75_cast")]; + tensor var_1213 = const()[name = tensor("op_1213"), val = tensor([20, 77, 77])]; + tensor input_197_cast = reshape(shape = var_1213, x = attn_weights_75_cast)[name = tensor("input_197_cast")]; + tensor input_199_cast = softmax(axis = var_5, x = input_197_cast)[name = tensor("input_199_cast")]; + tensor attn_output_73_transpose_x_0 = const()[name = tensor("attn_output_73_transpose_x_0"), val = tensor(false)]; + tensor attn_output_73_transpose_y_0 = const()[name = tensor("attn_output_73_transpose_y_0"), val = tensor(false)]; + tensor attn_output_73_cast = matmul(transpose_x = attn_output_73_transpose_x_0, transpose_y = attn_output_73_transpose_y_0, x = input_199_cast, y = value_states_51_cast)[name = tensor("attn_output_73_cast")]; + tensor var_1218 = const()[name = tensor("op_1218"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_75_cast = reshape(shape = var_1218, x = attn_output_73_cast)[name = tensor("attn_output_75_cast")]; + tensor attn_output_77_perm_0 = const()[name = tensor("attn_output_77_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1221 = const()[name = tensor("op_1221"), val = tensor([1, 77, 1280])]; + tensor transpose_96 = transpose(perm = attn_output_77_perm_0, x = attn_output_75_cast)[name = tensor("transpose_96")]; + tensor input_201_cast = reshape(shape = var_1221, x = transpose_96)[name = tensor("input_201_cast")]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(608808320)))]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(612085184)))]; + tensor hidden_states_75_cast = linear(bias = text_encoder_text_model_encoder_layers_12_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_self_attn_out_proj_weight_to_fp16, x = input_201_cast)[name = tensor("hidden_states_75_cast")]; + tensor input_203_cast = add(x = input_195_cast, y = hidden_states_75_cast)[name = tensor("input_203_cast")]; + tensor input_205_axes_0 = const()[name = tensor("input_205_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_12_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(612087808)))]; + tensor text_encoder_text_model_encoder_layers_12_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(612090432)))]; + tensor input_205_cast = layer_norm(axes = input_205_axes_0, beta = text_encoder_text_model_encoder_layers_12_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_12_layer_norm2_weight_to_fp16, x = input_203_cast)[name = tensor("input_205_cast")]; + tensor text_encoder_text_model_encoder_layers_12_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(612093056)))]; + tensor text_encoder_text_model_encoder_layers_12_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(625200320)))]; + tensor input_207_cast = linear(bias = text_encoder_text_model_encoder_layers_12_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_mlp_fc1_weight_to_fp16, x = input_205_cast)[name = tensor("input_207_cast")]; + tensor input_209_mode_0 = const()[name = tensor("input_209_mode_0"), val = tensor("EXACT")]; + tensor input_209_cast = gelu(mode = input_209_mode_0, x = input_207_cast)[name = tensor("input_209_cast")]; + tensor text_encoder_text_model_encoder_layers_12_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(625210624)))]; + tensor text_encoder_text_model_encoder_layers_12_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(638317888)))]; + tensor hidden_states_77_cast = linear(bias = text_encoder_text_model_encoder_layers_12_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_mlp_fc2_weight_to_fp16, x = input_209_cast)[name = tensor("hidden_states_77_cast")]; + tensor input_211_cast = add(x = input_203_cast, y = hidden_states_77_cast)[name = tensor("input_211_cast")]; + tensor hidden_states_79_axes_0 = const()[name = tensor("hidden_states_79_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_13_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(638320512)))]; + tensor text_encoder_text_model_encoder_layers_13_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(638323136)))]; + tensor hidden_states_79_cast = layer_norm(axes = hidden_states_79_axes_0, beta = text_encoder_text_model_encoder_layers_13_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_13_layer_norm1_weight_to_fp16, x = input_211_cast)[name = tensor("hidden_states_79_cast")]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_q_proj_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_13_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(638325760)))]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(641602624)))]; + tensor var_1259_cast = linear(bias = text_encoder_text_model_encoder_layers_13_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_self_attn_q_proj_weight_to_fp16, x = hidden_states_79_cast)[name = tensor("op_1259_cast")]; + tensor var_1260_to_fp16 = const()[name = tensor("op_1260_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_83_cast = mul(x = var_1259_cast, y = var_1260_to_fp16)[name = tensor("tensor_83_cast")]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(641605248)))]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(644882112)))]; + tensor tensor_79_cast = linear(bias = text_encoder_text_model_encoder_layers_13_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_self_attn_k_proj_weight_to_fp16, x = hidden_states_79_cast)[name = tensor("tensor_79_cast")]; + tensor var_1265 = const()[name = tensor("op_1265"), val = tensor([1, -1, 20, 64])]; + tensor var_1266_cast = reshape(shape = var_1265, x = tensor_79_cast)[name = tensor("op_1266_cast")]; + tensor var_1267_perm_0 = const()[name = tensor("op_1267_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(644884736)))]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(648161600)))]; + tensor tensor_81_cast = linear(bias = text_encoder_text_model_encoder_layers_13_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_self_attn_v_proj_weight_to_fp16, x = hidden_states_79_cast)[name = tensor("tensor_81_cast")]; + tensor var_1272 = const()[name = tensor("op_1272"), val = tensor([1, -1, 20, 64])]; + tensor var_1273_cast = reshape(shape = var_1272, x = tensor_81_cast)[name = tensor("op_1273_cast")]; + tensor var_1274_perm_0 = const()[name = tensor("op_1274_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1281 = const()[name = tensor("op_1281"), val = tensor([1, 77, 20, 64])]; + tensor var_1282_cast = reshape(shape = var_1281, x = tensor_83_cast)[name = tensor("op_1282_cast")]; + tensor var_1283_perm_0 = const()[name = tensor("op_1283_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1285 = const()[name = tensor("op_1285"), val = tensor([20, -1, 64])]; + tensor transpose_93 = 
transpose(perm = var_1283_perm_0, x = var_1282_cast)[name = tensor("transpose_93")]; + tensor query_states_27_cast = reshape(shape = var_1285, x = transpose_93)[name = tensor("query_states_27_cast")]; + tensor var_1287 = const()[name = tensor("op_1287"), val = tensor([20, -1, 64])]; + tensor transpose_95 = transpose(perm = var_1267_perm_0, x = var_1266_cast)[name = tensor("transpose_95")]; + tensor key_states_55_cast = reshape(shape = var_1287, x = transpose_95)[name = tensor("key_states_55_cast")]; + tensor var_1289 = const()[name = tensor("op_1289"), val = tensor([20, -1, 64])]; + tensor transpose_94 = transpose(perm = var_1274_perm_0, x = var_1273_cast)[name = tensor("transpose_94")]; + tensor value_states_55_cast = reshape(shape = var_1289, x = transpose_94)[name = tensor("value_states_55_cast")]; + tensor var_1292_perm_0 = const()[name = tensor("op_1292_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_79_transpose_x_0 = const()[name = tensor("attn_weights_79_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_79_transpose_y_0 = const()[name = tensor("attn_weights_79_transpose_y_0"), val = tensor(false)]; + tensor transpose_92 = transpose(perm = var_1292_perm_0, x = key_states_55_cast)[name = tensor("transpose_92")]; + tensor attn_weights_79_cast = matmul(transpose_x = attn_weights_79_transpose_x_0, transpose_y = attn_weights_79_transpose_y_0, x = query_states_27_cast, y = transpose_92)[name = tensor("attn_weights_79_cast")]; + tensor var_1294 = const()[name = tensor("op_1294"), val = tensor([1, 20, 77, 77])]; + tensor var_1295_cast = reshape(shape = var_1294, x = attn_weights_79_cast)[name = tensor("op_1295_cast")]; + tensor attn_weights_81_cast = add(x = var_1295_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_81_cast")]; + tensor var_1300 = const()[name = tensor("op_1300"), val = tensor([20, 77, 77])]; + tensor input_213_cast = reshape(shape = var_1300, x = attn_weights_81_cast)[name = tensor("input_213_cast")]; + tensor input_215_cast = softmax(axis = var_5, x = input_213_cast)[name = tensor("input_215_cast")]; + tensor attn_output_79_transpose_x_0 = const()[name = tensor("attn_output_79_transpose_x_0"), val = tensor(false)]; + tensor attn_output_79_transpose_y_0 = const()[name = tensor("attn_output_79_transpose_y_0"), val = tensor(false)]; + tensor attn_output_79_cast = matmul(transpose_x = attn_output_79_transpose_x_0, transpose_y = attn_output_79_transpose_y_0, x = input_215_cast, y = value_states_55_cast)[name = tensor("attn_output_79_cast")]; + tensor var_1305 = const()[name = tensor("op_1305"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_81_cast = reshape(shape = var_1305, x = attn_output_79_cast)[name = tensor("attn_output_81_cast")]; + tensor attn_output_83_perm_0 = const()[name = tensor("attn_output_83_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1308 = const()[name = tensor("op_1308"), val = tensor([1, 77, 1280])]; + tensor transpose_91 = transpose(perm = attn_output_83_perm_0, x = attn_output_81_cast)[name = tensor("transpose_91")]; + tensor input_217_cast = reshape(shape = var_1308, x = transpose_91)[name = tensor("input_217_cast")]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(648164224)))]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_out_proj_bias_to_fp16 = const()[name 
= tensor("text_encoder_text_model_encoder_layers_13_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651441088)))]; + tensor hidden_states_81_cast = linear(bias = text_encoder_text_model_encoder_layers_13_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_self_attn_out_proj_weight_to_fp16, x = input_217_cast)[name = tensor("hidden_states_81_cast")]; + tensor input_219_cast = add(x = input_211_cast, y = hidden_states_81_cast)[name = tensor("input_219_cast")]; + tensor input_221_axes_0 = const()[name = tensor("input_221_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_13_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651443712)))]; + tensor text_encoder_text_model_encoder_layers_13_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651446336)))]; + tensor input_221_cast = layer_norm(axes = input_221_axes_0, beta = text_encoder_text_model_encoder_layers_13_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_13_layer_norm2_weight_to_fp16, x = input_219_cast)[name = tensor("input_221_cast")]; + tensor text_encoder_text_model_encoder_layers_13_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651448960)))]; + tensor text_encoder_text_model_encoder_layers_13_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(664556224)))]; + tensor input_223_cast = linear(bias = text_encoder_text_model_encoder_layers_13_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_mlp_fc1_weight_to_fp16, x = input_221_cast)[name = tensor("input_223_cast")]; + tensor input_225_mode_0 = const()[name = tensor("input_225_mode_0"), val = tensor("EXACT")]; + tensor input_225_cast = gelu(mode = input_225_mode_0, x = input_223_cast)[name = tensor("input_225_cast")]; + tensor text_encoder_text_model_encoder_layers_13_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(664566528)))]; + tensor text_encoder_text_model_encoder_layers_13_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(677673792)))]; + tensor hidden_states_83_cast = linear(bias = text_encoder_text_model_encoder_layers_13_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_mlp_fc2_weight_to_fp16, x = input_225_cast)[name = tensor("hidden_states_83_cast")]; + tensor input_227_cast = add(x = input_219_cast, y = hidden_states_83_cast)[name = tensor("input_227_cast")]; + tensor hidden_states_85_axes_0 = const()[name = tensor("hidden_states_85_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_14_layer_norm1_weight_to_fp16 
= const()[name = tensor("text_encoder_text_model_encoder_layers_14_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(677676416)))]; + tensor text_encoder_text_model_encoder_layers_14_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(677679040)))]; + tensor hidden_states_85_cast = layer_norm(axes = hidden_states_85_axes_0, beta = text_encoder_text_model_encoder_layers_14_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_14_layer_norm1_weight_to_fp16, x = input_227_cast)[name = tensor("hidden_states_85_cast")]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(677681664)))]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(680958528)))]; + tensor var_1346_cast = linear(bias = text_encoder_text_model_encoder_layers_14_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_self_attn_q_proj_weight_to_fp16, x = hidden_states_85_cast)[name = tensor("op_1346_cast")]; + tensor var_1347_to_fp16 = const()[name = tensor("op_1347_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_89_cast = mul(x = var_1346_cast, y = var_1347_to_fp16)[name = tensor("tensor_89_cast")]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(680961152)))]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(684238016)))]; + tensor tensor_85_cast = linear(bias = text_encoder_text_model_encoder_layers_14_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_self_attn_k_proj_weight_to_fp16, x = hidden_states_85_cast)[name = tensor("tensor_85_cast")]; + tensor var_1352 = const()[name = tensor("op_1352"), val = tensor([1, -1, 20, 64])]; + tensor var_1353_cast = reshape(shape = var_1352, x = tensor_85_cast)[name = tensor("op_1353_cast")]; + tensor var_1354_perm_0 = const()[name = tensor("op_1354_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(684240640)))]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(687517504)))]; + tensor tensor_87_cast = linear(bias = 
text_encoder_text_model_encoder_layers_14_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_self_attn_v_proj_weight_to_fp16, x = hidden_states_85_cast)[name = tensor("tensor_87_cast")]; + tensor var_1359 = const()[name = tensor("op_1359"), val = tensor([1, -1, 20, 64])]; + tensor var_1360_cast = reshape(shape = var_1359, x = tensor_87_cast)[name = tensor("op_1360_cast")]; + tensor var_1361_perm_0 = const()[name = tensor("op_1361_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1368 = const()[name = tensor("op_1368"), val = tensor([1, 77, 20, 64])]; + tensor var_1369_cast = reshape(shape = var_1368, x = tensor_89_cast)[name = tensor("op_1369_cast")]; + tensor var_1370_perm_0 = const()[name = tensor("op_1370_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1372 = const()[name = tensor("op_1372"), val = tensor([20, -1, 64])]; + tensor transpose_88 = transpose(perm = var_1370_perm_0, x = var_1369_cast)[name = tensor("transpose_88")]; + tensor query_states_29_cast = reshape(shape = var_1372, x = transpose_88)[name = tensor("query_states_29_cast")]; + tensor var_1374 = const()[name = tensor("op_1374"), val = tensor([20, -1, 64])]; + tensor transpose_90 = transpose(perm = var_1354_perm_0, x = var_1353_cast)[name = tensor("transpose_90")]; + tensor key_states_59_cast = reshape(shape = var_1374, x = transpose_90)[name = tensor("key_states_59_cast")]; + tensor var_1376 = const()[name = tensor("op_1376"), val = tensor([20, -1, 64])]; + tensor transpose_89 = transpose(perm = var_1361_perm_0, x = var_1360_cast)[name = tensor("transpose_89")]; + tensor value_states_59_cast = reshape(shape = var_1376, x = transpose_89)[name = tensor("value_states_59_cast")]; + tensor var_1379_perm_0 = const()[name = tensor("op_1379_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_85_transpose_x_0 = const()[name = tensor("attn_weights_85_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_85_transpose_y_0 = const()[name = tensor("attn_weights_85_transpose_y_0"), val = tensor(false)]; + tensor transpose_87 = transpose(perm = var_1379_perm_0, x = key_states_59_cast)[name = tensor("transpose_87")]; + tensor attn_weights_85_cast = matmul(transpose_x = attn_weights_85_transpose_x_0, transpose_y = attn_weights_85_transpose_y_0, x = query_states_29_cast, y = transpose_87)[name = tensor("attn_weights_85_cast")]; + tensor var_1381 = const()[name = tensor("op_1381"), val = tensor([1, 20, 77, 77])]; + tensor var_1382_cast = reshape(shape = var_1381, x = attn_weights_85_cast)[name = tensor("op_1382_cast")]; + tensor attn_weights_87_cast = add(x = var_1382_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_87_cast")]; + tensor var_1387 = const()[name = tensor("op_1387"), val = tensor([20, 77, 77])]; + tensor input_229_cast = reshape(shape = var_1387, x = attn_weights_87_cast)[name = tensor("input_229_cast")]; + tensor input_231_cast = softmax(axis = var_5, x = input_229_cast)[name = tensor("input_231_cast")]; + tensor attn_output_85_transpose_x_0 = const()[name = tensor("attn_output_85_transpose_x_0"), val = tensor(false)]; + tensor attn_output_85_transpose_y_0 = const()[name = tensor("attn_output_85_transpose_y_0"), val = tensor(false)]; + tensor attn_output_85_cast = matmul(transpose_x = attn_output_85_transpose_x_0, transpose_y = attn_output_85_transpose_y_0, x = input_231_cast, y = value_states_59_cast)[name = tensor("attn_output_85_cast")]; + tensor var_1392 = const()[name = tensor("op_1392"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_87_cast = 
reshape(shape = var_1392, x = attn_output_85_cast)[name = tensor("attn_output_87_cast")]; + tensor attn_output_89_perm_0 = const()[name = tensor("attn_output_89_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1395 = const()[name = tensor("op_1395"), val = tensor([1, 77, 1280])]; + tensor transpose_86 = transpose(perm = attn_output_89_perm_0, x = attn_output_87_cast)[name = tensor("transpose_86")]; + tensor input_233_cast = reshape(shape = var_1395, x = transpose_86)[name = tensor("input_233_cast")]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(687520128)))]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(690796992)))]; + tensor hidden_states_87_cast = linear(bias = text_encoder_text_model_encoder_layers_14_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_self_attn_out_proj_weight_to_fp16, x = input_233_cast)[name = tensor("hidden_states_87_cast")]; + tensor input_235_cast = add(x = input_227_cast, y = hidden_states_87_cast)[name = tensor("input_235_cast")]; + tensor input_237_axes_0 = const()[name = tensor("input_237_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_14_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(690799616)))]; + tensor text_encoder_text_model_encoder_layers_14_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(690802240)))]; + tensor input_237_cast = layer_norm(axes = input_237_axes_0, beta = text_encoder_text_model_encoder_layers_14_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_14_layer_norm2_weight_to_fp16, x = input_235_cast)[name = tensor("input_237_cast")]; + tensor text_encoder_text_model_encoder_layers_14_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(690804864)))]; + tensor text_encoder_text_model_encoder_layers_14_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703912128)))]; + tensor input_239_cast = linear(bias = text_encoder_text_model_encoder_layers_14_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_mlp_fc1_weight_to_fp16, x = input_237_cast)[name = tensor("input_239_cast")]; + tensor input_241_mode_0 = const()[name = tensor("input_241_mode_0"), val = tensor("EXACT")]; + tensor input_241_cast = gelu(mode = input_241_mode_0, x = input_239_cast)[name = tensor("input_241_cast")]; + tensor text_encoder_text_model_encoder_layers_14_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_mlp_fc2_weight_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703922432)))]; + tensor text_encoder_text_model_encoder_layers_14_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717029696)))]; + tensor hidden_states_89_cast = linear(bias = text_encoder_text_model_encoder_layers_14_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_mlp_fc2_weight_to_fp16, x = input_241_cast)[name = tensor("hidden_states_89_cast")]; + tensor input_243_cast = add(x = input_235_cast, y = hidden_states_89_cast)[name = tensor("input_243_cast")]; + tensor hidden_states_91_axes_0 = const()[name = tensor("hidden_states_91_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_15_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717032320)))]; + tensor text_encoder_text_model_encoder_layers_15_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717034944)))]; + tensor hidden_states_91_cast = layer_norm(axes = hidden_states_91_axes_0, beta = text_encoder_text_model_encoder_layers_15_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_15_layer_norm1_weight_to_fp16, x = input_243_cast)[name = tensor("hidden_states_91_cast")]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717037568)))]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(720314432)))]; + tensor var_1433_cast = linear(bias = text_encoder_text_model_encoder_layers_15_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_self_attn_q_proj_weight_to_fp16, x = hidden_states_91_cast)[name = tensor("op_1433_cast")]; + tensor var_1434_to_fp16 = const()[name = tensor("op_1434_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_95_cast = mul(x = var_1433_cast, y = var_1434_to_fp16)[name = tensor("tensor_95_cast")]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(720317056)))]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(723593920)))]; + tensor tensor_91_cast = linear(bias = text_encoder_text_model_encoder_layers_15_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_self_attn_k_proj_weight_to_fp16, x = hidden_states_91_cast)[name = tensor("tensor_91_cast")]; + tensor 
var_1439 = const()[name = tensor("op_1439"), val = tensor([1, -1, 20, 64])]; + tensor var_1440_cast = reshape(shape = var_1439, x = tensor_91_cast)[name = tensor("op_1440_cast")]; + tensor var_1441_perm_0 = const()[name = tensor("op_1441_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(723596544)))]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(726873408)))]; + tensor tensor_93_cast = linear(bias = text_encoder_text_model_encoder_layers_15_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_self_attn_v_proj_weight_to_fp16, x = hidden_states_91_cast)[name = tensor("tensor_93_cast")]; + tensor var_1446 = const()[name = tensor("op_1446"), val = tensor([1, -1, 20, 64])]; + tensor var_1447_cast = reshape(shape = var_1446, x = tensor_93_cast)[name = tensor("op_1447_cast")]; + tensor var_1448_perm_0 = const()[name = tensor("op_1448_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1455 = const()[name = tensor("op_1455"), val = tensor([1, 77, 20, 64])]; + tensor var_1456_cast = reshape(shape = var_1455, x = tensor_95_cast)[name = tensor("op_1456_cast")]; + tensor var_1457_perm_0 = const()[name = tensor("op_1457_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1459 = const()[name = tensor("op_1459"), val = tensor([20, -1, 64])]; + tensor transpose_83 = transpose(perm = var_1457_perm_0, x = var_1456_cast)[name = tensor("transpose_83")]; + tensor query_states_31_cast = reshape(shape = var_1459, x = transpose_83)[name = tensor("query_states_31_cast")]; + tensor var_1461 = const()[name = tensor("op_1461"), val = tensor([20, -1, 64])]; + tensor transpose_85 = transpose(perm = var_1441_perm_0, x = var_1440_cast)[name = tensor("transpose_85")]; + tensor key_states_63_cast = reshape(shape = var_1461, x = transpose_85)[name = tensor("key_states_63_cast")]; + tensor var_1463 = const()[name = tensor("op_1463"), val = tensor([20, -1, 64])]; + tensor transpose_84 = transpose(perm = var_1448_perm_0, x = var_1447_cast)[name = tensor("transpose_84")]; + tensor value_states_63_cast = reshape(shape = var_1463, x = transpose_84)[name = tensor("value_states_63_cast")]; + tensor var_1466_perm_0 = const()[name = tensor("op_1466_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_91_transpose_x_0 = const()[name = tensor("attn_weights_91_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_91_transpose_y_0 = const()[name = tensor("attn_weights_91_transpose_y_0"), val = tensor(false)]; + tensor transpose_82 = transpose(perm = var_1466_perm_0, x = key_states_63_cast)[name = tensor("transpose_82")]; + tensor attn_weights_91_cast = matmul(transpose_x = attn_weights_91_transpose_x_0, transpose_y = attn_weights_91_transpose_y_0, x = query_states_31_cast, y = transpose_82)[name = tensor("attn_weights_91_cast")]; + tensor var_1468 = const()[name = tensor("op_1468"), val = tensor([1, 20, 77, 77])]; + tensor var_1469_cast = reshape(shape = var_1468, x = attn_weights_91_cast)[name = tensor("op_1469_cast")]; + tensor attn_weights_93_cast = add(x = var_1469_cast, y = causal_attention_mask_to_fp16)[name = 
tensor("attn_weights_93_cast")]; + tensor var_1474 = const()[name = tensor("op_1474"), val = tensor([20, 77, 77])]; + tensor input_245_cast = reshape(shape = var_1474, x = attn_weights_93_cast)[name = tensor("input_245_cast")]; + tensor input_247_cast = softmax(axis = var_5, x = input_245_cast)[name = tensor("input_247_cast")]; + tensor attn_output_91_transpose_x_0 = const()[name = tensor("attn_output_91_transpose_x_0"), val = tensor(false)]; + tensor attn_output_91_transpose_y_0 = const()[name = tensor("attn_output_91_transpose_y_0"), val = tensor(false)]; + tensor attn_output_91_cast = matmul(transpose_x = attn_output_91_transpose_x_0, transpose_y = attn_output_91_transpose_y_0, x = input_247_cast, y = value_states_63_cast)[name = tensor("attn_output_91_cast")]; + tensor var_1479 = const()[name = tensor("op_1479"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_93_cast = reshape(shape = var_1479, x = attn_output_91_cast)[name = tensor("attn_output_93_cast")]; + tensor attn_output_95_perm_0 = const()[name = tensor("attn_output_95_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1482 = const()[name = tensor("op_1482"), val = tensor([1, 77, 1280])]; + tensor transpose_81 = transpose(perm = attn_output_95_perm_0, x = attn_output_93_cast)[name = tensor("transpose_81")]; + tensor input_249_cast = reshape(shape = var_1482, x = transpose_81)[name = tensor("input_249_cast")]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(726876032)))]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(730152896)))]; + tensor hidden_states_93_cast = linear(bias = text_encoder_text_model_encoder_layers_15_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_self_attn_out_proj_weight_to_fp16, x = input_249_cast)[name = tensor("hidden_states_93_cast")]; + tensor input_251_cast = add(x = input_243_cast, y = hidden_states_93_cast)[name = tensor("input_251_cast")]; + tensor input_253_axes_0 = const()[name = tensor("input_253_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_15_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(730155520)))]; + tensor text_encoder_text_model_encoder_layers_15_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(730158144)))]; + tensor input_253_cast = layer_norm(axes = input_253_axes_0, beta = text_encoder_text_model_encoder_layers_15_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_15_layer_norm2_weight_to_fp16, x = input_251_cast)[name = tensor("input_253_cast")]; + tensor text_encoder_text_model_encoder_layers_15_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(730160768)))]; + tensor text_encoder_text_model_encoder_layers_15_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(743268032)))]; + tensor input_255_cast = linear(bias = text_encoder_text_model_encoder_layers_15_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_mlp_fc1_weight_to_fp16, x = input_253_cast)[name = tensor("input_255_cast")]; + tensor input_257_mode_0 = const()[name = tensor("input_257_mode_0"), val = tensor("EXACT")]; + tensor input_257_cast = gelu(mode = input_257_mode_0, x = input_255_cast)[name = tensor("input_257_cast")]; + tensor text_encoder_text_model_encoder_layers_15_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(743278336)))]; + tensor text_encoder_text_model_encoder_layers_15_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(756385600)))]; + tensor hidden_states_95_cast = linear(bias = text_encoder_text_model_encoder_layers_15_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_mlp_fc2_weight_to_fp16, x = input_257_cast)[name = tensor("hidden_states_95_cast")]; + tensor input_259_cast = add(x = input_251_cast, y = hidden_states_95_cast)[name = tensor("input_259_cast")]; + tensor hidden_states_97_axes_0 = const()[name = tensor("hidden_states_97_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_16_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(756388224)))]; + tensor text_encoder_text_model_encoder_layers_16_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(756390848)))]; + tensor hidden_states_97_cast = layer_norm(axes = hidden_states_97_axes_0, beta = text_encoder_text_model_encoder_layers_16_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_16_layer_norm1_weight_to_fp16, x = input_259_cast)[name = tensor("hidden_states_97_cast")]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(756393472)))]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(759670336)))]; + tensor var_1520_cast = linear(bias = text_encoder_text_model_encoder_layers_16_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_self_attn_q_proj_weight_to_fp16, x = hidden_states_97_cast)[name = tensor("op_1520_cast")]; + tensor var_1521_to_fp16 = const()[name = tensor("op_1521_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_101_cast = mul(x = 
var_1520_cast, y = var_1521_to_fp16)[name = tensor("tensor_101_cast")]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(759672960)))]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762949824)))]; + tensor tensor_97_cast = linear(bias = text_encoder_text_model_encoder_layers_16_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_self_attn_k_proj_weight_to_fp16, x = hidden_states_97_cast)[name = tensor("tensor_97_cast")]; + tensor var_1526 = const()[name = tensor("op_1526"), val = tensor([1, -1, 20, 64])]; + tensor var_1527_cast = reshape(shape = var_1526, x = tensor_97_cast)[name = tensor("op_1527_cast")]; + tensor var_1528_perm_0 = const()[name = tensor("op_1528_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762952448)))]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(766229312)))]; + tensor tensor_99_cast = linear(bias = text_encoder_text_model_encoder_layers_16_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_self_attn_v_proj_weight_to_fp16, x = hidden_states_97_cast)[name = tensor("tensor_99_cast")]; + tensor var_1533 = const()[name = tensor("op_1533"), val = tensor([1, -1, 20, 64])]; + tensor var_1534_cast = reshape(shape = var_1533, x = tensor_99_cast)[name = tensor("op_1534_cast")]; + tensor var_1535_perm_0 = const()[name = tensor("op_1535_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1542 = const()[name = tensor("op_1542"), val = tensor([1, 77, 20, 64])]; + tensor var_1543_cast = reshape(shape = var_1542, x = tensor_101_cast)[name = tensor("op_1543_cast")]; + tensor var_1544_perm_0 = const()[name = tensor("op_1544_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1546 = const()[name = tensor("op_1546"), val = tensor([20, -1, 64])]; + tensor transpose_78 = transpose(perm = var_1544_perm_0, x = var_1543_cast)[name = tensor("transpose_78")]; + tensor query_states_33_cast = reshape(shape = var_1546, x = transpose_78)[name = tensor("query_states_33_cast")]; + tensor var_1548 = const()[name = tensor("op_1548"), val = tensor([20, -1, 64])]; + tensor transpose_80 = transpose(perm = var_1528_perm_0, x = var_1527_cast)[name = tensor("transpose_80")]; + tensor key_states_67_cast = reshape(shape = var_1548, x = transpose_80)[name = tensor("key_states_67_cast")]; + tensor var_1550 = const()[name = tensor("op_1550"), val = tensor([20, -1, 64])]; + tensor transpose_79 = transpose(perm = var_1535_perm_0, x = var_1534_cast)[name = tensor("transpose_79")]; + tensor value_states_67_cast = reshape(shape = var_1550, x = transpose_79)[name = tensor("value_states_67_cast")]; + tensor var_1553_perm_0 = const()[name = 
tensor("op_1553_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_97_transpose_x_0 = const()[name = tensor("attn_weights_97_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_97_transpose_y_0 = const()[name = tensor("attn_weights_97_transpose_y_0"), val = tensor(false)]; + tensor transpose_77 = transpose(perm = var_1553_perm_0, x = key_states_67_cast)[name = tensor("transpose_77")]; + tensor attn_weights_97_cast = matmul(transpose_x = attn_weights_97_transpose_x_0, transpose_y = attn_weights_97_transpose_y_0, x = query_states_33_cast, y = transpose_77)[name = tensor("attn_weights_97_cast")]; + tensor var_1555 = const()[name = tensor("op_1555"), val = tensor([1, 20, 77, 77])]; + tensor var_1556_cast = reshape(shape = var_1555, x = attn_weights_97_cast)[name = tensor("op_1556_cast")]; + tensor attn_weights_99_cast = add(x = var_1556_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_99_cast")]; + tensor var_1561 = const()[name = tensor("op_1561"), val = tensor([20, 77, 77])]; + tensor input_261_cast = reshape(shape = var_1561, x = attn_weights_99_cast)[name = tensor("input_261_cast")]; + tensor input_263_cast = softmax(axis = var_5, x = input_261_cast)[name = tensor("input_263_cast")]; + tensor attn_output_97_transpose_x_0 = const()[name = tensor("attn_output_97_transpose_x_0"), val = tensor(false)]; + tensor attn_output_97_transpose_y_0 = const()[name = tensor("attn_output_97_transpose_y_0"), val = tensor(false)]; + tensor attn_output_97_cast = matmul(transpose_x = attn_output_97_transpose_x_0, transpose_y = attn_output_97_transpose_y_0, x = input_263_cast, y = value_states_67_cast)[name = tensor("attn_output_97_cast")]; + tensor var_1566 = const()[name = tensor("op_1566"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_99_cast = reshape(shape = var_1566, x = attn_output_97_cast)[name = tensor("attn_output_99_cast")]; + tensor attn_output_101_perm_0 = const()[name = tensor("attn_output_101_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1569 = const()[name = tensor("op_1569"), val = tensor([1, 77, 1280])]; + tensor transpose_76 = transpose(perm = attn_output_101_perm_0, x = attn_output_99_cast)[name = tensor("transpose_76")]; + tensor input_265_cast = reshape(shape = var_1569, x = transpose_76)[name = tensor("input_265_cast")]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(766231936)))]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769508800)))]; + tensor hidden_states_99_cast = linear(bias = text_encoder_text_model_encoder_layers_16_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_self_attn_out_proj_weight_to_fp16, x = input_265_cast)[name = tensor("hidden_states_99_cast")]; + tensor input_267_cast = add(x = input_259_cast, y = hidden_states_99_cast)[name = tensor("input_267_cast")]; + tensor input_269_axes_0 = const()[name = tensor("input_269_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_16_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_layer_norm2_weight_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769511424)))]; + tensor text_encoder_text_model_encoder_layers_16_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769514048)))]; + tensor input_269_cast = layer_norm(axes = input_269_axes_0, beta = text_encoder_text_model_encoder_layers_16_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_16_layer_norm2_weight_to_fp16, x = input_267_cast)[name = tensor("input_269_cast")]; + tensor text_encoder_text_model_encoder_layers_16_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769516672)))]; + tensor text_encoder_text_model_encoder_layers_16_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(782623936)))]; + tensor input_271_cast = linear(bias = text_encoder_text_model_encoder_layers_16_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_mlp_fc1_weight_to_fp16, x = input_269_cast)[name = tensor("input_271_cast")]; + tensor input_273_mode_0 = const()[name = tensor("input_273_mode_0"), val = tensor("EXACT")]; + tensor input_273_cast = gelu(mode = input_273_mode_0, x = input_271_cast)[name = tensor("input_273_cast")]; + tensor text_encoder_text_model_encoder_layers_16_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(782634240)))]; + tensor text_encoder_text_model_encoder_layers_16_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(795741504)))]; + tensor hidden_states_101_cast = linear(bias = text_encoder_text_model_encoder_layers_16_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_mlp_fc2_weight_to_fp16, x = input_273_cast)[name = tensor("hidden_states_101_cast")]; + tensor input_275_cast = add(x = input_267_cast, y = hidden_states_101_cast)[name = tensor("input_275_cast")]; + tensor hidden_states_103_axes_0 = const()[name = tensor("hidden_states_103_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_17_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(795744128)))]; + tensor text_encoder_text_model_encoder_layers_17_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(795746752)))]; + tensor hidden_states_103_cast = layer_norm(axes = hidden_states_103_axes_0, beta = text_encoder_text_model_encoder_layers_17_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_17_layer_norm1_weight_to_fp16, x = input_275_cast)[name = tensor("hidden_states_103_cast")]; + tensor 
text_encoder_text_model_encoder_layers_17_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(795749376)))]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(799026240)))]; + tensor var_1607_cast = linear(bias = text_encoder_text_model_encoder_layers_17_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_self_attn_q_proj_weight_to_fp16, x = hidden_states_103_cast)[name = tensor("op_1607_cast")]; + tensor var_1608_to_fp16 = const()[name = tensor("op_1608_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_107_cast = mul(x = var_1607_cast, y = var_1608_to_fp16)[name = tensor("tensor_107_cast")]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(799028864)))]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(802305728)))]; + tensor tensor_103_cast = linear(bias = text_encoder_text_model_encoder_layers_17_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_self_attn_k_proj_weight_to_fp16, x = hidden_states_103_cast)[name = tensor("tensor_103_cast")]; + tensor var_1613 = const()[name = tensor("op_1613"), val = tensor([1, -1, 20, 64])]; + tensor var_1614_cast = reshape(shape = var_1613, x = tensor_103_cast)[name = tensor("op_1614_cast")]; + tensor var_1615_perm_0 = const()[name = tensor("op_1615_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(802308352)))]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(805585216)))]; + tensor tensor_105_cast = linear(bias = text_encoder_text_model_encoder_layers_17_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_self_attn_v_proj_weight_to_fp16, x = hidden_states_103_cast)[name = tensor("tensor_105_cast")]; + tensor var_1620 = const()[name = tensor("op_1620"), val = tensor([1, -1, 20, 64])]; + tensor var_1621_cast = reshape(shape = var_1620, x = tensor_105_cast)[name = tensor("op_1621_cast")]; + tensor var_1622_perm_0 = const()[name = tensor("op_1622_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1629 = const()[name = tensor("op_1629"), val = tensor([1, 77, 20, 64])]; + tensor var_1630_cast = reshape(shape = var_1629, x = tensor_107_cast)[name = tensor("op_1630_cast")]; + tensor var_1631_perm_0 = const()[name = tensor("op_1631_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor 
var_1633 = const()[name = tensor("op_1633"), val = tensor([20, -1, 64])]; + tensor transpose_73 = transpose(perm = var_1631_perm_0, x = var_1630_cast)[name = tensor("transpose_73")]; + tensor query_states_35_cast = reshape(shape = var_1633, x = transpose_73)[name = tensor("query_states_35_cast")]; + tensor var_1635 = const()[name = tensor("op_1635"), val = tensor([20, -1, 64])]; + tensor transpose_75 = transpose(perm = var_1615_perm_0, x = var_1614_cast)[name = tensor("transpose_75")]; + tensor key_states_71_cast = reshape(shape = var_1635, x = transpose_75)[name = tensor("key_states_71_cast")]; + tensor var_1637 = const()[name = tensor("op_1637"), val = tensor([20, -1, 64])]; + tensor transpose_74 = transpose(perm = var_1622_perm_0, x = var_1621_cast)[name = tensor("transpose_74")]; + tensor value_states_71_cast = reshape(shape = var_1637, x = transpose_74)[name = tensor("value_states_71_cast")]; + tensor var_1640_perm_0 = const()[name = tensor("op_1640_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_103_transpose_x_0 = const()[name = tensor("attn_weights_103_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_103_transpose_y_0 = const()[name = tensor("attn_weights_103_transpose_y_0"), val = tensor(false)]; + tensor transpose_72 = transpose(perm = var_1640_perm_0, x = key_states_71_cast)[name = tensor("transpose_72")]; + tensor attn_weights_103_cast = matmul(transpose_x = attn_weights_103_transpose_x_0, transpose_y = attn_weights_103_transpose_y_0, x = query_states_35_cast, y = transpose_72)[name = tensor("attn_weights_103_cast")]; + tensor var_1642 = const()[name = tensor("op_1642"), val = tensor([1, 20, 77, 77])]; + tensor var_1643_cast = reshape(shape = var_1642, x = attn_weights_103_cast)[name = tensor("op_1643_cast")]; + tensor attn_weights_105_cast = add(x = var_1643_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_105_cast")]; + tensor var_1648 = const()[name = tensor("op_1648"), val = tensor([20, 77, 77])]; + tensor input_277_cast = reshape(shape = var_1648, x = attn_weights_105_cast)[name = tensor("input_277_cast")]; + tensor input_279_cast = softmax(axis = var_5, x = input_277_cast)[name = tensor("input_279_cast")]; + tensor attn_output_103_transpose_x_0 = const()[name = tensor("attn_output_103_transpose_x_0"), val = tensor(false)]; + tensor attn_output_103_transpose_y_0 = const()[name = tensor("attn_output_103_transpose_y_0"), val = tensor(false)]; + tensor attn_output_103_cast = matmul(transpose_x = attn_output_103_transpose_x_0, transpose_y = attn_output_103_transpose_y_0, x = input_279_cast, y = value_states_71_cast)[name = tensor("attn_output_103_cast")]; + tensor var_1653 = const()[name = tensor("op_1653"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_105_cast = reshape(shape = var_1653, x = attn_output_103_cast)[name = tensor("attn_output_105_cast")]; + tensor attn_output_107_perm_0 = const()[name = tensor("attn_output_107_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1656 = const()[name = tensor("op_1656"), val = tensor([1, 77, 1280])]; + tensor transpose_71 = transpose(perm = attn_output_107_perm_0, x = attn_output_105_cast)[name = tensor("transpose_71")]; + tensor input_281_cast = reshape(shape = var_1656, x = transpose_71)[name = tensor("input_281_cast")]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(805587840)))]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(808864704)))]; + tensor hidden_states_105_cast = linear(bias = text_encoder_text_model_encoder_layers_17_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_self_attn_out_proj_weight_to_fp16, x = input_281_cast)[name = tensor("hidden_states_105_cast")]; + tensor input_283_cast = add(x = input_275_cast, y = hidden_states_105_cast)[name = tensor("input_283_cast")]; + tensor input_285_axes_0 = const()[name = tensor("input_285_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_17_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(808867328)))]; + tensor text_encoder_text_model_encoder_layers_17_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(808869952)))]; + tensor input_285_cast = layer_norm(axes = input_285_axes_0, beta = text_encoder_text_model_encoder_layers_17_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_17_layer_norm2_weight_to_fp16, x = input_283_cast)[name = tensor("input_285_cast")]; + tensor text_encoder_text_model_encoder_layers_17_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(808872576)))]; + tensor text_encoder_text_model_encoder_layers_17_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(821979840)))]; + tensor input_287_cast = linear(bias = text_encoder_text_model_encoder_layers_17_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_mlp_fc1_weight_to_fp16, x = input_285_cast)[name = tensor("input_287_cast")]; + tensor input_289_mode_0 = const()[name = tensor("input_289_mode_0"), val = tensor("EXACT")]; + tensor input_289_cast = gelu(mode = input_289_mode_0, x = input_287_cast)[name = tensor("input_289_cast")]; + tensor text_encoder_text_model_encoder_layers_17_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(821990144)))]; + tensor text_encoder_text_model_encoder_layers_17_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(835097408)))]; + tensor hidden_states_107_cast = linear(bias = text_encoder_text_model_encoder_layers_17_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_mlp_fc2_weight_to_fp16, x = input_289_cast)[name = tensor("hidden_states_107_cast")]; + tensor input_291_cast = add(x = input_283_cast, y = hidden_states_107_cast)[name = tensor("input_291_cast")]; + tensor hidden_states_109_axes_0 = const()[name 
= tensor("hidden_states_109_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_18_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(835100032)))]; + tensor text_encoder_text_model_encoder_layers_18_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(835102656)))]; + tensor hidden_states_109_cast = layer_norm(axes = hidden_states_109_axes_0, beta = text_encoder_text_model_encoder_layers_18_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_18_layer_norm1_weight_to_fp16, x = input_291_cast)[name = tensor("hidden_states_109_cast")]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(835105280)))]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(838382144)))]; + tensor var_1694_cast = linear(bias = text_encoder_text_model_encoder_layers_18_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_self_attn_q_proj_weight_to_fp16, x = hidden_states_109_cast)[name = tensor("op_1694_cast")]; + tensor var_1695_to_fp16 = const()[name = tensor("op_1695_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_113_cast = mul(x = var_1694_cast, y = var_1695_to_fp16)[name = tensor("tensor_113_cast")]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(838384768)))]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(841661632)))]; + tensor tensor_109_cast = linear(bias = text_encoder_text_model_encoder_layers_18_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_self_attn_k_proj_weight_to_fp16, x = hidden_states_109_cast)[name = tensor("tensor_109_cast")]; + tensor var_1700 = const()[name = tensor("op_1700"), val = tensor([1, -1, 20, 64])]; + tensor var_1701_cast = reshape(shape = var_1700, x = tensor_109_cast)[name = tensor("op_1701_cast")]; + tensor var_1702_perm_0 = const()[name = tensor("op_1702_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(841664256)))]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_v_proj_bias_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844941120)))]; + tensor tensor_111_cast = linear(bias = text_encoder_text_model_encoder_layers_18_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_self_attn_v_proj_weight_to_fp16, x = hidden_states_109_cast)[name = tensor("tensor_111_cast")]; + tensor var_1707 = const()[name = tensor("op_1707"), val = tensor([1, -1, 20, 64])]; + tensor var_1708_cast = reshape(shape = var_1707, x = tensor_111_cast)[name = tensor("op_1708_cast")]; + tensor var_1709_perm_0 = const()[name = tensor("op_1709_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1716 = const()[name = tensor("op_1716"), val = tensor([1, 77, 20, 64])]; + tensor var_1717_cast = reshape(shape = var_1716, x = tensor_113_cast)[name = tensor("op_1717_cast")]; + tensor var_1718_perm_0 = const()[name = tensor("op_1718_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1720 = const()[name = tensor("op_1720"), val = tensor([20, -1, 64])]; + tensor transpose_68 = transpose(perm = var_1718_perm_0, x = var_1717_cast)[name = tensor("transpose_68")]; + tensor query_states_37_cast = reshape(shape = var_1720, x = transpose_68)[name = tensor("query_states_37_cast")]; + tensor var_1722 = const()[name = tensor("op_1722"), val = tensor([20, -1, 64])]; + tensor transpose_70 = transpose(perm = var_1702_perm_0, x = var_1701_cast)[name = tensor("transpose_70")]; + tensor key_states_75_cast = reshape(shape = var_1722, x = transpose_70)[name = tensor("key_states_75_cast")]; + tensor var_1724 = const()[name = tensor("op_1724"), val = tensor([20, -1, 64])]; + tensor transpose_69 = transpose(perm = var_1709_perm_0, x = var_1708_cast)[name = tensor("transpose_69")]; + tensor value_states_75_cast = reshape(shape = var_1724, x = transpose_69)[name = tensor("value_states_75_cast")]; + tensor var_1727_perm_0 = const()[name = tensor("op_1727_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_109_transpose_x_0 = const()[name = tensor("attn_weights_109_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_109_transpose_y_0 = const()[name = tensor("attn_weights_109_transpose_y_0"), val = tensor(false)]; + tensor transpose_67 = transpose(perm = var_1727_perm_0, x = key_states_75_cast)[name = tensor("transpose_67")]; + tensor attn_weights_109_cast = matmul(transpose_x = attn_weights_109_transpose_x_0, transpose_y = attn_weights_109_transpose_y_0, x = query_states_37_cast, y = transpose_67)[name = tensor("attn_weights_109_cast")]; + tensor var_1729 = const()[name = tensor("op_1729"), val = tensor([1, 20, 77, 77])]; + tensor var_1730_cast = reshape(shape = var_1729, x = attn_weights_109_cast)[name = tensor("op_1730_cast")]; + tensor attn_weights_111_cast = add(x = var_1730_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_111_cast")]; + tensor var_1735 = const()[name = tensor("op_1735"), val = tensor([20, 77, 77])]; + tensor input_293_cast = reshape(shape = var_1735, x = attn_weights_111_cast)[name = tensor("input_293_cast")]; + tensor input_295_cast = softmax(axis = var_5, x = input_293_cast)[name = tensor("input_295_cast")]; + tensor attn_output_109_transpose_x_0 = const()[name = tensor("attn_output_109_transpose_x_0"), val = tensor(false)]; + tensor attn_output_109_transpose_y_0 = const()[name = tensor("attn_output_109_transpose_y_0"), val = tensor(false)]; + tensor attn_output_109_cast = matmul(transpose_x = attn_output_109_transpose_x_0, transpose_y = attn_output_109_transpose_y_0, x = input_295_cast, y = 
value_states_75_cast)[name = tensor("attn_output_109_cast")]; + tensor var_1740 = const()[name = tensor("op_1740"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_111_cast = reshape(shape = var_1740, x = attn_output_109_cast)[name = tensor("attn_output_111_cast")]; + tensor attn_output_113_perm_0 = const()[name = tensor("attn_output_113_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1743 = const()[name = tensor("op_1743"), val = tensor([1, 77, 1280])]; + tensor transpose_66 = transpose(perm = attn_output_113_perm_0, x = attn_output_111_cast)[name = tensor("transpose_66")]; + tensor input_297_cast = reshape(shape = var_1743, x = transpose_66)[name = tensor("input_297_cast")]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844943744)))]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(848220608)))]; + tensor hidden_states_111_cast = linear(bias = text_encoder_text_model_encoder_layers_18_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_self_attn_out_proj_weight_to_fp16, x = input_297_cast)[name = tensor("hidden_states_111_cast")]; + tensor input_299_cast = add(x = input_291_cast, y = hidden_states_111_cast)[name = tensor("input_299_cast")]; + tensor input_301_axes_0 = const()[name = tensor("input_301_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_18_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(848223232)))]; + tensor text_encoder_text_model_encoder_layers_18_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(848225856)))]; + tensor input_301_cast = layer_norm(axes = input_301_axes_0, beta = text_encoder_text_model_encoder_layers_18_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_18_layer_norm2_weight_to_fp16, x = input_299_cast)[name = tensor("input_301_cast")]; + tensor text_encoder_text_model_encoder_layers_18_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(848228480)))]; + tensor text_encoder_text_model_encoder_layers_18_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(861335744)))]; + tensor input_303_cast = linear(bias = text_encoder_text_model_encoder_layers_18_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_mlp_fc1_weight_to_fp16, x = input_301_cast)[name = tensor("input_303_cast")]; + tensor input_305_mode_0 = const()[name = tensor("input_305_mode_0"), val = tensor("EXACT")]; + tensor input_305_cast = gelu(mode = input_305_mode_0, x = input_303_cast)[name = 
tensor("input_305_cast")]; + tensor text_encoder_text_model_encoder_layers_18_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(861346048)))]; + tensor text_encoder_text_model_encoder_layers_18_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(874453312)))]; + tensor hidden_states_113_cast = linear(bias = text_encoder_text_model_encoder_layers_18_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_mlp_fc2_weight_to_fp16, x = input_305_cast)[name = tensor("hidden_states_113_cast")]; + tensor input_307_cast = add(x = input_299_cast, y = hidden_states_113_cast)[name = tensor("input_307_cast")]; + tensor hidden_states_115_axes_0 = const()[name = tensor("hidden_states_115_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_19_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(874455936)))]; + tensor text_encoder_text_model_encoder_layers_19_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(874458560)))]; + tensor hidden_states_115_cast = layer_norm(axes = hidden_states_115_axes_0, beta = text_encoder_text_model_encoder_layers_19_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_19_layer_norm1_weight_to_fp16, x = input_307_cast)[name = tensor("hidden_states_115_cast")]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(874461184)))]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(877738048)))]; + tensor var_1781_cast = linear(bias = text_encoder_text_model_encoder_layers_19_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_self_attn_q_proj_weight_to_fp16, x = hidden_states_115_cast)[name = tensor("op_1781_cast")]; + tensor var_1782_to_fp16 = const()[name = tensor("op_1782_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_119_cast = mul(x = var_1781_cast, y = var_1782_to_fp16)[name = tensor("tensor_119_cast")]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(877740672)))]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(881017536)))]; + tensor tensor_115_cast = linear(bias = 
text_encoder_text_model_encoder_layers_19_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_self_attn_k_proj_weight_to_fp16, x = hidden_states_115_cast)[name = tensor("tensor_115_cast")]; + tensor var_1787 = const()[name = tensor("op_1787"), val = tensor([1, -1, 20, 64])]; + tensor var_1788_cast = reshape(shape = var_1787, x = tensor_115_cast)[name = tensor("op_1788_cast")]; + tensor var_1789_perm_0 = const()[name = tensor("op_1789_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(881020160)))]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(884297024)))]; + tensor tensor_117_cast = linear(bias = text_encoder_text_model_encoder_layers_19_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_self_attn_v_proj_weight_to_fp16, x = hidden_states_115_cast)[name = tensor("tensor_117_cast")]; + tensor var_1794 = const()[name = tensor("op_1794"), val = tensor([1, -1, 20, 64])]; + tensor var_1795_cast = reshape(shape = var_1794, x = tensor_117_cast)[name = tensor("op_1795_cast")]; + tensor var_1796_perm_0 = const()[name = tensor("op_1796_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1803 = const()[name = tensor("op_1803"), val = tensor([1, 77, 20, 64])]; + tensor var_1804_cast = reshape(shape = var_1803, x = tensor_119_cast)[name = tensor("op_1804_cast")]; + tensor var_1805_perm_0 = const()[name = tensor("op_1805_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1807 = const()[name = tensor("op_1807"), val = tensor([20, -1, 64])]; + tensor transpose_63 = transpose(perm = var_1805_perm_0, x = var_1804_cast)[name = tensor("transpose_63")]; + tensor query_states_39_cast = reshape(shape = var_1807, x = transpose_63)[name = tensor("query_states_39_cast")]; + tensor var_1809 = const()[name = tensor("op_1809"), val = tensor([20, -1, 64])]; + tensor transpose_65 = transpose(perm = var_1789_perm_0, x = var_1788_cast)[name = tensor("transpose_65")]; + tensor key_states_79_cast = reshape(shape = var_1809, x = transpose_65)[name = tensor("key_states_79_cast")]; + tensor var_1811 = const()[name = tensor("op_1811"), val = tensor([20, -1, 64])]; + tensor transpose_64 = transpose(perm = var_1796_perm_0, x = var_1795_cast)[name = tensor("transpose_64")]; + tensor value_states_79_cast = reshape(shape = var_1811, x = transpose_64)[name = tensor("value_states_79_cast")]; + tensor var_1814_perm_0 = const()[name = tensor("op_1814_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_115_transpose_x_0 = const()[name = tensor("attn_weights_115_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_115_transpose_y_0 = const()[name = tensor("attn_weights_115_transpose_y_0"), val = tensor(false)]; + tensor transpose_62 = transpose(perm = var_1814_perm_0, x = key_states_79_cast)[name = tensor("transpose_62")]; + tensor attn_weights_115_cast = matmul(transpose_x = attn_weights_115_transpose_x_0, transpose_y = attn_weights_115_transpose_y_0, x = query_states_39_cast, y = transpose_62)[name = tensor("attn_weights_115_cast")]; + tensor var_1816 = const()[name = tensor("op_1816"), 
val = tensor([1, 20, 77, 77])]; + tensor var_1817_cast = reshape(shape = var_1816, x = attn_weights_115_cast)[name = tensor("op_1817_cast")]; + tensor attn_weights_117_cast = add(x = var_1817_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_117_cast")]; + tensor var_1822 = const()[name = tensor("op_1822"), val = tensor([20, 77, 77])]; + tensor input_309_cast = reshape(shape = var_1822, x = attn_weights_117_cast)[name = tensor("input_309_cast")]; + tensor input_311_cast = softmax(axis = var_5, x = input_309_cast)[name = tensor("input_311_cast")]; + tensor attn_output_115_transpose_x_0 = const()[name = tensor("attn_output_115_transpose_x_0"), val = tensor(false)]; + tensor attn_output_115_transpose_y_0 = const()[name = tensor("attn_output_115_transpose_y_0"), val = tensor(false)]; + tensor attn_output_115_cast = matmul(transpose_x = attn_output_115_transpose_x_0, transpose_y = attn_output_115_transpose_y_0, x = input_311_cast, y = value_states_79_cast)[name = tensor("attn_output_115_cast")]; + tensor var_1827 = const()[name = tensor("op_1827"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_117_cast = reshape(shape = var_1827, x = attn_output_115_cast)[name = tensor("attn_output_117_cast")]; + tensor attn_output_119_perm_0 = const()[name = tensor("attn_output_119_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1830 = const()[name = tensor("op_1830"), val = tensor([1, 77, 1280])]; + tensor transpose_61 = transpose(perm = attn_output_119_perm_0, x = attn_output_117_cast)[name = tensor("transpose_61")]; + tensor input_313_cast = reshape(shape = var_1830, x = transpose_61)[name = tensor("input_313_cast")]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(884299648)))]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887576512)))]; + tensor hidden_states_117_cast = linear(bias = text_encoder_text_model_encoder_layers_19_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_self_attn_out_proj_weight_to_fp16, x = input_313_cast)[name = tensor("hidden_states_117_cast")]; + tensor input_315_cast = add(x = input_307_cast, y = hidden_states_117_cast)[name = tensor("input_315_cast")]; + tensor input_317_axes_0 = const()[name = tensor("input_317_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_19_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887579136)))]; + tensor text_encoder_text_model_encoder_layers_19_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887581760)))]; + tensor input_317_cast = layer_norm(axes = input_317_axes_0, beta = text_encoder_text_model_encoder_layers_19_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_19_layer_norm2_weight_to_fp16, x = input_315_cast)[name = 
tensor("input_317_cast")]; + tensor text_encoder_text_model_encoder_layers_19_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887584384)))]; + tensor text_encoder_text_model_encoder_layers_19_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(900691648)))]; + tensor input_319_cast = linear(bias = text_encoder_text_model_encoder_layers_19_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_mlp_fc1_weight_to_fp16, x = input_317_cast)[name = tensor("input_319_cast")]; + tensor input_321_mode_0 = const()[name = tensor("input_321_mode_0"), val = tensor("EXACT")]; + tensor input_321_cast = gelu(mode = input_321_mode_0, x = input_319_cast)[name = tensor("input_321_cast")]; + tensor text_encoder_text_model_encoder_layers_19_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(900701952)))]; + tensor text_encoder_text_model_encoder_layers_19_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(913809216)))]; + tensor hidden_states_119_cast = linear(bias = text_encoder_text_model_encoder_layers_19_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_mlp_fc2_weight_to_fp16, x = input_321_cast)[name = tensor("hidden_states_119_cast")]; + tensor input_323_cast = add(x = input_315_cast, y = hidden_states_119_cast)[name = tensor("input_323_cast")]; + tensor hidden_states_121_axes_0 = const()[name = tensor("hidden_states_121_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_20_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(913811840)))]; + tensor text_encoder_text_model_encoder_layers_20_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(913814464)))]; + tensor hidden_states_121_cast = layer_norm(axes = hidden_states_121_axes_0, beta = text_encoder_text_model_encoder_layers_20_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_20_layer_norm1_weight_to_fp16, x = input_323_cast)[name = tensor("hidden_states_121_cast")]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(913817088)))]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(917093952)))]; + tensor var_1868_cast = linear(bias = text_encoder_text_model_encoder_layers_20_self_attn_q_proj_bias_to_fp16, 
weight = text_encoder_text_model_encoder_layers_20_self_attn_q_proj_weight_to_fp16, x = hidden_states_121_cast)[name = tensor("op_1868_cast")]; + tensor var_1869_to_fp16 = const()[name = tensor("op_1869_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_125_cast = mul(x = var_1868_cast, y = var_1869_to_fp16)[name = tensor("tensor_125_cast")]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(917096576)))]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(920373440)))]; + tensor tensor_121_cast = linear(bias = text_encoder_text_model_encoder_layers_20_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_self_attn_k_proj_weight_to_fp16, x = hidden_states_121_cast)[name = tensor("tensor_121_cast")]; + tensor var_1874 = const()[name = tensor("op_1874"), val = tensor([1, -1, 20, 64])]; + tensor var_1875_cast = reshape(shape = var_1874, x = tensor_121_cast)[name = tensor("op_1875_cast")]; + tensor var_1876_perm_0 = const()[name = tensor("op_1876_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(920376064)))]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(923652928)))]; + tensor tensor_123_cast = linear(bias = text_encoder_text_model_encoder_layers_20_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_self_attn_v_proj_weight_to_fp16, x = hidden_states_121_cast)[name = tensor("tensor_123_cast")]; + tensor var_1881 = const()[name = tensor("op_1881"), val = tensor([1, -1, 20, 64])]; + tensor var_1882_cast = reshape(shape = var_1881, x = tensor_123_cast)[name = tensor("op_1882_cast")]; + tensor var_1883_perm_0 = const()[name = tensor("op_1883_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1890 = const()[name = tensor("op_1890"), val = tensor([1, 77, 20, 64])]; + tensor var_1891_cast = reshape(shape = var_1890, x = tensor_125_cast)[name = tensor("op_1891_cast")]; + tensor var_1892_perm_0 = const()[name = tensor("op_1892_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1894 = const()[name = tensor("op_1894"), val = tensor([20, -1, 64])]; + tensor transpose_58 = transpose(perm = var_1892_perm_0, x = var_1891_cast)[name = tensor("transpose_58")]; + tensor query_states_41_cast = reshape(shape = var_1894, x = transpose_58)[name = tensor("query_states_41_cast")]; + tensor var_1896 = const()[name = tensor("op_1896"), val = tensor([20, -1, 64])]; + tensor transpose_60 = transpose(perm = var_1876_perm_0, x = var_1875_cast)[name = tensor("transpose_60")]; + tensor key_states_83_cast = reshape(shape = var_1896, x = transpose_60)[name = tensor("key_states_83_cast")]; + tensor var_1898 = const()[name = tensor("op_1898"), val = tensor([20, -1, 
64])]; + tensor transpose_59 = transpose(perm = var_1883_perm_0, x = var_1882_cast)[name = tensor("transpose_59")]; + tensor value_states_83_cast = reshape(shape = var_1898, x = transpose_59)[name = tensor("value_states_83_cast")]; + tensor var_1901_perm_0 = const()[name = tensor("op_1901_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_121_transpose_x_0 = const()[name = tensor("attn_weights_121_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_121_transpose_y_0 = const()[name = tensor("attn_weights_121_transpose_y_0"), val = tensor(false)]; + tensor transpose_57 = transpose(perm = var_1901_perm_0, x = key_states_83_cast)[name = tensor("transpose_57")]; + tensor attn_weights_121_cast = matmul(transpose_x = attn_weights_121_transpose_x_0, transpose_y = attn_weights_121_transpose_y_0, x = query_states_41_cast, y = transpose_57)[name = tensor("attn_weights_121_cast")]; + tensor var_1903 = const()[name = tensor("op_1903"), val = tensor([1, 20, 77, 77])]; + tensor var_1904_cast = reshape(shape = var_1903, x = attn_weights_121_cast)[name = tensor("op_1904_cast")]; + tensor attn_weights_123_cast = add(x = var_1904_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_123_cast")]; + tensor var_1909 = const()[name = tensor("op_1909"), val = tensor([20, 77, 77])]; + tensor input_325_cast = reshape(shape = var_1909, x = attn_weights_123_cast)[name = tensor("input_325_cast")]; + tensor input_327_cast = softmax(axis = var_5, x = input_325_cast)[name = tensor("input_327_cast")]; + tensor attn_output_121_transpose_x_0 = const()[name = tensor("attn_output_121_transpose_x_0"), val = tensor(false)]; + tensor attn_output_121_transpose_y_0 = const()[name = tensor("attn_output_121_transpose_y_0"), val = tensor(false)]; + tensor attn_output_121_cast = matmul(transpose_x = attn_output_121_transpose_x_0, transpose_y = attn_output_121_transpose_y_0, x = input_327_cast, y = value_states_83_cast)[name = tensor("attn_output_121_cast")]; + tensor var_1914 = const()[name = tensor("op_1914"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_123_cast = reshape(shape = var_1914, x = attn_output_121_cast)[name = tensor("attn_output_123_cast")]; + tensor attn_output_125_perm_0 = const()[name = tensor("attn_output_125_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1917 = const()[name = tensor("op_1917"), val = tensor([1, 77, 1280])]; + tensor transpose_56 = transpose(perm = attn_output_125_perm_0, x = attn_output_123_cast)[name = tensor("transpose_56")]; + tensor input_329_cast = reshape(shape = var_1917, x = transpose_56)[name = tensor("input_329_cast")]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(923655552)))]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(926932416)))]; + tensor hidden_states_123_cast = linear(bias = text_encoder_text_model_encoder_layers_20_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_self_attn_out_proj_weight_to_fp16, x = input_329_cast)[name = tensor("hidden_states_123_cast")]; + tensor input_331_cast = add(x = input_323_cast, y = hidden_states_123_cast)[name = 
tensor("input_331_cast")]; + tensor input_333_axes_0 = const()[name = tensor("input_333_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_20_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(926935040)))]; + tensor text_encoder_text_model_encoder_layers_20_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(926937664)))]; + tensor input_333_cast = layer_norm(axes = input_333_axes_0, beta = text_encoder_text_model_encoder_layers_20_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_20_layer_norm2_weight_to_fp16, x = input_331_cast)[name = tensor("input_333_cast")]; + tensor text_encoder_text_model_encoder_layers_20_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(926940288)))]; + tensor text_encoder_text_model_encoder_layers_20_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(940047552)))]; + tensor input_335_cast = linear(bias = text_encoder_text_model_encoder_layers_20_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_mlp_fc1_weight_to_fp16, x = input_333_cast)[name = tensor("input_335_cast")]; + tensor input_337_mode_0 = const()[name = tensor("input_337_mode_0"), val = tensor("EXACT")]; + tensor input_337_cast = gelu(mode = input_337_mode_0, x = input_335_cast)[name = tensor("input_337_cast")]; + tensor text_encoder_text_model_encoder_layers_20_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(940057856)))]; + tensor text_encoder_text_model_encoder_layers_20_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953165120)))]; + tensor hidden_states_125_cast = linear(bias = text_encoder_text_model_encoder_layers_20_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_mlp_fc2_weight_to_fp16, x = input_337_cast)[name = tensor("hidden_states_125_cast")]; + tensor input_339_cast = add(x = input_331_cast, y = hidden_states_125_cast)[name = tensor("input_339_cast")]; + tensor hidden_states_127_axes_0 = const()[name = tensor("hidden_states_127_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_21_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953167744)))]; + tensor text_encoder_text_model_encoder_layers_21_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953170368)))]; + tensor hidden_states_127_cast = layer_norm(axes = 
hidden_states_127_axes_0, beta = text_encoder_text_model_encoder_layers_21_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_21_layer_norm1_weight_to_fp16, x = input_339_cast)[name = tensor("hidden_states_127_cast")]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953172992)))]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956449856)))]; + tensor var_1955_cast = linear(bias = text_encoder_text_model_encoder_layers_21_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_self_attn_q_proj_weight_to_fp16, x = hidden_states_127_cast)[name = tensor("op_1955_cast")]; + tensor var_1956_to_fp16 = const()[name = tensor("op_1956_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_131_cast = mul(x = var_1955_cast, y = var_1956_to_fp16)[name = tensor("tensor_131_cast")]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956452480)))]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(959729344)))]; + tensor tensor_127_cast = linear(bias = text_encoder_text_model_encoder_layers_21_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_self_attn_k_proj_weight_to_fp16, x = hidden_states_127_cast)[name = tensor("tensor_127_cast")]; + tensor var_1961 = const()[name = tensor("op_1961"), val = tensor([1, -1, 20, 64])]; + tensor var_1962_cast = reshape(shape = var_1961, x = tensor_127_cast)[name = tensor("op_1962_cast")]; + tensor var_1963_perm_0 = const()[name = tensor("op_1963_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(959731968)))]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(963008832)))]; + tensor tensor_129_cast = linear(bias = text_encoder_text_model_encoder_layers_21_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_self_attn_v_proj_weight_to_fp16, x = hidden_states_127_cast)[name = tensor("tensor_129_cast")]; + tensor var_1968 = const()[name = tensor("op_1968"), val = tensor([1, -1, 20, 64])]; + tensor var_1969_cast = reshape(shape = var_1968, x = tensor_129_cast)[name = tensor("op_1969_cast")]; + tensor var_1970_perm_0 = const()[name = tensor("op_1970_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1977 = 
const()[name = tensor("op_1977"), val = tensor([1, 77, 20, 64])]; + tensor var_1978_cast = reshape(shape = var_1977, x = tensor_131_cast)[name = tensor("op_1978_cast")]; + tensor var_1979_perm_0 = const()[name = tensor("op_1979_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1981 = const()[name = tensor("op_1981"), val = tensor([20, -1, 64])]; + tensor transpose_53 = transpose(perm = var_1979_perm_0, x = var_1978_cast)[name = tensor("transpose_53")]; + tensor query_states_43_cast = reshape(shape = var_1981, x = transpose_53)[name = tensor("query_states_43_cast")]; + tensor var_1983 = const()[name = tensor("op_1983"), val = tensor([20, -1, 64])]; + tensor transpose_55 = transpose(perm = var_1963_perm_0, x = var_1962_cast)[name = tensor("transpose_55")]; + tensor key_states_87_cast = reshape(shape = var_1983, x = transpose_55)[name = tensor("key_states_87_cast")]; + tensor var_1985 = const()[name = tensor("op_1985"), val = tensor([20, -1, 64])]; + tensor transpose_54 = transpose(perm = var_1970_perm_0, x = var_1969_cast)[name = tensor("transpose_54")]; + tensor value_states_87_cast = reshape(shape = var_1985, x = transpose_54)[name = tensor("value_states_87_cast")]; + tensor var_1988_perm_0 = const()[name = tensor("op_1988_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_127_transpose_x_0 = const()[name = tensor("attn_weights_127_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_127_transpose_y_0 = const()[name = tensor("attn_weights_127_transpose_y_0"), val = tensor(false)]; + tensor transpose_52 = transpose(perm = var_1988_perm_0, x = key_states_87_cast)[name = tensor("transpose_52")]; + tensor attn_weights_127_cast = matmul(transpose_x = attn_weights_127_transpose_x_0, transpose_y = attn_weights_127_transpose_y_0, x = query_states_43_cast, y = transpose_52)[name = tensor("attn_weights_127_cast")]; + tensor var_1990 = const()[name = tensor("op_1990"), val = tensor([1, 20, 77, 77])]; + tensor var_1991_cast = reshape(shape = var_1990, x = attn_weights_127_cast)[name = tensor("op_1991_cast")]; + tensor attn_weights_129_cast = add(x = var_1991_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_129_cast")]; + tensor var_1996 = const()[name = tensor("op_1996"), val = tensor([20, 77, 77])]; + tensor input_341_cast = reshape(shape = var_1996, x = attn_weights_129_cast)[name = tensor("input_341_cast")]; + tensor input_343_cast = softmax(axis = var_5, x = input_341_cast)[name = tensor("input_343_cast")]; + tensor attn_output_127_transpose_x_0 = const()[name = tensor("attn_output_127_transpose_x_0"), val = tensor(false)]; + tensor attn_output_127_transpose_y_0 = const()[name = tensor("attn_output_127_transpose_y_0"), val = tensor(false)]; + tensor attn_output_127_cast = matmul(transpose_x = attn_output_127_transpose_x_0, transpose_y = attn_output_127_transpose_y_0, x = input_343_cast, y = value_states_87_cast)[name = tensor("attn_output_127_cast")]; + tensor var_2001 = const()[name = tensor("op_2001"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_129_cast = reshape(shape = var_2001, x = attn_output_127_cast)[name = tensor("attn_output_129_cast")]; + tensor attn_output_131_perm_0 = const()[name = tensor("attn_output_131_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2004 = const()[name = tensor("op_2004"), val = tensor([1, 77, 1280])]; + tensor transpose_51 = transpose(perm = attn_output_131_perm_0, x = attn_output_129_cast)[name = tensor("transpose_51")]; + tensor input_345_cast = reshape(shape = var_2004, x = transpose_51)[name = 
tensor("input_345_cast")]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(963011456)))]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(966288320)))]; + tensor hidden_states_129_cast = linear(bias = text_encoder_text_model_encoder_layers_21_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_self_attn_out_proj_weight_to_fp16, x = input_345_cast)[name = tensor("hidden_states_129_cast")]; + tensor input_347_cast = add(x = input_339_cast, y = hidden_states_129_cast)[name = tensor("input_347_cast")]; + tensor input_349_axes_0 = const()[name = tensor("input_349_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_21_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(966290944)))]; + tensor text_encoder_text_model_encoder_layers_21_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(966293568)))]; + tensor input_349_cast = layer_norm(axes = input_349_axes_0, beta = text_encoder_text_model_encoder_layers_21_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_21_layer_norm2_weight_to_fp16, x = input_347_cast)[name = tensor("input_349_cast")]; + tensor text_encoder_text_model_encoder_layers_21_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(966296192)))]; + tensor text_encoder_text_model_encoder_layers_21_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(979403456)))]; + tensor input_351_cast = linear(bias = text_encoder_text_model_encoder_layers_21_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_mlp_fc1_weight_to_fp16, x = input_349_cast)[name = tensor("input_351_cast")]; + tensor input_353_mode_0 = const()[name = tensor("input_353_mode_0"), val = tensor("EXACT")]; + tensor input_353_cast = gelu(mode = input_353_mode_0, x = input_351_cast)[name = tensor("input_353_cast")]; + tensor text_encoder_text_model_encoder_layers_21_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(979413760)))]; + tensor text_encoder_text_model_encoder_layers_21_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(992521024)))]; + tensor hidden_states_131_cast = linear(bias = text_encoder_text_model_encoder_layers_21_mlp_fc2_bias_to_fp16, weight = 
text_encoder_text_model_encoder_layers_21_mlp_fc2_weight_to_fp16, x = input_353_cast)[name = tensor("hidden_states_131_cast")]; + tensor input_355_cast = add(x = input_347_cast, y = hidden_states_131_cast)[name = tensor("input_355_cast")]; + tensor hidden_states_133_axes_0 = const()[name = tensor("hidden_states_133_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_22_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(992523648)))]; + tensor text_encoder_text_model_encoder_layers_22_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(992526272)))]; + tensor hidden_states_133_cast = layer_norm(axes = hidden_states_133_axes_0, beta = text_encoder_text_model_encoder_layers_22_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_22_layer_norm1_weight_to_fp16, x = input_355_cast)[name = tensor("hidden_states_133_cast")]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(992528896)))]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(995805760)))]; + tensor var_2042_cast = linear(bias = text_encoder_text_model_encoder_layers_22_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_self_attn_q_proj_weight_to_fp16, x = hidden_states_133_cast)[name = tensor("op_2042_cast")]; + tensor var_2043_to_fp16 = const()[name = tensor("op_2043_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_137_cast = mul(x = var_2042_cast, y = var_2043_to_fp16)[name = tensor("tensor_137_cast")]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(995808384)))]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(999085248)))]; + tensor tensor_133_cast = linear(bias = text_encoder_text_model_encoder_layers_22_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_self_attn_k_proj_weight_to_fp16, x = hidden_states_133_cast)[name = tensor("tensor_133_cast")]; + tensor var_2048 = const()[name = tensor("op_2048"), val = tensor([1, -1, 20, 64])]; + tensor var_2049_cast = reshape(shape = var_2048, x = tensor_133_cast)[name = tensor("op_2049_cast")]; + tensor var_2050_perm_0 = const()[name = tensor("op_2050_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_v_proj_weight_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(999087872)))]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002364736)))]; + tensor tensor_135_cast = linear(bias = text_encoder_text_model_encoder_layers_22_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_self_attn_v_proj_weight_to_fp16, x = hidden_states_133_cast)[name = tensor("tensor_135_cast")]; + tensor var_2055 = const()[name = tensor("op_2055"), val = tensor([1, -1, 20, 64])]; + tensor var_2056_cast = reshape(shape = var_2055, x = tensor_135_cast)[name = tensor("op_2056_cast")]; + tensor var_2057_perm_0 = const()[name = tensor("op_2057_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2064 = const()[name = tensor("op_2064"), val = tensor([1, 77, 20, 64])]; + tensor var_2065_cast = reshape(shape = var_2064, x = tensor_137_cast)[name = tensor("op_2065_cast")]; + tensor var_2066_perm_0 = const()[name = tensor("op_2066_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2068 = const()[name = tensor("op_2068"), val = tensor([20, -1, 64])]; + tensor transpose_48 = transpose(perm = var_2066_perm_0, x = var_2065_cast)[name = tensor("transpose_48")]; + tensor query_states_45_cast = reshape(shape = var_2068, x = transpose_48)[name = tensor("query_states_45_cast")]; + tensor var_2070 = const()[name = tensor("op_2070"), val = tensor([20, -1, 64])]; + tensor transpose_50 = transpose(perm = var_2050_perm_0, x = var_2049_cast)[name = tensor("transpose_50")]; + tensor key_states_91_cast = reshape(shape = var_2070, x = transpose_50)[name = tensor("key_states_91_cast")]; + tensor var_2072 = const()[name = tensor("op_2072"), val = tensor([20, -1, 64])]; + tensor transpose_49 = transpose(perm = var_2057_perm_0, x = var_2056_cast)[name = tensor("transpose_49")]; + tensor value_states_91_cast = reshape(shape = var_2072, x = transpose_49)[name = tensor("value_states_91_cast")]; + tensor var_2075_perm_0 = const()[name = tensor("op_2075_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_133_transpose_x_0 = const()[name = tensor("attn_weights_133_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_133_transpose_y_0 = const()[name = tensor("attn_weights_133_transpose_y_0"), val = tensor(false)]; + tensor transpose_47 = transpose(perm = var_2075_perm_0, x = key_states_91_cast)[name = tensor("transpose_47")]; + tensor attn_weights_133_cast = matmul(transpose_x = attn_weights_133_transpose_x_0, transpose_y = attn_weights_133_transpose_y_0, x = query_states_45_cast, y = transpose_47)[name = tensor("attn_weights_133_cast")]; + tensor var_2077 = const()[name = tensor("op_2077"), val = tensor([1, 20, 77, 77])]; + tensor var_2078_cast = reshape(shape = var_2077, x = attn_weights_133_cast)[name = tensor("op_2078_cast")]; + tensor attn_weights_135_cast = add(x = var_2078_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_135_cast")]; + tensor var_2083 = const()[name = tensor("op_2083"), val = tensor([20, 77, 77])]; + tensor input_357_cast = reshape(shape = var_2083, x = attn_weights_135_cast)[name = tensor("input_357_cast")]; + tensor input_359_cast = softmax(axis = var_5, x = input_357_cast)[name = tensor("input_359_cast")]; + tensor attn_output_133_transpose_x_0 = const()[name = tensor("attn_output_133_transpose_x_0"), val = tensor(false)]; + 
tensor attn_output_133_transpose_y_0 = const()[name = tensor("attn_output_133_transpose_y_0"), val = tensor(false)]; + tensor attn_output_133_cast = matmul(transpose_x = attn_output_133_transpose_x_0, transpose_y = attn_output_133_transpose_y_0, x = input_359_cast, y = value_states_91_cast)[name = tensor("attn_output_133_cast")]; + tensor var_2088 = const()[name = tensor("op_2088"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_135_cast = reshape(shape = var_2088, x = attn_output_133_cast)[name = tensor("attn_output_135_cast")]; + tensor attn_output_137_perm_0 = const()[name = tensor("attn_output_137_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2091 = const()[name = tensor("op_2091"), val = tensor([1, 77, 1280])]; + tensor transpose_46 = transpose(perm = attn_output_137_perm_0, x = attn_output_135_cast)[name = tensor("transpose_46")]; + tensor input_361_cast = reshape(shape = var_2091, x = transpose_46)[name = tensor("input_361_cast")]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002367360)))]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1005644224)))]; + tensor hidden_states_135_cast = linear(bias = text_encoder_text_model_encoder_layers_22_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_self_attn_out_proj_weight_to_fp16, x = input_361_cast)[name = tensor("hidden_states_135_cast")]; + tensor input_363_cast = add(x = input_355_cast, y = hidden_states_135_cast)[name = tensor("input_363_cast")]; + tensor input_365_axes_0 = const()[name = tensor("input_365_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_22_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1005646848)))]; + tensor text_encoder_text_model_encoder_layers_22_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1005649472)))]; + tensor input_365_cast = layer_norm(axes = input_365_axes_0, beta = text_encoder_text_model_encoder_layers_22_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_22_layer_norm2_weight_to_fp16, x = input_363_cast)[name = tensor("input_365_cast")]; + tensor text_encoder_text_model_encoder_layers_22_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1005652096)))]; + tensor text_encoder_text_model_encoder_layers_22_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1018759360)))]; + tensor input_367_cast = linear(bias = text_encoder_text_model_encoder_layers_22_mlp_fc1_bias_to_fp16, weight = 
text_encoder_text_model_encoder_layers_22_mlp_fc1_weight_to_fp16, x = input_365_cast)[name = tensor("input_367_cast")]; + tensor input_369_mode_0 = const()[name = tensor("input_369_mode_0"), val = tensor("EXACT")]; + tensor input_369_cast = gelu(mode = input_369_mode_0, x = input_367_cast)[name = tensor("input_369_cast")]; + tensor text_encoder_text_model_encoder_layers_22_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1018769664)))]; + tensor text_encoder_text_model_encoder_layers_22_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1031876928)))]; + tensor hidden_states_137_cast = linear(bias = text_encoder_text_model_encoder_layers_22_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_mlp_fc2_weight_to_fp16, x = input_369_cast)[name = tensor("hidden_states_137_cast")]; + tensor input_371_cast = add(x = input_363_cast, y = hidden_states_137_cast)[name = tensor("input_371_cast")]; + tensor hidden_states_139_axes_0 = const()[name = tensor("hidden_states_139_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_23_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1031879552)))]; + tensor text_encoder_text_model_encoder_layers_23_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1031882176)))]; + tensor hidden_states_139_cast = layer_norm(axes = hidden_states_139_axes_0, beta = text_encoder_text_model_encoder_layers_23_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_23_layer_norm1_weight_to_fp16, x = input_371_cast)[name = tensor("hidden_states_139_cast")]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1031884800)))]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1035161664)))]; + tensor var_2129_cast = linear(bias = text_encoder_text_model_encoder_layers_23_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_23_self_attn_q_proj_weight_to_fp16, x = hidden_states_139_cast)[name = tensor("op_2129_cast")]; + tensor var_2130_to_fp16 = const()[name = tensor("op_2130_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_143_cast = mul(x = var_2129_cast, y = var_2130_to_fp16)[name = tensor("tensor_143_cast")]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1035164288)))]; + tensor 
text_encoder_text_model_encoder_layers_23_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1038441152)))]; + tensor tensor_139_cast = linear(bias = text_encoder_text_model_encoder_layers_23_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_23_self_attn_k_proj_weight_to_fp16, x = hidden_states_139_cast)[name = tensor("tensor_139_cast")]; + tensor var_2135 = const()[name = tensor("op_2135"), val = tensor([1, -1, 20, 64])]; + tensor var_2136_cast = reshape(shape = var_2135, x = tensor_139_cast)[name = tensor("op_2136_cast")]; + tensor var_2137_perm_0 = const()[name = tensor("op_2137_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1038443776)))]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1041720640)))]; + tensor tensor_141_cast = linear(bias = text_encoder_text_model_encoder_layers_23_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_23_self_attn_v_proj_weight_to_fp16, x = hidden_states_139_cast)[name = tensor("tensor_141_cast")]; + tensor var_2142 = const()[name = tensor("op_2142"), val = tensor([1, -1, 20, 64])]; + tensor var_2143_cast = reshape(shape = var_2142, x = tensor_141_cast)[name = tensor("op_2143_cast")]; + tensor var_2144_perm_0 = const()[name = tensor("op_2144_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2151 = const()[name = tensor("op_2151"), val = tensor([1, 77, 20, 64])]; + tensor var_2152_cast = reshape(shape = var_2151, x = tensor_143_cast)[name = tensor("op_2152_cast")]; + tensor var_2153_perm_0 = const()[name = tensor("op_2153_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2155 = const()[name = tensor("op_2155"), val = tensor([20, -1, 64])]; + tensor transpose_43 = transpose(perm = var_2153_perm_0, x = var_2152_cast)[name = tensor("transpose_43")]; + tensor query_states_47_cast = reshape(shape = var_2155, x = transpose_43)[name = tensor("query_states_47_cast")]; + tensor var_2157 = const()[name = tensor("op_2157"), val = tensor([20, -1, 64])]; + tensor transpose_45 = transpose(perm = var_2137_perm_0, x = var_2136_cast)[name = tensor("transpose_45")]; + tensor key_states_95_cast = reshape(shape = var_2157, x = transpose_45)[name = tensor("key_states_95_cast")]; + tensor var_2159 = const()[name = tensor("op_2159"), val = tensor([20, -1, 64])]; + tensor transpose_44 = transpose(perm = var_2144_perm_0, x = var_2143_cast)[name = tensor("transpose_44")]; + tensor value_states_95_cast = reshape(shape = var_2159, x = transpose_44)[name = tensor("value_states_95_cast")]; + tensor var_2162_perm_0 = const()[name = tensor("op_2162_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_139_transpose_x_0 = const()[name = tensor("attn_weights_139_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_139_transpose_y_0 = const()[name = tensor("attn_weights_139_transpose_y_0"), val = tensor(false)]; + tensor transpose_42 = transpose(perm = var_2162_perm_0, x = 
key_states_95_cast)[name = tensor("transpose_42")]; + tensor attn_weights_139_cast = matmul(transpose_x = attn_weights_139_transpose_x_0, transpose_y = attn_weights_139_transpose_y_0, x = query_states_47_cast, y = transpose_42)[name = tensor("attn_weights_139_cast")]; + tensor var_2164 = const()[name = tensor("op_2164"), val = tensor([1, 20, 77, 77])]; + tensor var_2165_cast = reshape(shape = var_2164, x = attn_weights_139_cast)[name = tensor("op_2165_cast")]; + tensor attn_weights_141_cast = add(x = var_2165_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_141_cast")]; + tensor var_2170 = const()[name = tensor("op_2170"), val = tensor([20, 77, 77])]; + tensor input_373_cast = reshape(shape = var_2170, x = attn_weights_141_cast)[name = tensor("input_373_cast")]; + tensor input_375_cast = softmax(axis = var_5, x = input_373_cast)[name = tensor("input_375_cast")]; + tensor attn_output_139_transpose_x_0 = const()[name = tensor("attn_output_139_transpose_x_0"), val = tensor(false)]; + tensor attn_output_139_transpose_y_0 = const()[name = tensor("attn_output_139_transpose_y_0"), val = tensor(false)]; + tensor attn_output_139_cast = matmul(transpose_x = attn_output_139_transpose_x_0, transpose_y = attn_output_139_transpose_y_0, x = input_375_cast, y = value_states_95_cast)[name = tensor("attn_output_139_cast")]; + tensor var_2175 = const()[name = tensor("op_2175"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_141_cast = reshape(shape = var_2175, x = attn_output_139_cast)[name = tensor("attn_output_141_cast")]; + tensor attn_output_143_perm_0 = const()[name = tensor("attn_output_143_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2178 = const()[name = tensor("op_2178"), val = tensor([1, 77, 1280])]; + tensor transpose_41 = transpose(perm = attn_output_143_perm_0, x = attn_output_141_cast)[name = tensor("transpose_41")]; + tensor input_377_cast = reshape(shape = var_2178, x = transpose_41)[name = tensor("input_377_cast")]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1041723264)))]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1045000128)))]; + tensor hidden_states_141_cast = linear(bias = text_encoder_text_model_encoder_layers_23_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_23_self_attn_out_proj_weight_to_fp16, x = input_377_cast)[name = tensor("hidden_states_141_cast")]; + tensor input_379_cast = add(x = input_371_cast, y = hidden_states_141_cast)[name = tensor("input_379_cast")]; + tensor input_381_axes_0 = const()[name = tensor("input_381_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_23_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1045002752)))]; + tensor text_encoder_text_model_encoder_layers_23_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1045005376)))]; + tensor input_381_cast = layer_norm(axes = input_381_axes_0, beta = text_encoder_text_model_encoder_layers_23_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_23_layer_norm2_weight_to_fp16, x = input_379_cast)[name = tensor("input_381_cast")]; + tensor text_encoder_text_model_encoder_layers_23_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1045008000)))]; + tensor text_encoder_text_model_encoder_layers_23_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1058115264)))]; + tensor input_383_cast = linear(bias = text_encoder_text_model_encoder_layers_23_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_23_mlp_fc1_weight_to_fp16, x = input_381_cast)[name = tensor("input_383_cast")]; + tensor input_385_mode_0 = const()[name = tensor("input_385_mode_0"), val = tensor("EXACT")]; + tensor input_385_cast = gelu(mode = input_385_mode_0, x = input_383_cast)[name = tensor("input_385_cast")]; + tensor text_encoder_text_model_encoder_layers_23_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1058125568)))]; + tensor text_encoder_text_model_encoder_layers_23_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1071232832)))]; + tensor hidden_states_143_cast = linear(bias = text_encoder_text_model_encoder_layers_23_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_23_mlp_fc2_weight_to_fp16, x = input_385_cast)[name = tensor("hidden_states_143_cast")]; + tensor input_387_cast = add(x = input_379_cast, y = hidden_states_143_cast)[name = tensor("input_387_cast")]; + tensor hidden_states_145_axes_0 = const()[name = tensor("hidden_states_145_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_24_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1071235456)))]; + tensor text_encoder_text_model_encoder_layers_24_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1071238080)))]; + tensor hidden_states_145_cast = layer_norm(axes = hidden_states_145_axes_0, beta = text_encoder_text_model_encoder_layers_24_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_24_layer_norm1_weight_to_fp16, x = input_387_cast)[name = tensor("hidden_states_145_cast")]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1071240704)))]; + tensor 
text_encoder_text_model_encoder_layers_24_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1074517568)))]; + tensor var_2216_cast = linear(bias = text_encoder_text_model_encoder_layers_24_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_24_self_attn_q_proj_weight_to_fp16, x = hidden_states_145_cast)[name = tensor("op_2216_cast")]; + tensor var_2217_to_fp16 = const()[name = tensor("op_2217_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_149_cast = mul(x = var_2216_cast, y = var_2217_to_fp16)[name = tensor("tensor_149_cast")]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1074520192)))]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1077797056)))]; + tensor tensor_145_cast = linear(bias = text_encoder_text_model_encoder_layers_24_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_24_self_attn_k_proj_weight_to_fp16, x = hidden_states_145_cast)[name = tensor("tensor_145_cast")]; + tensor var_2222 = const()[name = tensor("op_2222"), val = tensor([1, -1, 20, 64])]; + tensor var_2223_cast = reshape(shape = var_2222, x = tensor_145_cast)[name = tensor("op_2223_cast")]; + tensor var_2224_perm_0 = const()[name = tensor("op_2224_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1077799680)))]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1081076544)))]; + tensor tensor_147_cast = linear(bias = text_encoder_text_model_encoder_layers_24_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_24_self_attn_v_proj_weight_to_fp16, x = hidden_states_145_cast)[name = tensor("tensor_147_cast")]; + tensor var_2229 = const()[name = tensor("op_2229"), val = tensor([1, -1, 20, 64])]; + tensor var_2230_cast = reshape(shape = var_2229, x = tensor_147_cast)[name = tensor("op_2230_cast")]; + tensor var_2231_perm_0 = const()[name = tensor("op_2231_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2238 = const()[name = tensor("op_2238"), val = tensor([1, 77, 20, 64])]; + tensor var_2239_cast = reshape(shape = var_2238, x = tensor_149_cast)[name = tensor("op_2239_cast")]; + tensor var_2240_perm_0 = const()[name = tensor("op_2240_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2242 = const()[name = tensor("op_2242"), val = tensor([20, -1, 64])]; + tensor transpose_38 = transpose(perm = var_2240_perm_0, x = var_2239_cast)[name = tensor("transpose_38")]; + tensor query_states_49_cast = reshape(shape = var_2242, x = transpose_38)[name = 
tensor("query_states_49_cast")]; + tensor var_2244 = const()[name = tensor("op_2244"), val = tensor([20, -1, 64])]; + tensor transpose_40 = transpose(perm = var_2224_perm_0, x = var_2223_cast)[name = tensor("transpose_40")]; + tensor key_states_99_cast = reshape(shape = var_2244, x = transpose_40)[name = tensor("key_states_99_cast")]; + tensor var_2246 = const()[name = tensor("op_2246"), val = tensor([20, -1, 64])]; + tensor transpose_39 = transpose(perm = var_2231_perm_0, x = var_2230_cast)[name = tensor("transpose_39")]; + tensor value_states_99_cast = reshape(shape = var_2246, x = transpose_39)[name = tensor("value_states_99_cast")]; + tensor var_2249_perm_0 = const()[name = tensor("op_2249_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_145_transpose_x_0 = const()[name = tensor("attn_weights_145_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_145_transpose_y_0 = const()[name = tensor("attn_weights_145_transpose_y_0"), val = tensor(false)]; + tensor transpose_37 = transpose(perm = var_2249_perm_0, x = key_states_99_cast)[name = tensor("transpose_37")]; + tensor attn_weights_145_cast = matmul(transpose_x = attn_weights_145_transpose_x_0, transpose_y = attn_weights_145_transpose_y_0, x = query_states_49_cast, y = transpose_37)[name = tensor("attn_weights_145_cast")]; + tensor var_2251 = const()[name = tensor("op_2251"), val = tensor([1, 20, 77, 77])]; + tensor var_2252_cast = reshape(shape = var_2251, x = attn_weights_145_cast)[name = tensor("op_2252_cast")]; + tensor attn_weights_147_cast = add(x = var_2252_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_147_cast")]; + tensor var_2257 = const()[name = tensor("op_2257"), val = tensor([20, 77, 77])]; + tensor input_389_cast = reshape(shape = var_2257, x = attn_weights_147_cast)[name = tensor("input_389_cast")]; + tensor input_391_cast = softmax(axis = var_5, x = input_389_cast)[name = tensor("input_391_cast")]; + tensor attn_output_145_transpose_x_0 = const()[name = tensor("attn_output_145_transpose_x_0"), val = tensor(false)]; + tensor attn_output_145_transpose_y_0 = const()[name = tensor("attn_output_145_transpose_y_0"), val = tensor(false)]; + tensor attn_output_145_cast = matmul(transpose_x = attn_output_145_transpose_x_0, transpose_y = attn_output_145_transpose_y_0, x = input_391_cast, y = value_states_99_cast)[name = tensor("attn_output_145_cast")]; + tensor var_2262 = const()[name = tensor("op_2262"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_147_cast = reshape(shape = var_2262, x = attn_output_145_cast)[name = tensor("attn_output_147_cast")]; + tensor attn_output_149_perm_0 = const()[name = tensor("attn_output_149_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2265 = const()[name = tensor("op_2265"), val = tensor([1, 77, 1280])]; + tensor transpose_36 = transpose(perm = attn_output_149_perm_0, x = attn_output_147_cast)[name = tensor("transpose_36")]; + tensor input_393_cast = reshape(shape = var_2265, x = transpose_36)[name = tensor("input_393_cast")]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1081079168)))]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1084356032)))]; + tensor hidden_states_147_cast = linear(bias = text_encoder_text_model_encoder_layers_24_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_24_self_attn_out_proj_weight_to_fp16, x = input_393_cast)[name = tensor("hidden_states_147_cast")]; + tensor input_395_cast = add(x = input_387_cast, y = hidden_states_147_cast)[name = tensor("input_395_cast")]; + tensor input_397_axes_0 = const()[name = tensor("input_397_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_24_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1084358656)))]; + tensor text_encoder_text_model_encoder_layers_24_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1084361280)))]; + tensor input_397_cast = layer_norm(axes = input_397_axes_0, beta = text_encoder_text_model_encoder_layers_24_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_24_layer_norm2_weight_to_fp16, x = input_395_cast)[name = tensor("input_397_cast")]; + tensor text_encoder_text_model_encoder_layers_24_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1084363904)))]; + tensor text_encoder_text_model_encoder_layers_24_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1097471168)))]; + tensor input_399_cast = linear(bias = text_encoder_text_model_encoder_layers_24_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_24_mlp_fc1_weight_to_fp16, x = input_397_cast)[name = tensor("input_399_cast")]; + tensor input_401_mode_0 = const()[name = tensor("input_401_mode_0"), val = tensor("EXACT")]; + tensor input_401_cast = gelu(mode = input_401_mode_0, x = input_399_cast)[name = tensor("input_401_cast")]; + tensor text_encoder_text_model_encoder_layers_24_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1097481472)))]; + tensor text_encoder_text_model_encoder_layers_24_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1110588736)))]; + tensor hidden_states_149_cast = linear(bias = text_encoder_text_model_encoder_layers_24_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_24_mlp_fc2_weight_to_fp16, x = input_401_cast)[name = tensor("hidden_states_149_cast")]; + tensor input_403_cast = add(x = input_395_cast, y = hidden_states_149_cast)[name = tensor("input_403_cast")]; + tensor hidden_states_151_axes_0 = const()[name = tensor("hidden_states_151_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_25_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_layer_norm1_weight_to_fp16"), val 
= tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1110591360)))]; + tensor text_encoder_text_model_encoder_layers_25_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1110593984)))]; + tensor hidden_states_151_cast = layer_norm(axes = hidden_states_151_axes_0, beta = text_encoder_text_model_encoder_layers_25_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_25_layer_norm1_weight_to_fp16, x = input_403_cast)[name = tensor("hidden_states_151_cast")]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1110596608)))]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1113873472)))]; + tensor var_2303_cast = linear(bias = text_encoder_text_model_encoder_layers_25_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_25_self_attn_q_proj_weight_to_fp16, x = hidden_states_151_cast)[name = tensor("op_2303_cast")]; + tensor var_2304_to_fp16 = const()[name = tensor("op_2304_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_155_cast = mul(x = var_2303_cast, y = var_2304_to_fp16)[name = tensor("tensor_155_cast")]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1113876096)))]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1117152960)))]; + tensor tensor_151_cast = linear(bias = text_encoder_text_model_encoder_layers_25_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_25_self_attn_k_proj_weight_to_fp16, x = hidden_states_151_cast)[name = tensor("tensor_151_cast")]; + tensor var_2309 = const()[name = tensor("op_2309"), val = tensor([1, -1, 20, 64])]; + tensor var_2310_cast = reshape(shape = var_2309, x = tensor_151_cast)[name = tensor("op_2310_cast")]; + tensor var_2311_perm_0 = const()[name = tensor("op_2311_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1117155584)))]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1120432448)))]; + tensor tensor_153_cast = linear(bias = text_encoder_text_model_encoder_layers_25_self_attn_v_proj_bias_to_fp16, weight = 
text_encoder_text_model_encoder_layers_25_self_attn_v_proj_weight_to_fp16, x = hidden_states_151_cast)[name = tensor("tensor_153_cast")]; + tensor var_2316 = const()[name = tensor("op_2316"), val = tensor([1, -1, 20, 64])]; + tensor var_2317_cast = reshape(shape = var_2316, x = tensor_153_cast)[name = tensor("op_2317_cast")]; + tensor var_2318_perm_0 = const()[name = tensor("op_2318_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2325 = const()[name = tensor("op_2325"), val = tensor([1, 77, 20, 64])]; + tensor var_2326_cast = reshape(shape = var_2325, x = tensor_155_cast)[name = tensor("op_2326_cast")]; + tensor var_2327_perm_0 = const()[name = tensor("op_2327_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2329 = const()[name = tensor("op_2329"), val = tensor([20, -1, 64])]; + tensor transpose_33 = transpose(perm = var_2327_perm_0, x = var_2326_cast)[name = tensor("transpose_33")]; + tensor query_states_51_cast = reshape(shape = var_2329, x = transpose_33)[name = tensor("query_states_51_cast")]; + tensor var_2331 = const()[name = tensor("op_2331"), val = tensor([20, -1, 64])]; + tensor transpose_35 = transpose(perm = var_2311_perm_0, x = var_2310_cast)[name = tensor("transpose_35")]; + tensor key_states_103_cast = reshape(shape = var_2331, x = transpose_35)[name = tensor("key_states_103_cast")]; + tensor var_2333 = const()[name = tensor("op_2333"), val = tensor([20, -1, 64])]; + tensor transpose_34 = transpose(perm = var_2318_perm_0, x = var_2317_cast)[name = tensor("transpose_34")]; + tensor value_states_103_cast = reshape(shape = var_2333, x = transpose_34)[name = tensor("value_states_103_cast")]; + tensor var_2336_perm_0 = const()[name = tensor("op_2336_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_151_transpose_x_0 = const()[name = tensor("attn_weights_151_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_151_transpose_y_0 = const()[name = tensor("attn_weights_151_transpose_y_0"), val = tensor(false)]; + tensor transpose_32 = transpose(perm = var_2336_perm_0, x = key_states_103_cast)[name = tensor("transpose_32")]; + tensor attn_weights_151_cast = matmul(transpose_x = attn_weights_151_transpose_x_0, transpose_y = attn_weights_151_transpose_y_0, x = query_states_51_cast, y = transpose_32)[name = tensor("attn_weights_151_cast")]; + tensor var_2338 = const()[name = tensor("op_2338"), val = tensor([1, 20, 77, 77])]; + tensor var_2339_cast = reshape(shape = var_2338, x = attn_weights_151_cast)[name = tensor("op_2339_cast")]; + tensor attn_weights_153_cast = add(x = var_2339_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_153_cast")]; + tensor var_2344 = const()[name = tensor("op_2344"), val = tensor([20, 77, 77])]; + tensor input_405_cast = reshape(shape = var_2344, x = attn_weights_153_cast)[name = tensor("input_405_cast")]; + tensor input_407_cast = softmax(axis = var_5, x = input_405_cast)[name = tensor("input_407_cast")]; + tensor attn_output_151_transpose_x_0 = const()[name = tensor("attn_output_151_transpose_x_0"), val = tensor(false)]; + tensor attn_output_151_transpose_y_0 = const()[name = tensor("attn_output_151_transpose_y_0"), val = tensor(false)]; + tensor attn_output_151_cast = matmul(transpose_x = attn_output_151_transpose_x_0, transpose_y = attn_output_151_transpose_y_0, x = input_407_cast, y = value_states_103_cast)[name = tensor("attn_output_151_cast")]; + tensor var_2349 = const()[name = tensor("op_2349"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_153_cast = reshape(shape = var_2349, x = 
attn_output_151_cast)[name = tensor("attn_output_153_cast")]; + tensor attn_output_155_perm_0 = const()[name = tensor("attn_output_155_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2352 = const()[name = tensor("op_2352"), val = tensor([1, 77, 1280])]; + tensor transpose_31 = transpose(perm = attn_output_155_perm_0, x = attn_output_153_cast)[name = tensor("transpose_31")]; + tensor input_409_cast = reshape(shape = var_2352, x = transpose_31)[name = tensor("input_409_cast")]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1120435072)))]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1123711936)))]; + tensor hidden_states_153_cast = linear(bias = text_encoder_text_model_encoder_layers_25_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_25_self_attn_out_proj_weight_to_fp16, x = input_409_cast)[name = tensor("hidden_states_153_cast")]; + tensor input_411_cast = add(x = input_403_cast, y = hidden_states_153_cast)[name = tensor("input_411_cast")]; + tensor input_413_axes_0 = const()[name = tensor("input_413_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_25_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1123714560)))]; + tensor text_encoder_text_model_encoder_layers_25_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1123717184)))]; + tensor input_413_cast = layer_norm(axes = input_413_axes_0, beta = text_encoder_text_model_encoder_layers_25_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_25_layer_norm2_weight_to_fp16, x = input_411_cast)[name = tensor("input_413_cast")]; + tensor text_encoder_text_model_encoder_layers_25_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1123719808)))]; + tensor text_encoder_text_model_encoder_layers_25_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1136827072)))]; + tensor input_415_cast = linear(bias = text_encoder_text_model_encoder_layers_25_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_25_mlp_fc1_weight_to_fp16, x = input_413_cast)[name = tensor("input_415_cast")]; + tensor input_417_mode_0 = const()[name = tensor("input_417_mode_0"), val = tensor("EXACT")]; + tensor input_417_cast = gelu(mode = input_417_mode_0, x = input_415_cast)[name = tensor("input_417_cast")]; + tensor text_encoder_text_model_encoder_layers_25_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path 
= tensor("@model_path/weights/weight.bin"), offset = tensor(1136837376)))]; + tensor text_encoder_text_model_encoder_layers_25_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1149944640)))]; + tensor hidden_states_155_cast = linear(bias = text_encoder_text_model_encoder_layers_25_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_25_mlp_fc2_weight_to_fp16, x = input_417_cast)[name = tensor("hidden_states_155_cast")]; + tensor input_419_cast = add(x = input_411_cast, y = hidden_states_155_cast)[name = tensor("input_419_cast")]; + tensor hidden_states_157_axes_0 = const()[name = tensor("hidden_states_157_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_26_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1149947264)))]; + tensor text_encoder_text_model_encoder_layers_26_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1149949888)))]; + tensor hidden_states_157_cast = layer_norm(axes = hidden_states_157_axes_0, beta = text_encoder_text_model_encoder_layers_26_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_26_layer_norm1_weight_to_fp16, x = input_419_cast)[name = tensor("hidden_states_157_cast")]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1149952512)))]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1153229376)))]; + tensor var_2390_cast = linear(bias = text_encoder_text_model_encoder_layers_26_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_26_self_attn_q_proj_weight_to_fp16, x = hidden_states_157_cast)[name = tensor("op_2390_cast")]; + tensor var_2391_to_fp16 = const()[name = tensor("op_2391_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_161_cast = mul(x = var_2390_cast, y = var_2391_to_fp16)[name = tensor("tensor_161_cast")]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1153232000)))]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1156508864)))]; + tensor tensor_157_cast = linear(bias = text_encoder_text_model_encoder_layers_26_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_26_self_attn_k_proj_weight_to_fp16, x = hidden_states_157_cast)[name = tensor("tensor_157_cast")]; + tensor 
var_2396 = const()[name = tensor("op_2396"), val = tensor([1, -1, 20, 64])]; + tensor var_2397_cast = reshape(shape = var_2396, x = tensor_157_cast)[name = tensor("op_2397_cast")]; + tensor var_2398_perm_0 = const()[name = tensor("op_2398_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1156511488)))]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1159788352)))]; + tensor tensor_159_cast = linear(bias = text_encoder_text_model_encoder_layers_26_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_26_self_attn_v_proj_weight_to_fp16, x = hidden_states_157_cast)[name = tensor("tensor_159_cast")]; + tensor var_2403 = const()[name = tensor("op_2403"), val = tensor([1, -1, 20, 64])]; + tensor var_2404_cast = reshape(shape = var_2403, x = tensor_159_cast)[name = tensor("op_2404_cast")]; + tensor var_2405_perm_0 = const()[name = tensor("op_2405_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2412 = const()[name = tensor("op_2412"), val = tensor([1, 77, 20, 64])]; + tensor var_2413_cast = reshape(shape = var_2412, x = tensor_161_cast)[name = tensor("op_2413_cast")]; + tensor var_2414_perm_0 = const()[name = tensor("op_2414_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2416 = const()[name = tensor("op_2416"), val = tensor([20, -1, 64])]; + tensor transpose_28 = transpose(perm = var_2414_perm_0, x = var_2413_cast)[name = tensor("transpose_28")]; + tensor query_states_53_cast = reshape(shape = var_2416, x = transpose_28)[name = tensor("query_states_53_cast")]; + tensor var_2418 = const()[name = tensor("op_2418"), val = tensor([20, -1, 64])]; + tensor transpose_30 = transpose(perm = var_2398_perm_0, x = var_2397_cast)[name = tensor("transpose_30")]; + tensor key_states_107_cast = reshape(shape = var_2418, x = transpose_30)[name = tensor("key_states_107_cast")]; + tensor var_2420 = const()[name = tensor("op_2420"), val = tensor([20, -1, 64])]; + tensor transpose_29 = transpose(perm = var_2405_perm_0, x = var_2404_cast)[name = tensor("transpose_29")]; + tensor value_states_107_cast = reshape(shape = var_2420, x = transpose_29)[name = tensor("value_states_107_cast")]; + tensor var_2423_perm_0 = const()[name = tensor("op_2423_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_157_transpose_x_0 = const()[name = tensor("attn_weights_157_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_157_transpose_y_0 = const()[name = tensor("attn_weights_157_transpose_y_0"), val = tensor(false)]; + tensor transpose_27 = transpose(perm = var_2423_perm_0, x = key_states_107_cast)[name = tensor("transpose_27")]; + tensor attn_weights_157_cast = matmul(transpose_x = attn_weights_157_transpose_x_0, transpose_y = attn_weights_157_transpose_y_0, x = query_states_53_cast, y = transpose_27)[name = tensor("attn_weights_157_cast")]; + tensor var_2425 = const()[name = tensor("op_2425"), val = tensor([1, 20, 77, 77])]; + tensor var_2426_cast = reshape(shape = var_2425, x = attn_weights_157_cast)[name = tensor("op_2426_cast")]; + tensor attn_weights_159_cast = add(x = var_2426_cast, y = 
causal_attention_mask_to_fp16)[name = tensor("attn_weights_159_cast")]; + tensor var_2431 = const()[name = tensor("op_2431"), val = tensor([20, 77, 77])]; + tensor input_421_cast = reshape(shape = var_2431, x = attn_weights_159_cast)[name = tensor("input_421_cast")]; + tensor input_423_cast = softmax(axis = var_5, x = input_421_cast)[name = tensor("input_423_cast")]; + tensor attn_output_157_transpose_x_0 = const()[name = tensor("attn_output_157_transpose_x_0"), val = tensor(false)]; + tensor attn_output_157_transpose_y_0 = const()[name = tensor("attn_output_157_transpose_y_0"), val = tensor(false)]; + tensor attn_output_157_cast = matmul(transpose_x = attn_output_157_transpose_x_0, transpose_y = attn_output_157_transpose_y_0, x = input_423_cast, y = value_states_107_cast)[name = tensor("attn_output_157_cast")]; + tensor var_2436 = const()[name = tensor("op_2436"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_159_cast = reshape(shape = var_2436, x = attn_output_157_cast)[name = tensor("attn_output_159_cast")]; + tensor attn_output_161_perm_0 = const()[name = tensor("attn_output_161_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2439 = const()[name = tensor("op_2439"), val = tensor([1, 77, 1280])]; + tensor transpose_26 = transpose(perm = attn_output_161_perm_0, x = attn_output_159_cast)[name = tensor("transpose_26")]; + tensor input_425_cast = reshape(shape = var_2439, x = transpose_26)[name = tensor("input_425_cast")]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1159790976)))]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1163067840)))]; + tensor hidden_states_159_cast = linear(bias = text_encoder_text_model_encoder_layers_26_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_26_self_attn_out_proj_weight_to_fp16, x = input_425_cast)[name = tensor("hidden_states_159_cast")]; + tensor input_427_cast = add(x = input_419_cast, y = hidden_states_159_cast)[name = tensor("input_427_cast")]; + tensor input_429_axes_0 = const()[name = tensor("input_429_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_26_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1163070464)))]; + tensor text_encoder_text_model_encoder_layers_26_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1163073088)))]; + tensor input_429_cast = layer_norm(axes = input_429_axes_0, beta = text_encoder_text_model_encoder_layers_26_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_26_layer_norm2_weight_to_fp16, x = input_427_cast)[name = tensor("input_429_cast")]; + tensor text_encoder_text_model_encoder_layers_26_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1163075712)))]; + tensor text_encoder_text_model_encoder_layers_26_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1176182976)))]; + tensor input_431_cast = linear(bias = text_encoder_text_model_encoder_layers_26_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_26_mlp_fc1_weight_to_fp16, x = input_429_cast)[name = tensor("input_431_cast")]; + tensor input_433_mode_0 = const()[name = tensor("input_433_mode_0"), val = tensor("EXACT")]; + tensor input_433_cast = gelu(mode = input_433_mode_0, x = input_431_cast)[name = tensor("input_433_cast")]; + tensor text_encoder_text_model_encoder_layers_26_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1176193280)))]; + tensor text_encoder_text_model_encoder_layers_26_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1189300544)))]; + tensor hidden_states_161_cast = linear(bias = text_encoder_text_model_encoder_layers_26_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_26_mlp_fc2_weight_to_fp16, x = input_433_cast)[name = tensor("hidden_states_161_cast")]; + tensor input_435_cast = add(x = input_427_cast, y = hidden_states_161_cast)[name = tensor("input_435_cast")]; + tensor hidden_states_163_axes_0 = const()[name = tensor("hidden_states_163_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_27_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1189303168)))]; + tensor text_encoder_text_model_encoder_layers_27_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1189305792)))]; + tensor hidden_states_163_cast = layer_norm(axes = hidden_states_163_axes_0, beta = text_encoder_text_model_encoder_layers_27_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_27_layer_norm1_weight_to_fp16, x = input_435_cast)[name = tensor("hidden_states_163_cast")]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1189308416)))]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1192585280)))]; + tensor var_2477_cast = linear(bias = text_encoder_text_model_encoder_layers_27_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_27_self_attn_q_proj_weight_to_fp16, x = hidden_states_163_cast)[name = tensor("op_2477_cast")]; + tensor var_2478_to_fp16 = const()[name = tensor("op_2478_to_fp16"), 
val = tensor(0x1p-3)]; + tensor tensor_167_cast = mul(x = var_2477_cast, y = var_2478_to_fp16)[name = tensor("tensor_167_cast")]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1192587904)))]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1195864768)))]; + tensor tensor_163_cast = linear(bias = text_encoder_text_model_encoder_layers_27_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_27_self_attn_k_proj_weight_to_fp16, x = hidden_states_163_cast)[name = tensor("tensor_163_cast")]; + tensor var_2483 = const()[name = tensor("op_2483"), val = tensor([1, -1, 20, 64])]; + tensor var_2484_cast = reshape(shape = var_2483, x = tensor_163_cast)[name = tensor("op_2484_cast")]; + tensor var_2485_perm_0 = const()[name = tensor("op_2485_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1195867392)))]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1199144256)))]; + tensor tensor_165_cast = linear(bias = text_encoder_text_model_encoder_layers_27_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_27_self_attn_v_proj_weight_to_fp16, x = hidden_states_163_cast)[name = tensor("tensor_165_cast")]; + tensor var_2490 = const()[name = tensor("op_2490"), val = tensor([1, -1, 20, 64])]; + tensor var_2491_cast = reshape(shape = var_2490, x = tensor_165_cast)[name = tensor("op_2491_cast")]; + tensor var_2492_perm_0 = const()[name = tensor("op_2492_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2499 = const()[name = tensor("op_2499"), val = tensor([1, 77, 20, 64])]; + tensor var_2500_cast = reshape(shape = var_2499, x = tensor_167_cast)[name = tensor("op_2500_cast")]; + tensor var_2501_perm_0 = const()[name = tensor("op_2501_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2503 = const()[name = tensor("op_2503"), val = tensor([20, -1, 64])]; + tensor transpose_23 = transpose(perm = var_2501_perm_0, x = var_2500_cast)[name = tensor("transpose_23")]; + tensor query_states_55_cast = reshape(shape = var_2503, x = transpose_23)[name = tensor("query_states_55_cast")]; + tensor var_2505 = const()[name = tensor("op_2505"), val = tensor([20, -1, 64])]; + tensor transpose_25 = transpose(perm = var_2485_perm_0, x = var_2484_cast)[name = tensor("transpose_25")]; + tensor key_states_111_cast = reshape(shape = var_2505, x = transpose_25)[name = tensor("key_states_111_cast")]; + tensor var_2507 = const()[name = tensor("op_2507"), val = tensor([20, -1, 64])]; + tensor transpose_24 = transpose(perm = var_2492_perm_0, x = var_2491_cast)[name = tensor("transpose_24")]; + tensor value_states_111_cast = reshape(shape = var_2507, x = transpose_24)[name = 
tensor("value_states_111_cast")]; + tensor var_2510_perm_0 = const()[name = tensor("op_2510_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_163_transpose_x_0 = const()[name = tensor("attn_weights_163_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_163_transpose_y_0 = const()[name = tensor("attn_weights_163_transpose_y_0"), val = tensor(false)]; + tensor transpose_22 = transpose(perm = var_2510_perm_0, x = key_states_111_cast)[name = tensor("transpose_22")]; + tensor attn_weights_163_cast = matmul(transpose_x = attn_weights_163_transpose_x_0, transpose_y = attn_weights_163_transpose_y_0, x = query_states_55_cast, y = transpose_22)[name = tensor("attn_weights_163_cast")]; + tensor var_2512 = const()[name = tensor("op_2512"), val = tensor([1, 20, 77, 77])]; + tensor var_2513_cast = reshape(shape = var_2512, x = attn_weights_163_cast)[name = tensor("op_2513_cast")]; + tensor attn_weights_165_cast = add(x = var_2513_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_165_cast")]; + tensor var_2518 = const()[name = tensor("op_2518"), val = tensor([20, 77, 77])]; + tensor input_437_cast = reshape(shape = var_2518, x = attn_weights_165_cast)[name = tensor("input_437_cast")]; + tensor input_439_cast = softmax(axis = var_5, x = input_437_cast)[name = tensor("input_439_cast")]; + tensor attn_output_163_transpose_x_0 = const()[name = tensor("attn_output_163_transpose_x_0"), val = tensor(false)]; + tensor attn_output_163_transpose_y_0 = const()[name = tensor("attn_output_163_transpose_y_0"), val = tensor(false)]; + tensor attn_output_163_cast = matmul(transpose_x = attn_output_163_transpose_x_0, transpose_y = attn_output_163_transpose_y_0, x = input_439_cast, y = value_states_111_cast)[name = tensor("attn_output_163_cast")]; + tensor var_2523 = const()[name = tensor("op_2523"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_165_cast = reshape(shape = var_2523, x = attn_output_163_cast)[name = tensor("attn_output_165_cast")]; + tensor attn_output_167_perm_0 = const()[name = tensor("attn_output_167_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2526 = const()[name = tensor("op_2526"), val = tensor([1, 77, 1280])]; + tensor transpose_21 = transpose(perm = attn_output_167_perm_0, x = attn_output_165_cast)[name = tensor("transpose_21")]; + tensor input_441_cast = reshape(shape = var_2526, x = transpose_21)[name = tensor("input_441_cast")]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1199146880)))]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1202423744)))]; + tensor hidden_states_165_cast = linear(bias = text_encoder_text_model_encoder_layers_27_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_27_self_attn_out_proj_weight_to_fp16, x = input_441_cast)[name = tensor("hidden_states_165_cast")]; + tensor input_443_cast = add(x = input_435_cast, y = hidden_states_165_cast)[name = tensor("input_443_cast")]; + tensor input_445_axes_0 = const()[name = tensor("input_445_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_27_layer_norm2_weight_to_fp16 = 
const()[name = tensor("text_encoder_text_model_encoder_layers_27_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1202426368)))]; + tensor text_encoder_text_model_encoder_layers_27_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1202428992)))]; + tensor input_445_cast = layer_norm(axes = input_445_axes_0, beta = text_encoder_text_model_encoder_layers_27_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_27_layer_norm2_weight_to_fp16, x = input_443_cast)[name = tensor("input_445_cast")]; + tensor text_encoder_text_model_encoder_layers_27_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1202431616)))]; + tensor text_encoder_text_model_encoder_layers_27_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1215538880)))]; + tensor input_447_cast = linear(bias = text_encoder_text_model_encoder_layers_27_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_27_mlp_fc1_weight_to_fp16, x = input_445_cast)[name = tensor("input_447_cast")]; + tensor input_449_mode_0 = const()[name = tensor("input_449_mode_0"), val = tensor("EXACT")]; + tensor input_449_cast = gelu(mode = input_449_mode_0, x = input_447_cast)[name = tensor("input_449_cast")]; + tensor text_encoder_text_model_encoder_layers_27_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1215549184)))]; + tensor text_encoder_text_model_encoder_layers_27_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1228656448)))]; + tensor hidden_states_167_cast = linear(bias = text_encoder_text_model_encoder_layers_27_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_27_mlp_fc2_weight_to_fp16, x = input_449_cast)[name = tensor("hidden_states_167_cast")]; + tensor input_451_cast = add(x = input_443_cast, y = hidden_states_167_cast)[name = tensor("input_451_cast")]; + tensor hidden_states_169_axes_0 = const()[name = tensor("hidden_states_169_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_28_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1228659072)))]; + tensor text_encoder_text_model_encoder_layers_28_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1228661696)))]; + tensor hidden_states_169_cast = layer_norm(axes = hidden_states_169_axes_0, beta = text_encoder_text_model_encoder_layers_28_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_28_layer_norm1_weight_to_fp16, 
x = input_451_cast)[name = tensor("hidden_states_169_cast")]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1228664320)))]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1231941184)))]; + tensor var_2564_cast = linear(bias = text_encoder_text_model_encoder_layers_28_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_28_self_attn_q_proj_weight_to_fp16, x = hidden_states_169_cast)[name = tensor("op_2564_cast")]; + tensor var_2565_to_fp16 = const()[name = tensor("op_2565_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_173_cast = mul(x = var_2564_cast, y = var_2565_to_fp16)[name = tensor("tensor_173_cast")]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1231943808)))]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1235220672)))]; + tensor tensor_169_cast = linear(bias = text_encoder_text_model_encoder_layers_28_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_28_self_attn_k_proj_weight_to_fp16, x = hidden_states_169_cast)[name = tensor("tensor_169_cast")]; + tensor var_2570 = const()[name = tensor("op_2570"), val = tensor([1, -1, 20, 64])]; + tensor var_2571_cast = reshape(shape = var_2570, x = tensor_169_cast)[name = tensor("op_2571_cast")]; + tensor var_2572_perm_0 = const()[name = tensor("op_2572_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1235223296)))]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1238500160)))]; + tensor tensor_171_cast = linear(bias = text_encoder_text_model_encoder_layers_28_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_28_self_attn_v_proj_weight_to_fp16, x = hidden_states_169_cast)[name = tensor("tensor_171_cast")]; + tensor var_2577 = const()[name = tensor("op_2577"), val = tensor([1, -1, 20, 64])]; + tensor var_2578_cast = reshape(shape = var_2577, x = tensor_171_cast)[name = tensor("op_2578_cast")]; + tensor var_2579_perm_0 = const()[name = tensor("op_2579_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2586 = const()[name = tensor("op_2586"), val = tensor([1, 77, 20, 64])]; + tensor var_2587_cast = reshape(shape = var_2586, x = tensor_173_cast)[name = tensor("op_2587_cast")]; + tensor var_2588_perm_0 = 
const()[name = tensor("op_2588_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2590 = const()[name = tensor("op_2590"), val = tensor([20, -1, 64])]; + tensor transpose_18 = transpose(perm = var_2588_perm_0, x = var_2587_cast)[name = tensor("transpose_18")]; + tensor query_states_57_cast = reshape(shape = var_2590, x = transpose_18)[name = tensor("query_states_57_cast")]; + tensor var_2592 = const()[name = tensor("op_2592"), val = tensor([20, -1, 64])]; + tensor transpose_20 = transpose(perm = var_2572_perm_0, x = var_2571_cast)[name = tensor("transpose_20")]; + tensor key_states_115_cast = reshape(shape = var_2592, x = transpose_20)[name = tensor("key_states_115_cast")]; + tensor var_2594 = const()[name = tensor("op_2594"), val = tensor([20, -1, 64])]; + tensor transpose_19 = transpose(perm = var_2579_perm_0, x = var_2578_cast)[name = tensor("transpose_19")]; + tensor value_states_115_cast = reshape(shape = var_2594, x = transpose_19)[name = tensor("value_states_115_cast")]; + tensor var_2597_perm_0 = const()[name = tensor("op_2597_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_169_transpose_x_0 = const()[name = tensor("attn_weights_169_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_169_transpose_y_0 = const()[name = tensor("attn_weights_169_transpose_y_0"), val = tensor(false)]; + tensor transpose_17 = transpose(perm = var_2597_perm_0, x = key_states_115_cast)[name = tensor("transpose_17")]; + tensor attn_weights_169_cast = matmul(transpose_x = attn_weights_169_transpose_x_0, transpose_y = attn_weights_169_transpose_y_0, x = query_states_57_cast, y = transpose_17)[name = tensor("attn_weights_169_cast")]; + tensor var_2599 = const()[name = tensor("op_2599"), val = tensor([1, 20, 77, 77])]; + tensor var_2600_cast = reshape(shape = var_2599, x = attn_weights_169_cast)[name = tensor("op_2600_cast")]; + tensor attn_weights_171_cast = add(x = var_2600_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_171_cast")]; + tensor var_2605 = const()[name = tensor("op_2605"), val = tensor([20, 77, 77])]; + tensor input_453_cast = reshape(shape = var_2605, x = attn_weights_171_cast)[name = tensor("input_453_cast")]; + tensor input_455_cast = softmax(axis = var_5, x = input_453_cast)[name = tensor("input_455_cast")]; + tensor attn_output_169_transpose_x_0 = const()[name = tensor("attn_output_169_transpose_x_0"), val = tensor(false)]; + tensor attn_output_169_transpose_y_0 = const()[name = tensor("attn_output_169_transpose_y_0"), val = tensor(false)]; + tensor attn_output_169_cast = matmul(transpose_x = attn_output_169_transpose_x_0, transpose_y = attn_output_169_transpose_y_0, x = input_455_cast, y = value_states_115_cast)[name = tensor("attn_output_169_cast")]; + tensor var_2610 = const()[name = tensor("op_2610"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_171_cast = reshape(shape = var_2610, x = attn_output_169_cast)[name = tensor("attn_output_171_cast")]; + tensor attn_output_173_perm_0 = const()[name = tensor("attn_output_173_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2613 = const()[name = tensor("op_2613"), val = tensor([1, 77, 1280])]; + tensor transpose_16 = transpose(perm = attn_output_173_perm_0, x = attn_output_171_cast)[name = tensor("transpose_16")]; + tensor input_457_cast = reshape(shape = var_2613, x = transpose_16)[name = tensor("input_457_cast")]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_out_proj_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_28_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1238502784)))]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1241779648)))]; + tensor hidden_states_171_cast = linear(bias = text_encoder_text_model_encoder_layers_28_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_28_self_attn_out_proj_weight_to_fp16, x = input_457_cast)[name = tensor("hidden_states_171_cast")]; + tensor input_459_cast = add(x = input_451_cast, y = hidden_states_171_cast)[name = tensor("input_459_cast")]; + tensor input_461_axes_0 = const()[name = tensor("input_461_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_28_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1241782272)))]; + tensor text_encoder_text_model_encoder_layers_28_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1241784896)))]; + tensor input_461_cast = layer_norm(axes = input_461_axes_0, beta = text_encoder_text_model_encoder_layers_28_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_28_layer_norm2_weight_to_fp16, x = input_459_cast)[name = tensor("input_461_cast")]; + tensor text_encoder_text_model_encoder_layers_28_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1241787520)))]; + tensor text_encoder_text_model_encoder_layers_28_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1254894784)))]; + tensor input_463_cast = linear(bias = text_encoder_text_model_encoder_layers_28_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_28_mlp_fc1_weight_to_fp16, x = input_461_cast)[name = tensor("input_463_cast")]; + tensor input_465_mode_0 = const()[name = tensor("input_465_mode_0"), val = tensor("EXACT")]; + tensor input_465_cast = gelu(mode = input_465_mode_0, x = input_463_cast)[name = tensor("input_465_cast")]; + tensor text_encoder_text_model_encoder_layers_28_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1254905088)))]; + tensor text_encoder_text_model_encoder_layers_28_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1268012352)))]; + tensor hidden_states_173_cast = linear(bias = text_encoder_text_model_encoder_layers_28_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_28_mlp_fc2_weight_to_fp16, x = input_465_cast)[name = 
tensor("hidden_states_173_cast")]; + tensor input_467_cast = add(x = input_459_cast, y = hidden_states_173_cast)[name = tensor("input_467_cast")]; + tensor hidden_states_175_axes_0 = const()[name = tensor("hidden_states_175_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_29_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1268014976)))]; + tensor text_encoder_text_model_encoder_layers_29_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1268017600)))]; + tensor hidden_states_175_cast = layer_norm(axes = hidden_states_175_axes_0, beta = text_encoder_text_model_encoder_layers_29_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_29_layer_norm1_weight_to_fp16, x = input_467_cast)[name = tensor("hidden_states_175_cast")]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1268020224)))]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1271297088)))]; + tensor var_2651_cast = linear(bias = text_encoder_text_model_encoder_layers_29_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_29_self_attn_q_proj_weight_to_fp16, x = hidden_states_175_cast)[name = tensor("op_2651_cast")]; + tensor var_2652_to_fp16 = const()[name = tensor("op_2652_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_179_cast = mul(x = var_2651_cast, y = var_2652_to_fp16)[name = tensor("tensor_179_cast")]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1271299712)))]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1274576576)))]; + tensor tensor_175_cast = linear(bias = text_encoder_text_model_encoder_layers_29_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_29_self_attn_k_proj_weight_to_fp16, x = hidden_states_175_cast)[name = tensor("tensor_175_cast")]; + tensor var_2657 = const()[name = tensor("op_2657"), val = tensor([1, -1, 20, 64])]; + tensor var_2658_cast = reshape(shape = var_2657, x = tensor_175_cast)[name = tensor("op_2658_cast")]; + tensor var_2659_perm_0 = const()[name = tensor("op_2659_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1274579200)))]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1277856064)))]; + tensor tensor_177_cast = linear(bias = text_encoder_text_model_encoder_layers_29_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_29_self_attn_v_proj_weight_to_fp16, x = hidden_states_175_cast)[name = tensor("tensor_177_cast")]; + tensor var_2664 = const()[name = tensor("op_2664"), val = tensor([1, -1, 20, 64])]; + tensor var_2665_cast = reshape(shape = var_2664, x = tensor_177_cast)[name = tensor("op_2665_cast")]; + tensor var_2666_perm_0 = const()[name = tensor("op_2666_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2673 = const()[name = tensor("op_2673"), val = tensor([1, 77, 20, 64])]; + tensor var_2674_cast = reshape(shape = var_2673, x = tensor_179_cast)[name = tensor("op_2674_cast")]; + tensor var_2675_perm_0 = const()[name = tensor("op_2675_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2677 = const()[name = tensor("op_2677"), val = tensor([20, -1, 64])]; + tensor transpose_13 = transpose(perm = var_2675_perm_0, x = var_2674_cast)[name = tensor("transpose_13")]; + tensor query_states_59_cast = reshape(shape = var_2677, x = transpose_13)[name = tensor("query_states_59_cast")]; + tensor var_2679 = const()[name = tensor("op_2679"), val = tensor([20, -1, 64])]; + tensor transpose_15 = transpose(perm = var_2659_perm_0, x = var_2658_cast)[name = tensor("transpose_15")]; + tensor key_states_119_cast = reshape(shape = var_2679, x = transpose_15)[name = tensor("key_states_119_cast")]; + tensor var_2681 = const()[name = tensor("op_2681"), val = tensor([20, -1, 64])]; + tensor transpose_14 = transpose(perm = var_2666_perm_0, x = var_2665_cast)[name = tensor("transpose_14")]; + tensor value_states_119_cast = reshape(shape = var_2681, x = transpose_14)[name = tensor("value_states_119_cast")]; + tensor var_2684_perm_0 = const()[name = tensor("op_2684_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_175_transpose_x_0 = const()[name = tensor("attn_weights_175_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_175_transpose_y_0 = const()[name = tensor("attn_weights_175_transpose_y_0"), val = tensor(false)]; + tensor transpose_12 = transpose(perm = var_2684_perm_0, x = key_states_119_cast)[name = tensor("transpose_12")]; + tensor attn_weights_175_cast = matmul(transpose_x = attn_weights_175_transpose_x_0, transpose_y = attn_weights_175_transpose_y_0, x = query_states_59_cast, y = transpose_12)[name = tensor("attn_weights_175_cast")]; + tensor var_2686 = const()[name = tensor("op_2686"), val = tensor([1, 20, 77, 77])]; + tensor var_2687_cast = reshape(shape = var_2686, x = attn_weights_175_cast)[name = tensor("op_2687_cast")]; + tensor attn_weights_177_cast = add(x = var_2687_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_177_cast")]; + tensor var_2692 = const()[name = tensor("op_2692"), val = tensor([20, 77, 77])]; + tensor input_469_cast = reshape(shape = var_2692, x = attn_weights_177_cast)[name = tensor("input_469_cast")]; + tensor input_471_cast = softmax(axis = var_5, x = input_469_cast)[name = tensor("input_471_cast")]; + tensor attn_output_175_transpose_x_0 = const()[name = tensor("attn_output_175_transpose_x_0"), val = tensor(false)]; + tensor attn_output_175_transpose_y_0 = const()[name = 
tensor("attn_output_175_transpose_y_0"), val = tensor(false)]; + tensor attn_output_175_cast = matmul(transpose_x = attn_output_175_transpose_x_0, transpose_y = attn_output_175_transpose_y_0, x = input_471_cast, y = value_states_119_cast)[name = tensor("attn_output_175_cast")]; + tensor var_2697 = const()[name = tensor("op_2697"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_177_cast = reshape(shape = var_2697, x = attn_output_175_cast)[name = tensor("attn_output_177_cast")]; + tensor attn_output_179_perm_0 = const()[name = tensor("attn_output_179_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2700 = const()[name = tensor("op_2700"), val = tensor([1, 77, 1280])]; + tensor transpose_11 = transpose(perm = attn_output_179_perm_0, x = attn_output_177_cast)[name = tensor("transpose_11")]; + tensor input_473_cast = reshape(shape = var_2700, x = transpose_11)[name = tensor("input_473_cast")]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1277858688)))]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1281135552)))]; + tensor hidden_states_177_cast = linear(bias = text_encoder_text_model_encoder_layers_29_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_29_self_attn_out_proj_weight_to_fp16, x = input_473_cast)[name = tensor("hidden_states_177_cast")]; + tensor input_475_cast = add(x = input_467_cast, y = hidden_states_177_cast)[name = tensor("input_475_cast")]; + tensor input_477_axes_0 = const()[name = tensor("input_477_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_29_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1281138176)))]; + tensor text_encoder_text_model_encoder_layers_29_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1281140800)))]; + tensor input_477_cast = layer_norm(axes = input_477_axes_0, beta = text_encoder_text_model_encoder_layers_29_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_29_layer_norm2_weight_to_fp16, x = input_475_cast)[name = tensor("input_477_cast")]; + tensor text_encoder_text_model_encoder_layers_29_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1281143424)))]; + tensor text_encoder_text_model_encoder_layers_29_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1294250688)))]; + tensor input_479_cast = linear(bias = text_encoder_text_model_encoder_layers_29_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_29_mlp_fc1_weight_to_fp16, x = input_477_cast)[name 
= tensor("input_479_cast")]; + tensor input_481_mode_0 = const()[name = tensor("input_481_mode_0"), val = tensor("EXACT")]; + tensor input_481_cast = gelu(mode = input_481_mode_0, x = input_479_cast)[name = tensor("input_481_cast")]; + tensor text_encoder_text_model_encoder_layers_29_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1294260992)))]; + tensor text_encoder_text_model_encoder_layers_29_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1307368256)))]; + tensor hidden_states_179_cast = linear(bias = text_encoder_text_model_encoder_layers_29_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_29_mlp_fc2_weight_to_fp16, x = input_481_cast)[name = tensor("hidden_states_179_cast")]; + tensor input_483_cast = add(x = input_475_cast, y = hidden_states_179_cast)[name = tensor("input_483_cast")]; + tensor hidden_states_181_axes_0 = const()[name = tensor("hidden_states_181_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_30_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1307370880)))]; + tensor text_encoder_text_model_encoder_layers_30_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1307373504)))]; + tensor hidden_states_181_cast = layer_norm(axes = hidden_states_181_axes_0, beta = text_encoder_text_model_encoder_layers_30_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_30_layer_norm1_weight_to_fp16, x = input_483_cast)[name = tensor("hidden_states_181_cast")]; + tensor text_encoder_text_model_encoder_layers_30_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1307376128)))]; + tensor text_encoder_text_model_encoder_layers_30_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1310652992)))]; + tensor var_2738_cast = linear(bias = text_encoder_text_model_encoder_layers_30_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_30_self_attn_q_proj_weight_to_fp16, x = hidden_states_181_cast)[name = tensor("op_2738_cast")]; + tensor var_2739_to_fp16 = const()[name = tensor("op_2739_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_185_cast = mul(x = var_2738_cast, y = var_2739_to_fp16)[name = tensor("tensor_185_cast")]; + tensor text_encoder_text_model_encoder_layers_30_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1310655616)))]; + tensor text_encoder_text_model_encoder_layers_30_self_attn_k_proj_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_30_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1313932480)))]; + tensor tensor_181_cast = linear(bias = text_encoder_text_model_encoder_layers_30_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_30_self_attn_k_proj_weight_to_fp16, x = hidden_states_181_cast)[name = tensor("tensor_181_cast")]; + tensor var_2744 = const()[name = tensor("op_2744"), val = tensor([1, -1, 20, 64])]; + tensor var_2745_cast = reshape(shape = var_2744, x = tensor_181_cast)[name = tensor("op_2745_cast")]; + tensor var_2746_perm_0 = const()[name = tensor("op_2746_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_30_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1313935104)))]; + tensor text_encoder_text_model_encoder_layers_30_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1317211968)))]; + tensor tensor_183_cast = linear(bias = text_encoder_text_model_encoder_layers_30_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_30_self_attn_v_proj_weight_to_fp16, x = hidden_states_181_cast)[name = tensor("tensor_183_cast")]; + tensor var_2751 = const()[name = tensor("op_2751"), val = tensor([1, -1, 20, 64])]; + tensor var_2752_cast = reshape(shape = var_2751, x = tensor_183_cast)[name = tensor("op_2752_cast")]; + tensor var_2753_perm_0 = const()[name = tensor("op_2753_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2760 = const()[name = tensor("op_2760"), val = tensor([1, 77, 20, 64])]; + tensor var_2761_cast = reshape(shape = var_2760, x = tensor_185_cast)[name = tensor("op_2761_cast")]; + tensor var_2762_perm_0 = const()[name = tensor("op_2762_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2764 = const()[name = tensor("op_2764"), val = tensor([20, -1, 64])]; + tensor transpose_8 = transpose(perm = var_2762_perm_0, x = var_2761_cast)[name = tensor("transpose_8")]; + tensor query_states_61_cast = reshape(shape = var_2764, x = transpose_8)[name = tensor("query_states_61_cast")]; + tensor var_2766 = const()[name = tensor("op_2766"), val = tensor([20, -1, 64])]; + tensor transpose_10 = transpose(perm = var_2746_perm_0, x = var_2745_cast)[name = tensor("transpose_10")]; + tensor key_states_123_cast = reshape(shape = var_2766, x = transpose_10)[name = tensor("key_states_123_cast")]; + tensor var_2768 = const()[name = tensor("op_2768"), val = tensor([20, -1, 64])]; + tensor transpose_9 = transpose(perm = var_2753_perm_0, x = var_2752_cast)[name = tensor("transpose_9")]; + tensor value_states_123_cast = reshape(shape = var_2768, x = transpose_9)[name = tensor("value_states_123_cast")]; + tensor var_2771_perm_0 = const()[name = tensor("op_2771_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_181_transpose_x_0 = const()[name = tensor("attn_weights_181_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_181_transpose_y_0 = const()[name = tensor("attn_weights_181_transpose_y_0"), val = tensor(false)]; + tensor transpose_7 = transpose(perm = var_2771_perm_0, x = key_states_123_cast)[name = tensor("transpose_7")]; + tensor attn_weights_181_cast = 
matmul(transpose_x = attn_weights_181_transpose_x_0, transpose_y = attn_weights_181_transpose_y_0, x = query_states_61_cast, y = transpose_7)[name = tensor("attn_weights_181_cast")];
+ tensor var_2773 = const()[name = tensor("op_2773"), val = tensor([1, 20, 77, 77])];
+ tensor var_2774_cast = reshape(shape = var_2773, x = attn_weights_181_cast)[name = tensor("op_2774_cast")];
+ tensor attn_weights_183_cast = add(x = var_2774_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_183_cast")];
+ tensor var_2779 = const()[name = tensor("op_2779"), val = tensor([20, 77, 77])];
+ tensor input_485_cast = reshape(shape = var_2779, x = attn_weights_183_cast)[name = tensor("input_485_cast")];
+ tensor input_487_cast = softmax(axis = var_5, x = input_485_cast)[name = tensor("input_487_cast")];
+ tensor attn_output_181_transpose_x_0 = const()[name = tensor("attn_output_181_transpose_x_0"), val = tensor(false)];
+ tensor attn_output_181_transpose_y_0 = const()[name = tensor("attn_output_181_transpose_y_0"), val = tensor(false)];
+ tensor attn_output_181_cast = matmul(transpose_x = attn_output_181_transpose_x_0, transpose_y = attn_output_181_transpose_y_0, x = input_487_cast, y = value_states_123_cast)[name = tensor("attn_output_181_cast")];
+ tensor var_2784 = const()[name = tensor("op_2784"), val = tensor([1, 20, 77, 64])];
+ tensor attn_output_183_cast = reshape(shape = var_2784, x = attn_output_181_cast)[name = tensor("attn_output_183_cast")];
+ tensor attn_output_185_perm_0 = const()[name = tensor("attn_output_185_perm_0"), val = tensor([0, 2, 1, 3])];
+ tensor var_2787 = const()[name = tensor("op_2787"), val = tensor([1, 77, 1280])];
+ tensor transpose_6 = transpose(perm = attn_output_185_perm_0, x = attn_output_183_cast)[name = tensor("transpose_6")];
+ tensor input_489_cast = reshape(shape = var_2787, x = transpose_6)[name = tensor("input_489_cast")];
+ tensor text_encoder_text_model_encoder_layers_30_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1317214592)))];
+ tensor text_encoder_text_model_encoder_layers_30_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1320491456)))];
+ tensor hidden_states_183_cast = linear(bias = text_encoder_text_model_encoder_layers_30_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_30_self_attn_out_proj_weight_to_fp16, x = input_489_cast)[name = tensor("hidden_states_183_cast")];
+ tensor input_491_cast = add(x = input_483_cast, y = hidden_states_183_cast)[name = tensor("input_491_cast")];
+ tensor input_493_axes_0 = const()[name = tensor("input_493_axes_0"), val = tensor([-1])];
+ tensor text_encoder_text_model_encoder_layers_30_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1320494080)))];
+ tensor text_encoder_text_model_encoder_layers_30_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1320496704)))];
+ tensor input_493_cast = layer_norm(axes = input_493_axes_0, beta = text_encoder_text_model_encoder_layers_30_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_30_layer_norm2_weight_to_fp16, x = input_491_cast)[name = tensor("input_493_cast")];
+ tensor text_encoder_text_model_encoder_layers_30_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1320499328)))];
+ tensor text_encoder_text_model_encoder_layers_30_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1333606592)))];
+ tensor input_495_cast = linear(bias = text_encoder_text_model_encoder_layers_30_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_30_mlp_fc1_weight_to_fp16, x = input_493_cast)[name = tensor("input_495_cast")];
+ tensor input_497_mode_0 = const()[name = tensor("input_497_mode_0"), val = tensor("EXACT")];
+ tensor input_497_cast = gelu(mode = input_497_mode_0, x = input_495_cast)[name = tensor("input_497_cast")];
+ tensor text_encoder_text_model_encoder_layers_30_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1333616896)))];
+ tensor text_encoder_text_model_encoder_layers_30_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1346724160)))];
+ tensor hidden_states_185_cast = linear(bias = text_encoder_text_model_encoder_layers_30_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_30_mlp_fc2_weight_to_fp16, x = input_497_cast)[name = tensor("hidden_states_185_cast")];
+ tensor input_499_cast = add(x = input_491_cast, y = hidden_states_185_cast)[name = tensor("input_499_cast")];
+ tensor input_499_cast_to_fp32_dtype_0 = const()[name = tensor("input_499_cast_to_fp32_dtype_0"), val = tensor("fp32")];
+ tensor hidden_states_187_axes_0 = const()[name = tensor("hidden_states_187_axes_0"), val = tensor([-1])];
+ tensor text_encoder_text_model_encoder_layers_31_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1346726784)))];
+ tensor text_encoder_text_model_encoder_layers_31_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1346729408)))];
+ tensor hidden_states_187_cast = layer_norm(axes = hidden_states_187_axes_0, beta = text_encoder_text_model_encoder_layers_31_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_31_layer_norm1_weight_to_fp16, x = input_499_cast)[name = tensor("hidden_states_187_cast")];
+ tensor text_encoder_text_model_encoder_layers_31_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1346732032)))];
+ tensor text_encoder_text_model_encoder_layers_31_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1350008896)))];
+ tensor var_2825_cast = linear(bias = text_encoder_text_model_encoder_layers_31_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_31_self_attn_q_proj_weight_to_fp16, x = hidden_states_187_cast)[name = tensor("op_2825_cast")];
+ tensor var_2826_to_fp16 = const()[name = tensor("op_2826_to_fp16"), val = tensor(0x1p-3)];
+ tensor tensor_cast = mul(x = var_2825_cast, y = var_2826_to_fp16)[name = tensor("tensor_cast")];
+ tensor text_encoder_text_model_encoder_layers_31_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1350011520)))];
+ tensor text_encoder_text_model_encoder_layers_31_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353288384)))];
+ tensor tensor_187_cast = linear(bias = text_encoder_text_model_encoder_layers_31_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_31_self_attn_k_proj_weight_to_fp16, x = hidden_states_187_cast)[name = tensor("tensor_187_cast")];
+ tensor var_2831 = const()[name = tensor("op_2831"), val = tensor([1, -1, 20, 64])];
+ tensor var_2832_cast = reshape(shape = var_2831, x = tensor_187_cast)[name = tensor("op_2832_cast")];
+ tensor var_2833_perm_0 = const()[name = tensor("op_2833_perm_0"), val = tensor([0, 2, 1, 3])];
+ tensor text_encoder_text_model_encoder_layers_31_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353291008)))];
+ tensor text_encoder_text_model_encoder_layers_31_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1356567872)))];
+ tensor tensor_189_cast = linear(bias = text_encoder_text_model_encoder_layers_31_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_31_self_attn_v_proj_weight_to_fp16, x = hidden_states_187_cast)[name = tensor("tensor_189_cast")];
+ tensor var_2838 = const()[name = tensor("op_2838"), val = tensor([1, -1, 20, 64])];
+ tensor var_2839_cast = reshape(shape = var_2838, x = tensor_189_cast)[name = tensor("op_2839_cast")];
+ tensor var_2840_perm_0 = const()[name = tensor("op_2840_perm_0"), val = tensor([0, 2, 1, 3])];
+ tensor var_2847 = const()[name = tensor("op_2847"), val = tensor([1, 77, 20, 64])];
+ tensor var_2848_cast = reshape(shape = var_2847, x = tensor_cast)[name = tensor("op_2848_cast")];
+ tensor var_2849_perm_0 = const()[name = tensor("op_2849_perm_0"), val = tensor([0, 2, 1, 3])];
+ tensor var_2851 = const()[name = tensor("op_2851"), val = tensor([20, -1, 64])];
+ tensor transpose_3 = transpose(perm = var_2849_perm_0, x = var_2848_cast)[name = tensor("transpose_3")];
+ tensor query_states_cast = reshape(shape = var_2851, x = transpose_3)[name = tensor("query_states_cast")];
+ tensor var_2853 = const()[name = tensor("op_2853"), val = tensor([20, -1, 64])];
+ tensor transpose_5 = transpose(perm = var_2833_perm_0, x = var_2832_cast)[name = tensor("transpose_5")];
+ tensor key_states_cast = reshape(shape = var_2853, x = transpose_5)[name = tensor("key_states_cast")];
+ tensor var_2855 = const()[name = tensor("op_2855"), val = tensor([20, -1, 64])];
+ tensor transpose_4 = transpose(perm = var_2840_perm_0, x = var_2839_cast)[name = tensor("transpose_4")];
+ tensor value_states_cast = reshape(shape = var_2855, x = transpose_4)[name = tensor("value_states_cast")];
+ tensor var_2858_perm_0 = const()[name = tensor("op_2858_perm_0"), val = tensor([0, 2, 1])];
+ tensor attn_weights_187_transpose_x_0 = const()[name = tensor("attn_weights_187_transpose_x_0"), val = tensor(false)];
+ tensor attn_weights_187_transpose_y_0 = const()[name = tensor("attn_weights_187_transpose_y_0"), val = tensor(false)];
+ tensor transpose_2 = transpose(perm = var_2858_perm_0, x = key_states_cast)[name = tensor("transpose_2")];
+ tensor attn_weights_187_cast = matmul(transpose_x = attn_weights_187_transpose_x_0, transpose_y = attn_weights_187_transpose_y_0, x = query_states_cast, y = transpose_2)[name = tensor("attn_weights_187_cast")];
+ tensor var_2860 = const()[name = tensor("op_2860"), val = tensor([1, 20, 77, 77])];
+ tensor var_2861_cast = reshape(shape = var_2860, x = attn_weights_187_cast)[name = tensor("op_2861_cast")];
+ tensor attn_weights_189_cast = add(x = var_2861_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_189_cast")];
+ tensor var_2866 = const()[name = tensor("op_2866"), val = tensor([20, 77, 77])];
+ tensor input_501_cast = reshape(shape = var_2866, x = attn_weights_189_cast)[name = tensor("input_501_cast")];
+ tensor input_503_cast = softmax(axis = var_5, x = input_501_cast)[name = tensor("input_503_cast")];
+ tensor attn_output_187_transpose_x_0 = const()[name = tensor("attn_output_187_transpose_x_0"), val = tensor(false)];
+ tensor attn_output_187_transpose_y_0 = const()[name = tensor("attn_output_187_transpose_y_0"), val = tensor(false)];
+ tensor attn_output_187_cast = matmul(transpose_x = attn_output_187_transpose_x_0, transpose_y = attn_output_187_transpose_y_0, x = input_503_cast, y = value_states_cast)[name = tensor("attn_output_187_cast")];
+ tensor var_2871 = const()[name = tensor("op_2871"), val = tensor([1, 20, 77, 64])];
+ tensor attn_output_189_cast = reshape(shape = var_2871, x = attn_output_187_cast)[name = tensor("attn_output_189_cast")];
+ tensor attn_output_perm_0 = const()[name = tensor("attn_output_perm_0"), val = tensor([0, 2, 1, 3])];
+ tensor var_2874 = const()[name = tensor("op_2874"), val = tensor([1, 77, 1280])];
+ tensor transpose_1 = transpose(perm = attn_output_perm_0, x = attn_output_189_cast)[name = tensor("transpose_1")];
+ tensor input_505_cast = reshape(shape = var_2874, x = transpose_1)[name = tensor("input_505_cast")];
+ tensor text_encoder_text_model_encoder_layers_31_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1356570496)))];
+ tensor text_encoder_text_model_encoder_layers_31_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359847360)))];
+ tensor hidden_states_189_cast = linear(bias = text_encoder_text_model_encoder_layers_31_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_31_self_attn_out_proj_weight_to_fp16, x = input_505_cast)[name = tensor("hidden_states_189_cast")];
+ tensor input_507_cast = add(x = input_499_cast, y = hidden_states_189_cast)[name = tensor("input_507_cast")];
+ tensor input_509_axes_0 = const()[name = tensor("input_509_axes_0"), val = tensor([-1])];
+ tensor text_encoder_text_model_encoder_layers_31_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359849984)))];
+ tensor text_encoder_text_model_encoder_layers_31_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359852608)))];
+ tensor input_509_cast = layer_norm(axes = input_509_axes_0, beta = text_encoder_text_model_encoder_layers_31_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_31_layer_norm2_weight_to_fp16, x = input_507_cast)[name = tensor("input_509_cast")];
+ tensor text_encoder_text_model_encoder_layers_31_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359855232)))];
+ tensor text_encoder_text_model_encoder_layers_31_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1372962496)))];
+ tensor input_511_cast = linear(bias = text_encoder_text_model_encoder_layers_31_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_31_mlp_fc1_weight_to_fp16, x = input_509_cast)[name = tensor("input_511_cast")];
+ tensor input_513_mode_0 = const()[name = tensor("input_513_mode_0"), val = tensor("EXACT")];
+ tensor input_513_cast = gelu(mode = input_513_mode_0, x = input_511_cast)[name = tensor("input_513_cast")];
+ tensor text_encoder_text_model_encoder_layers_31_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1372972800)))];
+ tensor text_encoder_text_model_encoder_layers_31_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386080064)))];
+ tensor hidden_states_cast = linear(bias = text_encoder_text_model_encoder_layers_31_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_31_mlp_fc2_weight_to_fp16, x = input_513_cast)[name = tensor("hidden_states_cast")];
+ tensor input_515_cast = add(x = input_507_cast, y = hidden_states_cast)[name = tensor("input_515_cast")];
+ tensor last_hidden_state_axes_0 = const()[name = tensor("last_hidden_state_axes_0"), val = tensor([-1])];
+ tensor text_encoder_text_model_final_layer_norm_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_final_layer_norm_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386082688)))];
+ tensor text_encoder_text_model_final_layer_norm_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_final_layer_norm_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386085312)))];
+ tensor last_hidden_state_cast = layer_norm(axes = last_hidden_state_axes_0, beta = text_encoder_text_model_final_layer_norm_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_final_layer_norm_weight_to_fp16, x = input_515_cast)[name = tensor("last_hidden_state_cast")];
+ tensor var_2902 = const()[name = tensor("op_2902"), val = tensor([0])];
+ tensor var_2904 = reduce_argmax(axis = var_5, keep_dims = var_6, x = cast_1322)[name = tensor("op_2904")];
+ tensor stack_0_axis_0 = const()[name = tensor("stack_0_axis_0"), val = tensor(1)];
+ tensor stack_0 = stack(axis = stack_0_axis_0, values = (var_2902, var_2904))[name = tensor("stack_0")];
+ tensor input_transpose_batch_dims_0 = const()[name = tensor("input_transpose_batch_dims_0"), val = tensor(0)];
+ tensor input_transpose_cast = gather_nd(batch_dims = input_transpose_batch_dims_0, indices = stack_0, x = last_hidden_state_cast)[name = tensor("input_transpose_cast")];
+ tensor text_encoder_text_projection_weight_to_fp16 = const()[name = tensor("text_encoder_text_projection_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386087936)))];
+ tensor var_2911_bias_0_to_fp16 = const()[name = tensor("op_2911_bias_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1389364800)))];
+ tensor var_2911_cast = linear(bias = var_2911_bias_0_to_fp16, weight = text_encoder_text_projection_weight_to_fp16, x = input_transpose_cast)[name = tensor("op_2911_cast")];
+ tensor var_2911_cast_to_fp32_dtype_0 = const()[name = tensor("op_2911_cast_to_fp32_dtype_0"), val = tensor("fp32")];
+ tensor pooled_outputs = cast(dtype = var_2911_cast_to_fp32_dtype_0, x = var_2911_cast)[name = tensor("cast_325")];
+ tensor hidden_embeds = cast(dtype = input_499_cast_to_fp32_dtype_0, x = input_499_cast)[name = tensor("cast_359")];
+ } -> (hidden_embeds, pooled_outputs);
+}
\ No newline at end of file
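
Illustrative sketch (not part of the model.mil file above): the closing portion of the graph produces the two declared outputs. hidden_embeds is the penultimate hidden state (input_499, the tensor entering layer 31) cast to fp32, and pooled_outputs is the final layer-norm output gathered at the position of the largest token id (the end-of-text token in the CLIP vocabulary, via reduce_argmax + gather_nd) and passed through the text projection. The PyTorch sketch below approximates that tail in fp32; all names (pool_and_project, penultimate_hidden_state, text_projection_weight, projection_bias) are hypothetical, and projection_bias stands in for the explicit bias blob (op_2911_bias_0) carried by the converted graph.

import torch

def pool_and_project(last_hidden_state, penultimate_hidden_state, input_ids,
                     text_projection_weight, projection_bias):
    # reduce_argmax + gather_nd: take the hidden state at the position of the
    # largest token id, i.e. the end-of-text token (op_2904, stack_0, gather_nd).
    eos_index = input_ids.to(torch.int32).argmax(dim=-1)
    batch = torch.arange(last_hidden_state.shape[0])
    pooled = last_hidden_state[batch, eos_index]                           # [B, 1280]
    # text projection (op_2911), then cast to fp32 as in cast_325.
    pooled_outputs = (pooled @ text_projection_weight.T + projection_bias).float()
    # cast_359: the penultimate hidden state is emitted directly as hidden_embeds.
    hidden_embeds = penultimate_hidden_state.float()                       # [B, 77, 1280]
    return hidden_embeds, pooled_outputs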